Merge branch 'master' into src_core_tsi

pull/37030/head
tanvi-jagtap 8 months ago
commit be0b6ee33c
  1. 59
      CMakeLists.txt
  2. 1
      Makefile
  3. 2
      Package.swift
  4. 54
      build_autogenerated.yaml
  5. 1
      config.m4
  6. 1
      config.w32
  7. 265
      doc/grpc_xds_bootstrap_format.md
  8. 1
      doc/trace_flags.md
  9. 2
      gRPC-C++.podspec
  10. 3
      gRPC-Core.podspec
  11. 2
      grpc.gemspec
  12. 2
      package.xml
  13. 162
      src/core/BUILD
  14. 2
      src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc
  15. 2
      src/core/ext/transport/chaotic_good/client/chaotic_good_connector.h
  16. 328
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  17. 5
      src/core/handshaker/handshaker.cc
  18. 2
      src/core/lib/debug/trace_flags.cc
  19. 1
      src/core/lib/debug/trace_flags.h
  20. 4
      src/core/lib/debug/trace_flags.yaml
  21. 58
      src/core/lib/iomgr/tcp_posix.cc
  22. 66
      src/core/lib/iomgr/tcp_server_posix.cc
  23. 752
      src/core/lib/transport/call_filters.cc
  24. 398
      src/core/lib/transport/call_filters.h
  25. 10
      src/core/lib/transport/call_spine.h
  26. 39
      src/core/lib/transport/call_state.cc
  27. 957
      src/core/lib/transport/call_state.h
  28. 15
      src/core/lib/transport/interception_chain.cc
  29. 60
      src/core/lib/transport/interception_chain.h
  30. 4
      src/core/load_balancing/subchannel_interface.h
  31. 5
      src/python/grpcio/grpc/aio/_channel.py
  32. 1
      src/python/grpcio/grpc_core_dependencies.py
  33. 2
      test/core/call/bm_client_call.cc
  34. 2
      test/core/call/client_call_test.cc
  35. 2
      test/core/call/server_call_test.cc
  36. 2
      test/core/client_channel/client_channel_test.cc
  37. 2
      test/core/client_channel/load_balanced_call_destination_test.cc
  38. 1
      test/core/surface/channel_init_test.cc
  39. 15
      test/core/transport/BUILD
  40. 10
      test/core/transport/bm_call_spine.cc
  41. 339
      test/core/transport/call_filters_test.cc
  42. 8
      test/core/transport/call_spine_benchmarks.h
  43. 10
      test/core/transport/call_spine_test.cc
  44. 310
      test/core/transport/call_state_test.cc
  45. 12
      test/core/transport/chaotic_good/client_transport_error_test.cc
  46. 4
      test/core/transport/chaotic_good/client_transport_test.cc
  47. 2
      test/core/transport/chaotic_good/server_transport_test.cc
  48. 37
      test/core/transport/interception_chain_test.cc
  49. 4
      test/core/transport/test_suite/transport_test.cc
  50. 1
      tools/doxygen/Doxyfile.c++
  51. 3
      tools/doxygen/Doxyfile.c++.internal
  52. 1
      tools/doxygen/Doxyfile.core
  53. 3
      tools/doxygen/Doxyfile.core.internal
  54. 1
      tools/doxygen/Doxyfile.objc
  55. 1
      tools/doxygen/Doxyfile.objc.internal
  56. 1
      tools/doxygen/Doxyfile.php
  57. 24
      tools/run_tests/generated/tests.json
  58. 1
      tools/run_tests/sanity/banned_functions.py

59
CMakeLists.txt generated

@ -991,6 +991,7 @@ if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx call_spine_test)
endif()
add_dependencies(buildtests_cxx call_state_test)
add_dependencies(buildtests_cxx call_tracer_test)
add_dependencies(buildtests_cxx call_utils_test)
add_dependencies(buildtests_cxx cancel_after_accept_test)
@ -2526,6 +2527,7 @@ add_library(grpc
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -3280,6 +3282,7 @@ add_library(grpc_unsecure
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -5403,6 +5406,7 @@ add_library(grpc_authorization_provider
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -8861,6 +8865,7 @@ add_executable(call_filters_test
src/core/lib/surface/channel_stack_type.cc
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/message.cc
src/core/lib/transport/metadata.cc
@ -9087,6 +9092,58 @@ endif()
endif()
if(gRPC_BUILD_TESTS)
add_executable(call_state_test
src/core/lib/debug/trace.cc
src/core/lib/debug/trace_flags.cc
src/core/lib/gprpp/dump_args.cc
src/core/lib/gprpp/glob.cc
src/core/lib/promise/activity.cc
src/core/lib/transport/call_state.cc
test/core/transport/call_state_test.cc
)
if(WIN32 AND MSVC)
if(BUILD_SHARED_LIBS)
target_compile_definitions(call_state_test
PRIVATE
"GPR_DLL_IMPORTS"
)
endif()
endif()
target_compile_features(call_state_test PUBLIC cxx_std_14)
target_include_directories(call_state_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(call_state_test
${_gRPC_ALLTARGETS_LIBRARIES}
gtest
absl::config
absl::flat_hash_map
absl::hash
absl::type_traits
absl::statusor
gpr
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(call_tracer_test
test/core/telemetry/call_tracer_test.cc
test/core/test_util/fake_stats_plugin.cc
@ -9338,6 +9395,7 @@ add_executable(call_utils_test
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -18808,6 +18866,7 @@ add_executable(interception_chain_test
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc

1
Makefile generated

@ -1343,6 +1343,7 @@ LIBGRPC_SRC = \
src/core/lib/transport/call_filters.cc \
src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
src/core/lib/transport/interception_chain.cc \

2
Package.swift generated

@ -1694,6 +1694,8 @@ let package = Package(
"src/core/lib/transport/call_final_info.h",
"src/core/lib/transport/call_spine.cc",
"src/core/lib/transport/call_spine.h",
"src/core/lib/transport/call_state.cc",
"src/core/lib/transport/call_state.h",
"src/core/lib/transport/connectivity_state.cc",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/custom_metadata.h",

@ -1099,6 +1099,7 @@ libs:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -1908,6 +1909,7 @@ libs:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -2604,6 +2606,7 @@ libs:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -3027,6 +3030,7 @@ libs:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -4701,6 +4705,7 @@ libs:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -5001,6 +5006,7 @@ libs:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -6584,6 +6590,7 @@ targets:
- src/core/lib/surface/channel_stack_type.h
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
- src/core/lib/transport/http2_errors.h
@ -6651,6 +6658,7 @@ targets:
- src/core/lib/surface/channel_stack_type.cc
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/message.cc
- src/core/lib/transport/metadata.cc
@ -6782,6 +6790,48 @@ targets:
- linux
- posix
uses_polling: false
- name: call_state_test
gtest: true
build: test
language: c++
headers:
- src/core/lib/debug/trace.h
- src/core/lib/debug/trace_flags.h
- src/core/lib/debug/trace_impl.h
- src/core/lib/event_engine/event_engine_context.h
- src/core/lib/gprpp/atomic_utils.h
- src/core/lib/gprpp/down_cast.h
- src/core/lib/gprpp/dump_args.h
- src/core/lib/gprpp/glob.h
- src/core/lib/gprpp/orphanable.h
- src/core/lib/gprpp/ref_counted.h
- src/core/lib/gprpp/ref_counted_ptr.h
- src/core/lib/promise/activity.h
- src/core/lib/promise/context.h
- src/core/lib/promise/detail/promise_factory.h
- src/core/lib/promise/detail/promise_like.h
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/status_flag.h
- src/core/lib/transport/call_state.h
- test/core/promise/poll_matcher.h
src:
- src/core/lib/debug/trace.cc
- src/core/lib/debug/trace_flags.cc
- src/core/lib/gprpp/dump_args.cc
- src/core/lib/gprpp/glob.cc
- src/core/lib/promise/activity.cc
- src/core/lib/transport/call_state.cc
- test/core/transport/call_state_test.cc
deps:
- gtest
- absl/base:config
- absl/container:flat_hash_map
- absl/hash:hash
- absl/meta:type_traits
- absl/status:statusor
- gpr
uses_polling: false
- name: call_tracer_test
gtest: true
build: test
@ -7065,6 +7115,7 @@ targets:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -7333,6 +7384,7 @@ targets:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -12788,6 +12840,7 @@ targets:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -13057,6 +13110,7 @@ targets:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc

1
config.m4 generated

@ -718,6 +718,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/transport/call_filters.cc \
src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
src/core/lib/transport/interception_chain.cc \

1
config.w32 generated

@ -683,6 +683,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\transport\\call_filters.cc " +
"src\\core\\lib\\transport\\call_final_info.cc " +
"src\\core\\lib\\transport\\call_spine.cc " +
"src\\core\\lib\\transport\\call_state.cc " +
"src\\core\\lib\\transport\\connectivity_state.cc " +
"src\\core\\lib\\transport\\error_utils.cc " +
"src\\core\\lib\\transport\\interception_chain.cc " +

@ -0,0 +1,265 @@
# xDS Bootstrap File Format in gRPC
This document specifies the xDS bootstrap file format supported by gRPC.
## Background
gRPC expects the xDS bootstrap configuration to be specified as a JSON string.
The xDS bootstrap file location may be specified using the environment variable
`GRPC_XDS_BOOTSTRAP`. Alternatively, the bootstrap file contents may be
specified using the environment variable `GRPC_XDS_BOOTSTRAP_CONFIG`. If both
are specified, the former takes precedence.
The xDS client inside of gRPC parses the bootstrap configuration specified by
one of the above means when it is created to configure itself.
The following sections describe the bootstrap file format, including links to
gRFCs where support for appropriate fields was added.
## File Format
```
{
// The xDS server to talk to. The value is an ordered array of server
// configurations, to support failing over to a secondary xDS server if the
// primary is down.
//
// Prior to gRFC A71, all but the first entry was ignored.
"xds_servers": [
{
// A target URI string suitable for creating a gRPC channel.
"server_uri": <string containing the target URI of xds server>,
// List of channel creds; client will stop at the first type it
// supports. This field is required and must contain at least one
// channel creds type that the client supports.
//
// See section titled "Supported Channel Credentials".
"channel_creds": [
{
"type": <string containing channel cred type>,
// The "config" field is optional; it may be missing if the
// credential type does not require config parameters.
"config": <JSON object containing config for the type>
}
],
// A list of features supported by the server. New values will
// be added over time. For forward compatibility reasons, the
// client will ignore any entry in the list that it does not
// understand, regardless of type.
//
// See section titled "Supported Server Features".
"server_features": [ ... ]
}
],
// Identifies a specific gRPC instance.
"node": {
// Opaque identifier for the gRPC instance.
"id": <string>,
// Identifier for the local service cluster where the gRPC instance is
// running.
"cluster": <string>,
// Specifies where the gRPC instance is running.
"locality": {
"region": <string>,
"zone": <string>,
"sub_zone": <string>,
},
// Opaque metadata extending the node identifier.
"metadata": <JSON Object>,
}
// Map of supported certificate providers, keyed by the provider instance
// name.
// See section titled "Supported certificate providers".
"certificate_providers": {
// Certificate provider instance name, specified by the
// control plane, to fetch certificates from.
"<instance_name>": {
// Name of the plugin implementation.
"plugin_name": <string containing plugin type>,
// A JSON object containing the configuration for the plugin, whose schema
// is defined by the plugin. The "config" field is optional; it may be
// missing if the credential type does not require config parameters.
"config": <JSON object containing config for the type>
}
}
// A template for the name of the Listener resource to subscribe to for a gRPC
// server. If the token `%s` is present in the string, all instances of the
// token will be replaced with the server's listening "IP:port" (e.g.,
// "0.0.0.0:8080", "[::]:8080").
"server_listener_resource_name_template": "example/resource/%s",
// A template for the name of the Listener resource to subscribe to for a gRPC
// client channel. Used only when the channel is created with an "xds:" URI
// with no authority.
//
// If starts with "xdstp:", will be interpreted as a new-style name, in which
// case the authority of the URI will be used to select the relevant
// configuration in the "authorities" map.
//
// The token "%s", if present in this string, will be replaced with the
// service authority (i.e., the path part of the target URI used to create the
// gRPC channel). If the template starts with "xdstp:", the replaced string
// will be percent-encoded. In that case, the replacement string must include
// only characters allowed in a URI path as per RFC-3986 section 3.3 (which
// includes '/'), and all other characters must be percent-encoded.
//
// Defaults to "%s".
"client_default_listener_resource_name_template": <string>,
// A map of authority name to corresponding configuration.
//
// This is used in the following cases:
// - A gRPC client channel is created using an "xds:" URI that includes
// an authority.
// - A gRPC client channel is created using an "xds:" URI with no
// authority, but the "client_default_listener_resource_name_template"
// field turns it into an "xdstp:" URI.
// - A gRPC server is created and the
// "server_listener_resource_name_template" field is an "xdstp:" URI.
//
// In any of those cases, it is an error if the specified authority is
// not present in this map.
"authorities": {
// Entries are keyed by authority name.
// Note: If a new-style resource name has no authority, we will use
// the empty string here as the key.
"<authority_name>": {
// A template for the name of the Listener resource to subscribe
// to for a gRPC client channel. Used only when the channel is
// created using an "xds:" URI with this authority name.
//
// The token "%s", if present in this string, will be replaced
// with percent-encoded service authority (i.e., the path part of the
// target URI used to create the gRPC channel). The replacement string
// must include only characters allowed in a URI path as per RFC-3986
// section 3.3 (which includes '/'), and all other characters must be
// percent-encoded.
//
// Must start with "xdstp://<authority_name>/". If it does not,
// that is considered a bootstrap file parsing error.
//
// If not present in the bootstrap file, defaults to
// "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s".
"client_listener_resource_name_template": <string>,
// Ordered list of xDS servers to contact for this authority.
// Format is exactly the same as the top level "xds_servers" field.
//
// If the same server is listed in multiple authorities, the
// entries will be de-duped (i.e., resources for both authorities
// will be fetched on the same ADS stream).
//
// If not specified, the top-level server list is used.
"xds_servers": [ ... ]
}
}
}
```
### Supported Channel Credentials
gRPC supports the following channel credentials as part of the `channel_creds`
field of `xds_servers`.
#### Insecure credentials
- **Type Name**: `insecure`
- **Config**: Accepts no configuration
#### Google Default credentials
- **Type Name**: `google_default`
- **Config**: Accepts no configuration
#### mTLS credentials
- **Type Name**: `tls`
- **Config**: As described in [gRFC A65][a65]:
```
{
// Path to CA certificate file.
// If unset, system-wide root certs are used.
"ca_certificate_file": <string>,
// Paths to identity certificate file and private key file.
// If either of these fields are set, both must be set.
// If set, mTLS will be used; if unset, normal TLS will be used.
"certificate_file": <string>,
"private_key_file": <string>,
// How often to re-read the certificate files.
// Value is the JSON format described for a google.protobuf.Duration
// message in https://protobuf.dev/programming-guides/proto3/#json.
// If unset, defaults to "600s".
"refresh_interval": <string>
}
```
### Supported Certificate Provider Instances
gRPC supports the following Certificate Provider instances as part of the
`certificate_providers` field:
#### PEM file watcher
- **Plugin Name**: `file_watcher`
- **Config**: As described in [gRFC A29][a29]:
```
{
"certificate_file": "<path to the certificate file in PEM format>",
"private_key_file": "<path to private key file in PEM format>",
"ca_certificate_file": "<path to CA certificate file in PEM format>",
"refresh_interval": "<JSON form of google.protobuf.Duration>"
}
```
### Supported Server Features
gRPC supports the following server features in the `server_features` field
inside `xds_servers`:
- `xds_v3`: Added in gRFC A30. Supported in older versions of gRPC. See
[here](grpc_xds_features.md) for when gRPC added support for xDS transport
protocol v3, and when support for xDS transport protocol v2 was dropped.
- `ignore_resource_deletion`: Added in [gRFC A53][a53]
### When were fields added?
| Bootstrap Field | Relevant gRFCs |
|-----------------|----------------|
| `xds_servers` | [A27][a27], [A71][a71] |
| `google_default` channel credentials | [A27][a27] |
| `insecure` channel credentials | [A27][a27] |
| `node` | [A27][a27] |
| `certificate_providers` | [A29][a29] |
| `file_watcher` certificate provider | [A29][a29] |
| `xds_servers.server_features` | [A30][a30] |
| `server_listener_resource_name_template` | [A36][a36], [A47][a47] |
| `client_default_listener_resource_name_template` | [A47][a47] |
| `authorities` | [A47][a47] |
| `tls` channel credentials | [A65][a65] |
[a27]: https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md
[a29]: https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md#file_watcher-certificate-provider
[a30]: https://github.com/grpc/proposal/blob/master/A30-xds-v3.md
[a36]: https://github.com/grpc/proposal/blob/master/A36-xds-for-servers.md
[a47]: https://github.com/grpc/proposal/blob/master/A47-xds-federation.md
[a53]: https://github.com/grpc/proposal/blob/master/A53-xds-ignore-resource-deletion.md
[a65]: https://github.com/grpc/proposal/blob/master/A65-xds-mtls-creds-in-bootstrap.md#proposal
[a71]: https://github.com/grpc/proposal/blob/master/A71-xds-fallback.md

1
doc/trace_flags.md generated

@ -90,6 +90,7 @@ accomplished by invoking `bazel build --config=dbg <target>`
- auth_context_refcount - Auth context refcounting.
- call_combiner - Call combiner state.
- call_refcount - Refcount on call.
- call_state - Traces transitions through the call spine state machine.
- closure - Legacy closure creation, scheduling, and completion.
- combiner - Combiner lock state.
- cq_refcount - Completion queue refcounting.

2
gRPC-C++.podspec generated

@ -1202,6 +1202,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',
@ -2476,6 +2477,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',

3
gRPC-Core.podspec generated

@ -1809,6 +1809,8 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.cc',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.cc',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
@ -3248,6 +3250,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',

2
grpc.gemspec generated

@ -1696,6 +1696,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/transport/call_final_info.h )
s.files += %w( src/core/lib/transport/call_spine.cc )
s.files += %w( src/core/lib/transport/call_spine.h )
s.files += %w( src/core/lib/transport/call_state.cc )
s.files += %w( src/core/lib/transport/call_state.h )
s.files += %w( src/core/lib/transport/connectivity_state.cc )
s.files += %w( src/core/lib/transport/connectivity_state.h )
s.files += %w( src/core/lib/transport/custom_metadata.h )

2
package.xml generated

@ -1678,6 +1678,8 @@
<file baseinstalldir="/" name="src/core/lib/transport/call_final_info.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_spine.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_spine.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_state.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_state.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/custom_metadata.h" role="src" />

@ -5076,14 +5076,139 @@ grpc_cc_library(
)
grpc_cc_library(
name = "grpc_xds_client",
name = "xds_certificate_provider",
srcs = [
"xds/grpc/xds_certificate_provider.cc",
],
hdrs = [
"xds/grpc/xds_certificate_provider.h",
],
external_deps = [
"absl/base:core_headers",
"absl/functional:bind_front",
"absl/log:check",
"absl/log:log",
"absl/strings",
"absl/types:optional",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"channel_args",
"error",
"grpc_matchers",
"grpc_tls_credentials",
"unique_type_name",
"useful",
"//:gpr",
"//:grpc_base",
"//:ref_counted_ptr",
"//:tsi_ssl_credentials",
],
)
grpc_cc_library(
name = "xds_certificate_provider_store",
srcs = [
"lib/security/credentials/xds/xds_credentials.cc",
"xds/grpc/certificate_provider_store.cc",
],
hdrs = [
"xds/grpc/certificate_provider_store.h",
],
external_deps = [
"absl/base:core_headers",
"absl/log:log",
"absl/strings",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"certificate_provider_factory",
"certificate_provider_registry",
"grpc_tls_credentials",
"json",
"json_args",
"json_object_loader",
"unique_type_name",
"useful",
"validation_errors",
"//:config",
"//:gpr",
"//:grpc_base",
"//:orphanable",
"//:ref_counted_ptr",
],
)
grpc_cc_library(
name = "xds_credentials",
srcs = [
"lib/security/credentials/xds/xds_credentials.cc",
],
hdrs = [
"lib/security/credentials/xds/xds_credentials.h",
],
external_deps = [
"absl/status",
"absl/log:check",
"absl/log:log",
"absl/types:optional",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"channel_args",
"grpc_lb_xds_channel_args",
"grpc_matchers",
"grpc_tls_credentials",
"unique_type_name",
"useful",
"xds_certificate_provider",
"//:channel_arg_names",
"//:gpr",
"//:grpc_base",
"//:grpc_core_credentials_header",
"//:grpc_credentials_util",
"//:grpc_security_base",
"//:ref_counted_ptr",
],
)
grpc_cc_library(
name = "xds_file_watcher_certificate_provider_factory",
srcs = [
"xds/grpc/file_watcher_certificate_provider_factory.cc",
],
hdrs = [
"xds/grpc/file_watcher_certificate_provider_factory.h",
],
external_deps = [
"absl/log:log",
"absl/strings",
"absl/strings:str_format",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"certificate_provider_factory",
"grpc_tls_credentials",
"json",
"json_args",
"json_object_loader",
"time",
"validation_errors",
"//:config",
"//:gpr",
"//:grpc_base",
"//:ref_counted_ptr",
],
)
grpc_cc_library(
name = "grpc_xds_client",
srcs = [
"xds/grpc/xds_audit_logger_registry.cc",
"xds/grpc/xds_bootstrap_grpc.cc",
"xds/grpc/xds_certificate_provider.cc",
"xds/grpc/xds_client_grpc.cc",
"xds/grpc/xds_cluster.cc",
"xds/grpc/xds_cluster_specifier_plugin.cc",
@ -5101,12 +5226,8 @@ grpc_cc_library(
"xds/grpc/xds_transport_grpc.cc",
],
hdrs = [
"lib/security/credentials/xds/xds_credentials.h",
"xds/grpc/certificate_provider_store.h",
"xds/grpc/file_watcher_certificate_provider_factory.h",
"xds/grpc/xds_audit_logger_registry.h",
"xds/grpc/xds_bootstrap_grpc.h",
"xds/grpc/xds_certificate_provider.h",
"xds/grpc/xds_client_grpc.h",
"xds/grpc/xds_cluster.h",
"xds/grpc/xds_cluster_specifier_plugin.h",
@ -5251,6 +5372,10 @@ grpc_cc_library(
"upb_utils",
"useful",
"validation_errors",
"xds_certificate_provider",
"xds_certificate_provider_store",
"xds_credentials",
"xds_file_watcher_certificate_provider_factory",
"xds_type_upb",
"xds_type_upbdefs",
"//:channel",
@ -5339,6 +5464,9 @@ grpc_cc_library(
"resolved_address",
"slice_refcount",
"unique_type_name",
"xds_certificate_provider",
"xds_certificate_provider_store",
"xds_credentials",
"//:api_trace",
"//:config",
"//:debug_location",
@ -5483,6 +5611,7 @@ grpc_cc_library(
"resolved_address",
"subchannel_interface",
"validation_errors",
"xds_credentials",
"xds_dependency_manager",
"//:call_tracer",
"//:config",
@ -7438,6 +7567,24 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "call_state",
srcs = [
"lib/transport/call_state.cc",
],
hdrs = [
"lib/transport/call_state.h",
],
external_deps = ["absl/types:optional"],
deps = [
"activity",
"poll",
"status_flag",
"//:gpr",
"//:grpc_trace",
],
)
grpc_cc_library(
name = "call_filters",
srcs = [
@ -7449,6 +7596,7 @@ grpc_cc_library(
external_deps = ["absl/log:check"],
deps = [
"call_final_info",
"call_state",
"dump_args",
"if",
"latch",

@ -76,7 +76,7 @@ const int32_t kTimeoutSecs = 120;
ChaoticGoodConnector::ChaoticGoodConnector(
std::shared_ptr<grpc_event_engine::experimental::EventEngine> event_engine)
: event_engine_(std::move(event_engine)),
handshake_mgr_(std::make_shared<HandshakeManager>()) {}
handshake_mgr_(MakeRefCounted<HandshakeManager>()) {}
ChaoticGoodConnector::~ChaoticGoodConnector() {
CHECK_EQ(notify_, nullptr);

@ -93,7 +93,7 @@ class ChaoticGoodConnector : public SubchannelConnector {
ActivityPtr connect_activity_ ABSL_GUARDED_BY(mu_);
const std::shared_ptr<grpc_event_engine::experimental::EventEngine>
event_engine_;
std::shared_ptr<HandshakeManager> handshake_mgr_;
RefCountedPtr<HandshakeManager> handshake_mgr_;
HPackCompressor hpack_compressor_;
HPackParser hpack_parser_;
absl::BitGen bitgen_;

@ -53,7 +53,6 @@
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/time.h>
@ -419,9 +418,9 @@ static void read_channel_args(grpc_chttp2_transport* t,
channel_args.GetInt(GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER).value_or(-1);
if (initial_sequence_number > 0) {
if ((t->next_stream_id & 1) != (initial_sequence_number & 1)) {
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER, t->next_stream_id & 1,
is_client ? "client" : "server");
LOG(ERROR) << GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER
<< ": low bit must be " << (t->next_stream_id & 1) << " on "
<< (is_client ? "client" : "server");
} else {
t->next_stream_id = static_cast<uint32_t>(initial_sequence_number);
}
@ -527,8 +526,8 @@ static void read_channel_args(grpc_chttp2_transport* t,
t->max_concurrent_streams_policy.SetTarget(value);
}
} else if (channel_args.Contains(GRPC_ARG_MAX_CONCURRENT_STREAMS)) {
gpr_log(GPR_DEBUG, "%s is not available on clients",
GRPC_ARG_MAX_CONCURRENT_STREAMS);
VLOG(2) << GRPC_ARG_MAX_CONCURRENT_STREAMS
<< " is not available on clients";
}
value =
channel_args.GetInt(GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER).value_or(-1);
@ -926,11 +925,11 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
static void set_write_state(grpc_chttp2_transport* t,
grpc_chttp2_write_state st, const char* reason) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "W:%p %s [%s] state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER",
std::string(t->peer_string.as_string_view()).c_str(),
write_state_name(t->write_state), write_state_name(st), reason));
GRPC_TRACE_LOG(http, INFO)
<< "W:" << t << " " << (t->is_client ? "CLIENT" : "SERVER") << " ["
<< t->peer_string.as_string_view() << "] state "
<< write_state_name(t->write_state) << " -> " << write_state_name(st)
<< " [" << reason << "]";
t->write_state = st;
// If the state is being reset back to idle, it means a write was just
// finished. Make sure all the run_after_write closures are scheduled.
@ -1020,11 +1019,10 @@ static void write_action_begin_locked(
// We had paused reading, because we had many induced frames (SETTINGS
// ACK, PINGS ACK and RST_STREAMS) pending in t->qbuf. Now that we have
// been able to flush qbuf, we can resume reading.
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO,
"transport %p : Resuming reading after being paused due to too "
"many unwritten SETTINGS ACK, PINGS ACK and RST_STREAM frames",
t.get()));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t.get()
<< " : Resuming reading after being paused due to too many unwritten "
"SETTINGS ACK, PINGS ACK and RST_STREAM frames";
t->reading_paused_on_pending_induced_frames = false;
continue_read_action_locked(std::move(t));
}
@ -1151,15 +1149,15 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
static_cast<intptr_t>(goaway_error)),
grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE);
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "transport %p got goaway with last stream id %d", t,
last_stream_id));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t << " got goaway with last stream id "
<< last_stream_id;
// We want to log this irrespective of whether http tracing is enabled if we
// received a GOAWAY with a non NO_ERROR code.
if (goaway_error != GRPC_HTTP2_NO_ERROR) {
gpr_log(GPR_INFO, "%s: Got goaway [%d] err=%s",
std::string(t->peer_string.as_string_view()).c_str(), goaway_error,
grpc_core::StatusToString(t->goaway_error).c_str());
LOG(INFO) << t->peer_string.as_string_view() << ": Got goaway ["
<< goaway_error
<< "] err=" << grpc_core::StatusToString(t->goaway_error);
}
if (t->is_client) {
cancel_unstarted_streams(t, t->goaway_error, false);
@ -1185,12 +1183,11 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
if (GPR_UNLIKELY(t->is_client &&
goaway_error == GRPC_HTTP2_ENHANCE_YOUR_CALM &&
goaway_text == "too_many_pings")) {
gpr_log(GPR_ERROR,
"%s: Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug "
"data equal to \"too_many_pings\". Current keepalive time (before "
"throttling): %s",
std::string(t->peer_string.as_string_view()).c_str(),
t->keepalive_time.ToString().c_str());
LOG(ERROR) << t->peer_string.as_string_view()
<< ": Received a GOAWAY with error code ENHANCE_YOUR_CALM and "
"debug data equal to \"too_many_pings\". Current keepalive "
"time (before throttling): "
<< t->keepalive_time.ToString();
constexpr int max_keepalive_time_millis =
INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER;
int64_t throttled_keepalive_time =
@ -1222,10 +1219,10 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
t->stream_map.size() < t->settings.peer().max_concurrent_streams() &&
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
// safe since we can't (legally) be parsing this stream yet
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO,
"HTTP:%s: Transport %p allocating new grpc_chttp2_stream %p to id %d",
t->is_client ? "CLI" : "SVR", t, s, t->next_stream_id));
GRPC_TRACE_LOG(http, INFO)
<< "HTTP:" << (t->is_client ? "CLI" : "SVR") << ": Transport " << t
<< " allocating new grpc_chttp2_stream " << s << " to id "
<< t->next_stream_id;
CHECK_EQ(s->id, 0u);
s->id = t->next_stream_id;
@ -1288,17 +1285,13 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s whence=%s:%d",
t, closure,
static_cast<int>(closure->next_data.scratch /
CLOSURE_BARRIER_FIRST_REF_BIT),
static_cast<int>(closure->next_data.scratch %
CLOSURE_BARRIER_FIRST_REF_BIT),
desc, grpc_core::StatusToString(error).c_str(),
write_state_name(t->write_state), whence.file(), whence.line());
LOG(INFO) << "complete_closure_step: t=" << t << " " << closure << " refs="
<< (closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT)
<< " flags="
<< (closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT)
<< " desc=" << desc << " err=" << grpc_core::StatusToString(error)
<< " write_state=" << write_state_name(t->write_state)
<< " whence=" << whence.file() << ":" << whence.line();
}
if (!error.ok()) {
@ -1341,7 +1334,7 @@ static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id,
const std::string prefix = absl::StrCat(
"HTTP:", id, is_initial ? ":HDR" : ":TRL", is_client ? ":CLI:" : ":SVR:");
md_batch->Log([&prefix](absl::string_view key, absl::string_view value) {
VLOG(2) << absl::StrCat(prefix, key, ": ", value);
VLOG(2) << prefix << key << ": " << value;
});
}
@ -1358,10 +1351,9 @@ static void perform_stream_op_locked(void* stream_op,
s->call_tracer = CallTracerIfSampled(s);
s->tcp_tracer = TcpTracerIfSampled(s);
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO,
"perform_stream_op_locked[s=%p; op=%p]: %s; on_complete = %p", s,
op, grpc_transport_stream_op_batch_string(op, false).c_str(),
op->on_complete);
LOG(INFO) << "perform_stream_op_locked[s=" << s << "; op=" << op
<< "]: " << grpc_transport_stream_op_batch_string(op, false)
<< "; on_complete = " << op->on_complete;
if (op->send_initial_metadata) {
log_metadata(op_payload->send_initial_metadata.send_initial_metadata,
s->id, t->is_client, true);
@ -1625,8 +1617,8 @@ void grpc_chttp2_transport::PerformStreamOp(
}
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "perform_stream_op[s=%p; op=%p]: %s", s, op,
grpc_transport_stream_op_batch_string(op, false).c_str());
LOG(INFO) << "perform_stream_op[s=" << s << "; op=" << op
<< "]: " << grpc_transport_stream_op_batch_string(op, false);
}
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
@ -1637,8 +1629,8 @@ void grpc_chttp2_transport::PerformStreamOp(
}
static void cancel_pings(grpc_chttp2_transport* t, grpc_error_handle error) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "%p CANCEL PINGS: %s", t,
grpc_core::StatusToString(error).c_str()));
GRPC_TRACE_LOG(http, INFO)
<< t << " CANCEL PINGS: " << grpc_core::StatusToString(error);
// callback remaining pings: they're not allowed to call into the transport,
// and maybe they hold resources that need to be freed
t->ping_callbacks.CancelAll(t->event_engine.get());
@ -1721,8 +1713,8 @@ static void retry_initiate_ping_locked(
void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id) {
if (!t->ping_callbacks.AckPing(id, t->event_engine.get())) {
gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64,
std::string(t->peer_string.as_string_view()).c_str(), id);
VLOG(2) << "Unknown ping response from " << t->peer_string.as_string_view()
<< ": " << id;
return;
}
if (t->ping_callbacks.ping_requested()) {
@ -1732,65 +1724,62 @@ void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id) {
void grpc_chttp2_keepalive_timeout(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
t->combiner->Run(
grpc_core::NewClosure([t](grpc_error_handle) {
gpr_log(GPR_INFO, "%s: Keepalive timeout. Closing transport.",
std::string(t->peer_string.as_string_view()).c_str());
send_goaway(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("keepalive_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("keepalive timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
t->combiner->Run(grpc_core::NewClosure([t](grpc_error_handle) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Keepalive timeout. Closing transport.";
send_goaway(t.get(),
grpc_error_set_int(
GRPC_ERROR_CREATE("keepalive_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(), grpc_error_set_int(
GRPC_ERROR_CREATE("keepalive timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
}
void grpc_chttp2_ping_timeout(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
t->combiner->Run(
grpc_core::NewClosure([t](grpc_error_handle) {
gpr_log(GPR_INFO, "%s: Ping timeout. Closing transport.",
std::string(t->peer_string.as_string_view()).c_str());
send_goaway(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("ping_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("ping timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
t->combiner->Run(grpc_core::NewClosure([t](grpc_error_handle) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Ping timeout. Closing transport.";
send_goaway(t.get(),
grpc_error_set_int(
GRPC_ERROR_CREATE("ping_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(), grpc_error_set_int(
GRPC_ERROR_CREATE("ping timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
}
void grpc_chttp2_settings_timeout(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
t->combiner->Run(
grpc_core::NewClosure([t](grpc_error_handle) {
gpr_log(GPR_INFO, "%s: Settings timeout. Closing transport.",
std::string(t->peer_string.as_string_view()).c_str());
send_goaway(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("settings_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_SETTINGS_TIMEOUT),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("settings timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
t->combiner->Run(grpc_core::NewClosure([t](grpc_error_handle) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Settings timeout. Closing transport.";
send_goaway(t.get(),
grpc_error_set_int(
GRPC_ERROR_CREATE("settings_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_SETTINGS_TIMEOUT),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(), grpc_error_set_int(
GRPC_ERROR_CREATE("settings timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
}
namespace {
@ -1827,22 +1816,21 @@ class GracefulGoaway : public grpc_core::RefCounted<GracefulGoaway> {
return;
}
if (t_->destroying || !t_->closed_with_error.ok()) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO,
"transport:%p %s peer:%s Transport already shutting down. "
"Graceful GOAWAY abandoned.",
t_.get(), t_->is_client ? "CLIENT" : "SERVER",
std::string(t_->peer_string.as_string_view()).c_str()));
GRPC_TRACE_LOG(http, INFO) << "transport:" << t_.get() << " "
<< (t_->is_client ? "CLIENT" : "SERVER")
<< " peer:" << t_->peer_string.as_string_view()
<< " Transport already shutting down. "
"Graceful GOAWAY abandoned.";
return;
}
// Ping completed. Send final goaway.
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO,
"transport:%p %s peer:%s Graceful shutdown: Ping received. "
"Sending final GOAWAY with stream_id:%d",
t_.get(), t_->is_client ? "CLIENT" : "SERVER",
std::string(t_->peer_string.as_string_view()).c_str(),
t_->last_new_stream_id));
GRPC_TRACE_LOG(http, INFO)
<< "transport:" << t_.get() << " "
<< (t_->is_client ? "CLIENT" : "SERVER")
<< " peer:" << std::string(t_->peer_string.as_string_view())
<< " Graceful shutdown: Ping received. "
"Sending final GOAWAY with stream_id:"
<< t_->last_new_stream_id;
t_->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
grpc_chttp2_goaway_append(t_->last_new_stream_id, 0, grpc_empty_slice(),
&t_->qbuf);
@ -1886,10 +1874,10 @@ static void send_goaway(grpc_chttp2_transport* t, grpc_error_handle error,
} else if (t->sent_goaway_state == GRPC_CHTTP2_NO_GOAWAY_SEND ||
t->sent_goaway_state == GRPC_CHTTP2_GRACEFUL_GOAWAY) {
// We want to log this irrespective of whether http tracing is enabled
gpr_log(GPR_DEBUG, "%s %s: Sending goaway last_new_stream_id=%d err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
t->is_client ? "CLIENT" : "SERVER", t->last_new_stream_id,
grpc_core::StatusToString(error).c_str());
VLOG(2) << t->peer_string.as_string_view() << " "
<< (t->is_client ? "CLIENT" : "SERVER")
<< ": Sending goaway last_new_stream_id=" << t->last_new_stream_id
<< " err=" << grpc_core::StatusToString(error);
t->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
grpc_chttp2_goaway_append(
t->last_new_stream_id, static_cast<uint32_t>(http_error),
@ -1973,8 +1961,8 @@ static void perform_transport_op_locked(void* stream_op,
void grpc_chttp2_transport::PerformOp(grpc_transport_op* op) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "perform_transport_op[t=%p]: %s", this,
grpc_transport_op_string(op).c_str());
LOG(INFO) << "perform_transport_op[t=" << this
<< "]: " << grpc_transport_op_string(op);
}
op->handler_private.extra_arg = this;
Ref().release()->combiner->Run(
@ -2026,10 +2014,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
// exited out of at any point by returning.
[&]() {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG,
"maybe_complete_recv_message %p final_metadata_requested=%d "
"seen_error=%d",
s, s->final_metadata_requested, s->seen_error);
VLOG(2) << "maybe_complete_recv_message " << s
<< " final_metadata_requested=" << s->final_metadata_requested
<< " seen_error=" << s->seen_error;
}
if (s->final_metadata_requested && s->seen_error) {
grpc_slice_buffer_reset_and_unref(&s->frame_storage);
@ -2042,10 +2029,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
auto r = grpc_deframe_unprocessed_incoming_frames(
s, &min_progress_size, &**s->recv_message, s->recv_message_flags);
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG, "Deframe data frame: %s",
grpc_core::PollToString(r, [](absl::Status r) {
return r.ToString();
}).c_str());
VLOG(2) << "Deframe data frame: "
<< grpc_core::PollToString(
r, [](absl::Status r) { return r.ToString(); });
}
if (r.pending()) {
if (s->read_closed) {
@ -2098,12 +2084,11 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
grpc_chttp2_stream* s) {
grpc_chttp2_maybe_complete_recv_message(t, s);
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG,
"maybe_complete_recv_trailing_metadata cli=%d s=%p closure=%p "
"read_closed=%d "
"write_closed=%d %" PRIdPTR,
t->is_client, s, s->recv_trailing_metadata_finished, s->read_closed,
s->write_closed, s->frame_storage.length);
VLOG(2) << "maybe_complete_recv_trailing_metadata cli=" << t->is_client
<< " s=" << s << " closure=" << s->recv_trailing_metadata_finished
<< " read_closed=" << s->read_closed
<< " write_closed=" << s->write_closed << " "
<< s->frame_storage.length;
}
if (s->recv_trailing_metadata_finished != nullptr && s->read_closed &&
s->write_closed) {
@ -2309,12 +2294,13 @@ grpc_chttp2_transport::RemovedStreamHandle grpc_chttp2_mark_stream_closed(
int close_writes, grpc_error_handle error) {
grpc_chttp2_transport::RemovedStreamHandle rsh;
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(
GPR_DEBUG, "MARK_STREAM_CLOSED: t=%p s=%p(id=%d) %s [%s]", t, s, s->id,
(close_reads && close_writes)
? "read+write"
: (close_reads ? "read" : (close_writes ? "write" : "nothing??")),
grpc_core::StatusToString(error).c_str());
VLOG(2) << "MARK_STREAM_CLOSED: t=" << t << " s=" << s << "(id=" << s->id
<< ") "
<< ((close_reads && close_writes)
? "read+write"
: (close_reads ? "read"
: (close_writes ? "write" : "nothing??")))
<< " [" << grpc_core::StatusToString(error) << "]";
}
if (s->read_closed && s->write_closed) {
// already closed, but we should still fake the status if needed.
@ -2722,11 +2708,10 @@ static void read_action_parse_loop_locked(
if (keep_reading) {
if (t->num_pending_induced_frames >= DEFAULT_MAX_PENDING_INDUCED_FRAMES) {
t->reading_paused_on_pending_induced_frames = true;
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO,
"transport %p : Pausing reading due to too "
"many unwritten SETTINGS ACK and RST_STREAM frames",
t.get()));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t.get()
<< " : Pausing reading due to too many unwritten "
"SETTINGS ACK and RST_STREAM frames";
} else {
continue_read_action_locked(std::move(t));
}
@ -2741,9 +2726,8 @@ static void read_action_locked(
if (t->keepalive_ping_timeout_handle != TaskHandle::kInvalid) {
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO,
"%s[%p]: Clear keepalive timer because data was received",
t->is_client ? "CLIENT" : "SERVER", t.get());
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t.get()
<< "]: Clear keepalive timer because data was received";
}
t->event_engine->Cancel(
std::exchange(t->keepalive_ping_timeout_handle, TaskHandle::kInvalid));
@ -2794,9 +2778,8 @@ static void start_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "%s: Start BDP ping err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
grpc_core::StatusToString(error).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Start BDP ping err=" << grpc_core::StatusToString(error);
}
if (!error.ok() || !t->closed_with_error.ok()) {
return;
@ -2821,9 +2804,8 @@ static void finish_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
grpc_core::StatusToString(error).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Complete BDP ping err=" << grpc_core::StatusToString(error);
}
if (!error.ok() || !t->closed_with_error.ok()) {
return;
@ -2966,8 +2948,8 @@ static void finish_keepalive_ping_locked(
if (error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO, "%s: Finish keepalive ping",
std::string(t->peer_string.as_string_view()).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Finish keepalive ping";
}
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
CHECK(t->keepalive_ping_timer_handle == TaskHandle::kInvalid);
@ -2988,8 +2970,8 @@ static void maybe_reset_keepalive_ping_timer_locked(grpc_chttp2_transport* t) {
// need to Ref or Unref here since we still hold the Ref.
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO, "%s: Keepalive ping cancelled. Resetting timer.",
std::string(t->peer_string.as_string_view()).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Keepalive ping cancelled. Resetting timer.";
}
t->keepalive_ping_timer_handle =
t->event_engine->RunAfter(t->keepalive_time, [t = t->Ref()]() mutable {
@ -3008,9 +2990,9 @@ static void connectivity_state_set(grpc_chttp2_transport* t,
grpc_connectivity_state state,
const absl::Status& status,
const char* reason) {
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO, "transport %p set connectivity_state=%d; status=%s; reason=%s",
t, state, status.ToString().c_str(), reason));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t << " set connectivity_state=" << state
<< "; status=" << status.ToString() << "; reason=" << reason;
t->state_tracker.SetState(state, status, reason);
}
@ -3087,8 +3069,8 @@ static void benign_reclaimer_locked(
// Channel with no active streams: send a goaway to try and make it
// disconnect cleanly
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
std::string(t->peer_string.as_string_view()).c_str());
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - send goaway to free memory";
}
send_goaway(t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
@ -3096,11 +3078,9 @@ static void benign_reclaimer_locked(
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
} else if (error.ok() && GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
std::string(t->peer_string.as_string_view()).c_str(),
t->stream_map.size());
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - skip benign reclamation, there are still "
<< t->stream_map.size() << " streams";
}
t->benign_reclaimer_registered = false;
if (error != absl::CancelledError()) {
@ -3116,8 +3096,8 @@ static void destructive_reclaimer_locked(
// As stream_map is a hash map, this selects effectively a random stream.
grpc_chttp2_stream* s = t->stream_map.begin()->second;
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d",
std::string(t->peer_string.as_string_view()).c_str(), s->id);
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - abandon stream id " << s->id;
}
grpc_chttp2_cancel_stream(
t.get(), s,

@ -96,6 +96,11 @@ void HandshakeManager::DoHandshake(
Timestamp deadline, grpc_tcp_server_acceptor* acceptor,
absl::AnyInvocable<void(absl::StatusOr<HandshakerArgs*>)>
on_handshake_done) {
// We hold a ref until after the mutex is released, because we might
// wind up invoking on_handshake_done in another thread before we
// return from this function, and on_handshake_done might release the
// last ref to this object.
auto self = Ref();
MutexLock lock(&mu_);
CHECK_EQ(index_, 0u);
on_handshake_done_ = std::move(on_handshake_done);

@ -26,6 +26,7 @@ namespace grpc_core {
DebugOnlyTraceFlag auth_context_refcount_trace(false, "auth_context_refcount");
DebugOnlyTraceFlag call_combiner_trace(false, "call_combiner");
DebugOnlyTraceFlag call_refcount_trace(false, "call_refcount");
DebugOnlyTraceFlag call_state_trace(false, "call_state");
DebugOnlyTraceFlag closure_trace(false, "closure");
DebugOnlyTraceFlag combiner_trace(false, "combiner");
DebugOnlyTraceFlag cq_refcount_trace(false, "cq_refcount");
@ -229,6 +230,7 @@ const absl::flat_hash_map<std::string, TraceFlag*>& GetAllTraceFlags() {
{"auth_context_refcount", &auth_context_refcount_trace},
{"call_combiner", &call_combiner_trace},
{"call_refcount", &call_refcount_trace},
{"call_state", &call_state_trace},
{"closure", &closure_trace},
{"combiner", &combiner_trace},
{"cq_refcount", &cq_refcount_trace},

@ -26,6 +26,7 @@ namespace grpc_core {
extern DebugOnlyTraceFlag auth_context_refcount_trace;
extern DebugOnlyTraceFlag call_combiner_trace;
extern DebugOnlyTraceFlag call_refcount_trace;
extern DebugOnlyTraceFlag call_state_trace;
extern DebugOnlyTraceFlag closure_trace;
extern DebugOnlyTraceFlag combiner_trace;
extern DebugOnlyTraceFlag cq_refcount_trace;

@ -54,6 +54,10 @@ call_refcount:
debug_only: true
default: false
description: Refcount on call.
call_state:
debug_only: true
default: false
description: Traces transitions through the call spine state machine.
cares_address_sorting:
default: false
description: Operations of the c-ares based DNS resolver's address sorter.

@ -50,7 +50,6 @@
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@ -619,7 +618,7 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " destroy";
}
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p);
@ -628,7 +627,7 @@ static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " run";
}
gpr_mu_lock(p->pollset_mu);
grpc_core::Timestamp deadline =
@ -645,7 +644,7 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
g_uncovered_notifications_pending = 0;
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " shutdown";
}
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
@ -653,7 +652,7 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
} else {
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " reschedule";
}
grpc_core::Executor::Run(&p->run_poller, absl::OkStatus(),
grpc_core::ExecutorType::DEFAULT,
@ -670,8 +669,8 @@ static void drop_uncovered(grpc_tcp* /*tcp*/) {
g_backup_poller_mu->Unlock();
CHECK_GT(old_count, 1);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p, old_count,
old_count - 1);
LOG(INFO) << "BACKUP_POLLER:" << p << " uncover cnt " << old_count << "->"
<< old_count - 1;
}
}
@ -694,7 +693,7 @@ static void cover_self(grpc_tcp* tcp) {
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " create";
}
grpc_core::Executor::Run(
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
@ -706,22 +705,22 @@ static void cover_self(grpc_tcp* tcp) {
g_backup_poller_mu->Unlock();
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p cnt %d->%d", p, tcp,
old_count - 1, old_count);
LOG(INFO) << "BACKUP_POLLER:" << p << " add " << tcp << " cnt "
<< old_count - 1 << "->" << old_count;
}
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
}
static void notify_on_read(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
LOG(INFO) << "TCP:" << tcp << " notify_on_read";
}
grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
LOG(INFO) << "TCP:" << tcp << " notify_on_write";
}
if (!grpc_event_engine_run_in_background()) {
cover_self(tcp);
@ -732,8 +731,8 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg,
grpc_core::StatusToString(error).c_str());
LOG(INFO) << "TCP:" << arg
<< " got_write: " << grpc_core::StatusToString(error);
}
drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
@ -852,10 +851,11 @@ static void tcp_trace_read(grpc_tcp* tcp, grpc_error_handle error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
grpc_closure* cb = tcp->read_cb;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
LOG(INFO) << "TCP:" << tcp << " call_cb " << cb << " " << cb->cb << ":"
<< cb->cb_arg;
size_t i;
gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp,
tcp->peer_string.c_str(), grpc_core::StatusToString(error).c_str());
LOG(INFO) << "READ " << tcp << " (peer=" << tcp->peer_string
<< ") error=" << grpc_core::StatusToString(error);
if (ABSL_VLOG_IS_ON(2)) {
for (i = 0; i < tcp->incoming_buffer->count; i++) {
char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
@ -903,10 +903,8 @@ static void update_rcvlowat(grpc_tcp* tcp)
}
if (setsockopt(tcp->fd, SOL_SOCKET, SO_RCVLOWAT, &remaining,
sizeof(remaining)) != 0) {
gpr_log(GPR_ERROR, "%s",
absl::StrCat("Cannot set SO_RCVLOWAT on fd=", tcp->fd,
" err=", grpc_core::StrError(errno).c_str())
.c_str());
LOG(ERROR) << "Cannot set SO_RCVLOWAT on fd=" << tcp->fd
<< " err=" << grpc_core::StrError(errno);
return;
}
tcp->set_rcvlowat = remaining;
@ -917,7 +915,7 @@ static void update_rcvlowat(grpc_tcp* tcp)
static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
LOG(INFO) << "TCP:" << tcp << " do_read";
}
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
@ -1130,8 +1128,8 @@ static void maybe_make_read_slices(grpc_tcp* tcp)
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp,
grpc_core::StatusToString(error).c_str());
LOG(INFO) << "TCP:" << tcp
<< " got_read: " << grpc_core::StatusToString(error);
}
tcp->read_mu.Lock();
grpc_error_handle tcp_read_error;
@ -1471,9 +1469,8 @@ static bool process_errors(grpc_tcp* tcp) {
// Got a control message that is not a timestamp or zerocopy. Don't know
// how to handle this.
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO,
"unknown control message cmsg_level:%d cmsg_type:%d",
cmsg->cmsg_level, cmsg->cmsg_type);
LOG(INFO) << "unknown control message cmsg_level:" << cmsg->cmsg_level
<< " cmsg_type:" << cmsg->cmsg_type;
}
return processed_err;
}
@ -1488,8 +1485,7 @@ static void tcp_handle_error(void* arg /* grpc_tcp */,
grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp,
grpc_core::StatusToString(error).c_str());
LOG(INFO) << "TCP:" << tcp << " got_error: " << error;
}
if (!error.ok() ||
@ -1847,7 +1843,7 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
size_t i;
for (i = 0; i < buf->count; i++) {
gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string.c_str());
LOG(INFO) << "WRITE " << tcp << " (peer=" << tcp->peer_string << ")";
if (ABSL_VLOG_IS_ON(2)) {
char* data =
grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
@ -2030,7 +2026,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
tcp->inq_capable = true;
} else {
gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
VLOG(2) << "cannot set inq fd=" << tcp->fd << " errno=" << errno;
tcp->inq_capable = false;
}
#else

@ -44,6 +44,7 @@
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
@ -51,7 +52,6 @@
#include <grpc/event_engine/endpoint_config.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@ -165,22 +165,21 @@ static grpc_error_handle CreateEventEngineListener(
->GetWrappedFd();
if (getpeername(fd, reinterpret_cast<struct sockaddr*>(addr.addr),
&(addr.len)) < 0) {
gpr_log(GPR_ERROR, "Failed getpeername: %s",
grpc_core::StrError(errno).c_str());
LOG(ERROR) << "Failed getpeername: "
<< grpc_core::StrError(errno);
close(fd);
return;
}
(void)grpc_set_socket_no_sigpipe_if_possible(fd);
auto addr_uri = grpc_sockaddr_to_uri(&addr);
if (!addr_uri.ok()) {
gpr_log(GPR_ERROR, "Invalid address: %s",
addr_uri.status().ToString().c_str());
LOG(ERROR) << "Invalid address: "
<< addr_uri.status().ToString();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO,
"SERVER_CONNECT: incoming external connection: %s",
addr_uri->c_str());
LOG(INFO) << "SERVER_CONNECT: incoming external connection: "
<< addr_uri->c_str();
}
}
read_notifier_pollset =
@ -410,8 +409,7 @@ static void on_read(void* arg, grpc_error_handle err) {
}
gpr_mu_lock(&sp->server->mu);
if (!sp->server->shutdown_listeners) {
gpr_log(GPR_ERROR, "Failed accept4: %s",
grpc_core::StrError(errno).c_str());
LOG(ERROR) << "Failed accept4: " << grpc_core::StrError(errno);
} else {
// if we have shutdown listeners, accept4 could fail, and we
// needn't notify users
@ -424,10 +422,8 @@ static void on_read(void* arg, grpc_error_handle err) {
int64_t dropped_connections_count =
num_dropped_connections.fetch_add(1, std::memory_order_relaxed) + 1;
if (dropped_connections_count % 1000 == 1) {
gpr_log(GPR_INFO,
"Dropped >= %" PRId64
" new connection attempts due to high memory pressure",
dropped_connections_count);
LOG(INFO) << "Dropped >= " << dropped_connections_count
<< " new connection attempts due to high memory pressure";
}
close(fd);
continue;
@ -441,13 +437,11 @@ static void on_read(void* arg, grpc_error_handle err) {
if (getpeername(fd, reinterpret_cast<struct sockaddr*>(addr.addr),
&(addr.len)) < 0) {
auto listener_addr_uri = grpc_sockaddr_to_uri(&sp->addr);
gpr_log(
GPR_ERROR,
"Failed getpeername: %s. Dropping the connection, and continuing "
"to listen on %s:%d.",
grpc_core::StrError(errno).c_str(),
listener_addr_uri.ok() ? listener_addr_uri->c_str() : "<unknown>",
sp->port);
LOG(ERROR) << "Failed getpeername: " << grpc_core::StrError(errno)
<< ". Dropping the connection, and continuing to listen on "
<< (listener_addr_uri.ok() ? *listener_addr_uri
: "<unknown>")
<< ":" << sp->port;
close(fd);
continue;
}
@ -463,13 +457,11 @@ static void on_read(void* arg, grpc_error_handle err) {
auto addr_uri = grpc_sockaddr_to_uri(&addr);
if (!addr_uri.ok()) {
gpr_log(GPR_ERROR, "Invalid address: %s",
addr_uri.status().ToString().c_str());
LOG(ERROR) << "Invalid address: " << addr_uri.status();
goto error;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s",
addr_uri->c_str());
LOG(INFO) << "SERVER_CONNECT: incoming connection: " << *addr_uri;
}
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
@ -549,16 +541,14 @@ static grpc_error_handle add_wildcard_addrs_to_server(grpc_tcp_server* s,
}
if (*out_port > 0) {
if (!v6_err.ok()) {
gpr_log(GPR_INFO,
"Failed to add :: listener, "
"the environment may not support IPv6: %s",
grpc_core::StatusToString(v6_err).c_str());
LOG(INFO) << "Failed to add :: listener, "
<< "the environment may not support IPv6: "
<< grpc_core::StatusToString(v6_err);
}
if (!v4_err.ok()) {
gpr_log(GPR_INFO,
"Failed to add 0.0.0.0 listener, "
"the environment may not support IPv4: %s",
grpc_core::StatusToString(v4_err).c_str());
LOG(INFO) << "Failed to add 0.0.0.0 listener, "
<< "the environment may not support IPv4: "
<< grpc_core::StatusToString(v4_err);
}
return absl::OkStatus();
} else {
@ -916,21 +906,19 @@ class ExternalConnectionHandler : public grpc_core::TcpServerFdHandler {
if (getpeername(fd, reinterpret_cast<struct sockaddr*>(addr.addr),
&(addr.len)) < 0) {
gpr_log(GPR_ERROR, "Failed getpeername: %s",
grpc_core::StrError(errno).c_str());
LOG(ERROR) << "Failed getpeername: " << grpc_core::StrError(errno);
close(fd);
return;
}
(void)grpc_set_socket_no_sigpipe_if_possible(fd);
auto addr_uri = grpc_sockaddr_to_uri(&addr);
if (!addr_uri.ok()) {
gpr_log(GPR_ERROR, "Invalid address: %s",
addr_uri.status().ToString().c_str());
LOG(ERROR) << "Invalid address: " << addr_uri.status();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "SERVER_CONNECT: incoming external connection: %s",
addr_uri->c_str());
LOG(INFO) << "SERVER_CONNECT: incoming external connection: "
<< *addr_uri;
}
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);

@ -23,10 +23,6 @@
namespace grpc_core {
namespace {
void* Offset(void* base, size_t amt) { return static_cast<char*>(base) + amt; }
} // namespace
namespace filters_detail {
void RunHalfClose(absl::Span<const HalfCloseOperator> ops, void* call_data) {
@ -129,39 +125,62 @@ char g_empty_call_data;
// CallFilters
CallFilters::CallFilters(ClientMetadataHandle client_initial_metadata)
: stack_(nullptr),
call_data_(nullptr),
: call_data_(nullptr),
push_client_initial_metadata_(std::move(client_initial_metadata)) {}
CallFilters::~CallFilters() {
if (call_data_ != nullptr && call_data_ != &g_empty_call_data) {
for (const auto& destructor : stack_->data_.filter_destructor) {
destructor.call_destroy(Offset(call_data_, destructor.call_offset));
for (const auto& stack : stacks_) {
for (const auto& destructor : stack.stack->data_.filter_destructor) {
destructor.call_destroy(filters_detail::Offset(
call_data_, stack.call_data_offset + destructor.call_offset));
}
}
gpr_free_aligned(call_data_);
}
}
void CallFilters::SetStack(RefCountedPtr<Stack> stack) {
void CallFilters::Start() {
CHECK_EQ(call_data_, nullptr);
stack_ = std::move(stack);
if (stack_->data_.call_data_size != 0) {
call_data_ = gpr_malloc_aligned(stack_->data_.call_data_size,
stack_->data_.call_data_alignment);
size_t call_data_alignment = 1;
for (const auto& stack : stacks_) {
call_data_alignment =
std::max(call_data_alignment, stack.stack->data_.call_data_alignment);
}
size_t call_data_size = 0;
for (auto& stack : stacks_) {
stack.call_data_offset = call_data_size;
size_t stack_call_data_size = stack.stack->data_.call_data_size;
if (stack_call_data_size % call_data_alignment != 0) {
stack_call_data_size +=
call_data_alignment - stack_call_data_size % call_data_alignment;
}
call_data_size += stack_call_data_size;
}
if (call_data_size != 0) {
call_data_ = gpr_malloc_aligned(call_data_size, call_data_alignment);
} else {
call_data_ = &g_empty_call_data;
}
for (const auto& constructor : stack_->data_.filter_constructor) {
constructor.call_init(Offset(call_data_, constructor.call_offset),
constructor.channel_data);
for (const auto& stack : stacks_) {
for (const auto& constructor : stack.stack->data_.filter_constructor) {
constructor.call_init(
filters_detail::Offset(
call_data_, stack.call_data_offset + constructor.call_offset),
constructor.channel_data);
}
}
call_state_.Start();
}
void CallFilters::Finalize(const grpc_call_final_info* final_info) {
for (auto& finalizer : stack_->data_.finalizers) {
finalizer.final(Offset(call_data_, finalizer.call_offset),
finalizer.channel_data, final_info);
for (auto& stack : stacks_) {
for (auto& finalizer : stack.stack->data_.finalizers) {
finalizer.final(
filters_detail::Offset(
call_data_, stack.call_data_offset + finalizer.call_offset),
finalizer.channel_data, final_info);
}
}
}
@ -235,699 +254,4 @@ RefCountedPtr<CallFilters::Stack> CallFilters::StackBuilder::Build() {
return RefCountedPtr<Stack>(new Stack(std::move(data_)));
}
///////////////////////////////////////////////////////////////////////////////
// CallState
namespace filters_detail {
CallState::CallState()
: client_to_server_pull_state_(ClientToServerPullState::kBegin),
client_to_server_push_state_(ClientToServerPushState::kIdle),
server_to_client_pull_state_(ServerToClientPullState::kUnstarted),
server_to_client_push_state_(ServerToClientPushState::kStart),
server_trailing_metadata_state_(ServerTrailingMetadataState::kNotPushed) {
}
void CallState::Start() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] Start: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
server_to_client_pull_state_ = ServerToClientPullState::kStarted;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kUnstartedReading:
server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
LOG(FATAL) << "Start called twice";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
case ServerToClientPullState::kTerminated:
break;
}
}
void CallState::BeginPushClientToServerMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] BeginPushClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kPushedMessage;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
LOG(FATAL) << "PushClientToServerMessage called twice concurrently";
break;
case ClientToServerPushState::kPushedHalfClose:
LOG(FATAL) << "PushClientToServerMessage called after half-close";
break;
case ClientToServerPushState::kFinished:
break;
}
}
Poll<StatusFlag> CallState::PollPushClientToServerMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPushClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
case ClientToServerPushState::kPushedHalfClose:
return Success{};
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
return client_to_server_push_waiter_.pending();
case ClientToServerPushState::kFinished:
return Failure{};
}
Crash("Unreachable");
}
void CallState::ClientToServerHalfClose() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] ClientToServerHalfClose: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
client_to_server_push_state_ =
ClientToServerPushState::kPushedMessageAndHalfClosed;
break;
case ClientToServerPushState::kPushedHalfClose:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
LOG(FATAL) << "ClientToServerHalfClose called twice";
break;
case ClientToServerPushState::kFinished:
break;
}
}
void CallState::BeginPullClientInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] BeginPullClientInitialMetadata: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
client_to_server_pull_state_ =
ClientToServerPullState::kProcessingClientInitialMetadata;
break;
case ClientToServerPullState::kProcessingClientInitialMetadata:
case ClientToServerPullState::kIdle:
case ClientToServerPullState::kReading:
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "BeginPullClientInitialMetadata called twice";
break;
case ClientToServerPullState::kTerminated:
break;
}
}
void CallState::FinishPullClientInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullClientInitialMetadata: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
LOG(FATAL) << "FinishPullClientInitialMetadata called before Begin";
break;
case ClientToServerPullState::kProcessingClientInitialMetadata:
client_to_server_pull_state_ = ClientToServerPullState::kIdle;
client_to_server_pull_waiter_.Wake();
break;
case ClientToServerPullState::kIdle:
case ClientToServerPullState::kReading:
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "Out of order FinishPullClientInitialMetadata";
break;
case ClientToServerPullState::kTerminated:
break;
}
}
Poll<ValueOrFailure<bool>> CallState::PollPullClientToServerMessageAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPullClientToServerMessageAvailable: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
client_to_server_push_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
case ClientToServerPullState::kProcessingClientInitialMetadata:
return client_to_server_pull_waiter_.pending();
case ClientToServerPullState::kIdle:
client_to_server_pull_state_ = ClientToServerPullState::kReading;
ABSL_FALLTHROUGH_INTENDED;
case ClientToServerPullState::kReading:
break;
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "PollPullClientToServerMessageAvailable called while "
"processing a message";
break;
case ClientToServerPullState::kTerminated:
return Failure{};
}
DCHECK_EQ(client_to_server_pull_state_, ClientToServerPullState::kReading);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
return client_to_server_push_waiter_.pending();
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_pull_state_ =
ClientToServerPullState::kProcessingClientToServerMessage;
return true;
case ClientToServerPushState::kPushedHalfClose:
return false;
case ClientToServerPushState::kFinished:
client_to_server_pull_state_ = ClientToServerPullState::kTerminated;
return Failure{};
}
Crash("Unreachable");
}
void CallState::FinishPullClientToServerMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
client_to_server_push_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
case ClientToServerPullState::kProcessingClientInitialMetadata:
LOG(FATAL) << "FinishPullClientToServerMessage called before Begin";
break;
case ClientToServerPullState::kIdle:
LOG(FATAL) << "FinishPullClientToServerMessage called twice";
break;
case ClientToServerPullState::kReading:
LOG(FATAL) << "FinishPullClientToServerMessage called before "
"PollPullClientToServerMessageAvailable";
break;
case ClientToServerPullState::kProcessingClientToServerMessage:
client_to_server_pull_state_ = ClientToServerPullState::kIdle;
client_to_server_pull_waiter_.Wake();
break;
case ClientToServerPullState::kTerminated:
break;
}
switch (client_to_server_push_state_) {
case ClientToServerPushState::kPushedMessage:
client_to_server_push_state_ = ClientToServerPushState::kIdle;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kIdle:
case ClientToServerPushState::kPushedHalfClose:
LOG(FATAL) << "FinishPullClientToServerMessage called without a message";
break;
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kFinished:
break;
}
}
StatusFlag CallState::PushServerInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PushServerInitialMetadata: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_,
server_trailing_metadata_state_);
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return Failure{};
}
CHECK_EQ(server_to_client_push_state_, ServerToClientPushState::kStart);
server_to_client_push_state_ =
ServerToClientPushState::kPushedServerInitialMetadata;
server_to_client_push_waiter_.Wake();
return Success{};
}
void CallState::BeginPushServerToClientMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] BeginPushServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
LOG(FATAL) << "BeginPushServerToClientMessage called before "
"PushServerInitialMetadata";
break;
case ServerToClientPushState::kPushedServerInitialMetadata:
server_to_client_push_state_ =
ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage;
break;
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
LOG(FATAL) << "BeginPushServerToClientMessage called twice concurrently";
break;
case ServerToClientPushState::kTrailersOnly:
// Will fail in poll.
break;
case ServerToClientPushState::kIdle:
server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kFinished:
break;
}
}
Poll<StatusFlag> CallState::PollPushServerToClientMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPushServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
case ServerToClientPushState::kPushedServerInitialMetadata:
LOG(FATAL) << "PollPushServerToClientMessage called before "
<< "PushServerInitialMetadata";
case ServerToClientPushState::kTrailersOnly:
return false;
case ServerToClientPushState::kPushedMessage:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kIdle:
return Success{};
case ServerToClientPushState::kFinished:
return Failure{};
}
Crash("Unreachable");
}
bool CallState::PushServerTrailingMetadata(bool cancel) {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PushServerTrailingMetadata: "
<< GRPC_DUMP_ARGS(this, cancel, server_trailing_metadata_state_,
server_to_client_push_state_,
client_to_server_push_state_,
server_trailing_metadata_waiter_.DebugString());
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return false;
}
server_trailing_metadata_state_ =
cancel ? ServerTrailingMetadataState::kPushedCancel
: ServerTrailingMetadataState::kPushed;
server_trailing_metadata_waiter_.Wake();
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
server_to_client_push_state_ = ServerToClientPushState::kTrailersOnly;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
if (cancel) {
server_to_client_push_state_ = ServerToClientPushState::kFinished;
server_to_client_push_waiter_.Wake();
}
break;
case ServerToClientPushState::kIdle:
if (cancel) {
server_to_client_push_state_ = ServerToClientPushState::kFinished;
server_to_client_push_waiter_.Wake();
}
break;
case ServerToClientPushState::kFinished:
case ServerToClientPushState::kTrailersOnly:
break;
}
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kFinished;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_push_state_ = ClientToServerPushState::kFinished;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedHalfClose:
case ClientToServerPushState::kFinished:
break;
}
return true;
}
Poll<bool> CallState::PollPullServerInitialMetadataAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPullServerInitialMetadataAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_);
bool reading;
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
if (server_to_client_push_state_ ==
ServerToClientPushState::kTrailersOnly) {
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
return false;
}
server_to_client_push_waiter_.pending();
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStartedReading:
reading = true;
break;
case ServerToClientPullState::kStarted:
reading = false;
break;
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollPullServerInitialMetadataAvailable called twice";
case ServerToClientPullState::kTerminated:
return false;
}
DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kStarted ||
server_to_client_pull_state_ ==
ServerToClientPullState::kStartedReading)
<< server_to_client_pull_state_;
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
server_to_client_pull_state_ =
reading
? ServerToClientPullState::kProcessingServerInitialMetadataReading
: ServerToClientPullState::kProcessingServerInitialMetadata;
server_to_client_pull_waiter_.Wake();
return true;
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kPushedMessage:
LOG(FATAL)
<< "PollPullServerInitialMetadataAvailable after metadata processed";
case ServerToClientPushState::kFinished:
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
server_to_client_pull_waiter_.Wake();
return false;
case ServerToClientPushState::kTrailersOnly:
return false;
}
Crash("Unreachable");
}
void CallState::FinishPullServerInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullServerInitialMetadata: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
LOG(FATAL) << "FinishPullServerInitialMetadata called before Start";
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
CHECK_EQ(server_to_client_push_state_,
ServerToClientPushState::kTrailersOnly);
return;
case ServerToClientPullState::kProcessingServerInitialMetadata:
server_to_client_pull_state_ = ServerToClientPullState::kIdle;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
server_to_client_pull_state_ = ServerToClientPullState::kReading;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "Out of order FinishPullServerInitialMetadata";
case ServerToClientPullState::kTerminated:
return;
}
DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kIdle ||
server_to_client_pull_state_ == ServerToClientPullState::kReading)
<< server_to_client_pull_state_;
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
LOG(FATAL) << "FinishPullServerInitialMetadata called before initial "
"metadata consumed";
case ServerToClientPushState::kPushedServerInitialMetadata:
server_to_client_push_state_ = ServerToClientPushState::kIdle;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kPushedMessage:
case ServerToClientPushState::kTrailersOnly:
case ServerToClientPushState::kFinished:
LOG(FATAL) << "FinishPullServerInitialMetadata called twice";
}
}
Poll<ValueOrFailure<bool>> CallState::PollPullServerToClientMessageAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPullServerToClientMessageAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_,
server_trailing_metadata_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
server_to_client_pull_state_ = ServerToClientPullState::kUnstartedReading;
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kProcessingServerInitialMetadata:
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerInitialMetadataReading;
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kUnstartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStarted:
server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPullState::kStartedReading:
if (server_to_client_push_state_ ==
ServerToClientPushState::kTrailersOnly) {
return false;
}
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kIdle:
server_to_client_pull_state_ = ServerToClientPullState::kReading;
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPullState::kReading:
break;
case ServerToClientPullState::kProcessingServerToClientMessage:
LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
"processing a message";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
"processing trailing metadata";
case ServerToClientPullState::kTerminated:
return Failure{};
}
DCHECK_EQ(server_to_client_pull_state_, ServerToClientPullState::kReading);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kIdle:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return false;
}
server_trailing_metadata_waiter_.pending();
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kTrailersOnly:
DCHECK_NE(server_trailing_metadata_state_,
ServerTrailingMetadataState::kNotPushed);
return false;
case ServerToClientPushState::kPushedMessage:
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerToClientMessage;
server_to_client_pull_waiter_.Wake();
return true;
case ServerToClientPushState::kFinished:
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
server_to_client_pull_waiter_.Wake();
return Failure{};
}
Crash("Unreachable");
}
void CallState::FinishPullServerToClientMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
LOG(FATAL)
<< "FinishPullServerToClientMessage called before metadata available";
case ServerToClientPullState::kIdle:
LOG(FATAL) << "FinishPullServerToClientMessage called twice";
case ServerToClientPullState::kReading:
LOG(FATAL) << "FinishPullServerToClientMessage called before "
<< "PollPullServerToClientMessageAvailable";
case ServerToClientPullState::kProcessingServerToClientMessage:
server_to_client_pull_state_ = ServerToClientPullState::kIdle;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "FinishPullServerToClientMessage called while processing "
"trailing metadata";
case ServerToClientPullState::kTerminated:
break;
}
switch (server_to_client_push_state_) {
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kStart:
LOG(FATAL) << "FinishPullServerToClientMessage called before initial "
"metadata consumed";
case ServerToClientPushState::kTrailersOnly:
LOG(FATAL) << "FinishPullServerToClientMessage called after "
"PushServerTrailingMetadata";
case ServerToClientPushState::kPushedMessage:
server_to_client_push_state_ = ServerToClientPushState::kIdle;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kIdle:
LOG(FATAL) << "FinishPullServerToClientMessage called without a message";
case ServerToClientPushState::kFinished:
break;
}
}
Poll<Empty> CallState::PollServerTrailingMetadataAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollServerTrailingMetadataAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_,
server_trailing_metadata_state_,
server_trailing_metadata_waiter_.DebugString());
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kUnstartedReading:
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kReading:
switch (server_to_client_push_state_) {
case ServerToClientPushState::kTrailersOnly:
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kStart:
case ServerToClientPushState::kFinished:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerTrailingMetadata;
server_to_client_pull_waiter_.Wake();
return Empty{};
}
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::
kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
server_to_client_push_waiter_.pending();
return server_to_client_pull_waiter_.pending();
}
break;
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kIdle:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerTrailingMetadata;
server_to_client_pull_waiter_.Wake();
return Empty{};
}
return server_trailing_metadata_waiter_.pending();
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollServerTrailingMetadataAvailable called twice";
case ServerToClientPullState::kTerminated:
return Empty{};
}
Crash("Unreachable");
}
// Acknowledges that server trailing metadata has been fully pulled.
// Legal only between a successful PollServerTrailingMetadataAvailable
// and before any second acknowledgement; transitions kPushed[Cancel]
// to kPulled[Cancel] and wakes anyone waiting on that transition.
void CallState::FinishPullServerTrailingMetadata() {
  GRPC_TRACE_LOG(call, INFO)
      << "[call_state] FinishPullServerTrailingMetadata: "
      << GRPC_DUMP_ARGS(this, server_trailing_metadata_state_,
                        server_trailing_metadata_waiter_.DebugString());
  const auto state = server_trailing_metadata_state_;
  if (state == ServerTrailingMetadataState::kNotPushed) {
    LOG(FATAL) << "FinishPullServerTrailingMetadata called before "
                  "PollServerTrailingMetadataAvailable";
  }
  if (state == ServerTrailingMetadataState::kPulled ||
      state == ServerTrailingMetadataState::kPulledCancel) {
    LOG(FATAL) << "FinishPullServerTrailingMetadata called twice";
  }
  // state is kPushed or kPushedCancel: record the pull, preserving the
  // cancellation bit, and wake waiters (e.g. PollWasCancelled).
  server_trailing_metadata_state_ =
      state == ServerTrailingMetadataState::kPushed
          ? ServerTrailingMetadataState::kPulled
          : ServerTrailingMetadataState::kPulledCancel;
  server_trailing_metadata_waiter_.Wake();
}
// Resolves to whether the call ended in cancellation. Pending until
// trailing metadata has been both pushed and pulled; then true iff the
// pulled state carries the cancel bit.
Poll<bool> CallState::PollWasCancelled() {
  GRPC_TRACE_LOG(call, INFO)
      << "[call_state] PollWasCancelled: "
      << GRPC_DUMP_ARGS(this, server_trailing_metadata_state_);
  switch (server_trailing_metadata_state_) {
    case ServerTrailingMetadataState::kPulled:
      return false;
    case ServerTrailingMetadataState::kPulledCancel:
      return true;
    case ServerTrailingMetadataState::kNotPushed:
    case ServerTrailingMetadataState::kPushed:
    case ServerTrailingMetadataState::kPushedCancel:
      // Trailing metadata not yet pulled: park until it is.
      return server_trailing_metadata_waiter_.pending();
  }
  Crash("Unreachable");
}
// Renders all five state machines and their waiters into one
// space-separated string for trace logging.
std::string CallState::DebugString() const {
  return absl::StrCat(
      "client_to_server_pull_state:", client_to_server_pull_state_,
      " client_to_server_push_state:", client_to_server_push_state_,
      " server_to_client_pull_state:", server_to_client_pull_state_,
      " server_to_client_message_push_state:", server_to_client_push_state_,
      " server_trailing_metadata_state:", server_trailing_metadata_state_,
      // Fix: this waiter previously had no label, so its DebugString()
      // was jammed directly onto the preceding state with no separator.
      " client_to_server_push_waiter:",
      client_to_server_push_waiter_.DebugString(),
      " server_to_client_push_waiter:",
      server_to_client_push_waiter_.DebugString(),
      " client_to_server_pull_waiter:",
      client_to_server_pull_waiter_.DebugString(),
      " server_to_client_pull_waiter:",
      server_to_client_pull_waiter_.DebugString(),
      " server_trailing_metadata_waiter:",
      server_trailing_metadata_waiter_.DebugString());
}
static_assert(sizeof(CallState) <= 16, "CallState too large");
} // namespace filters_detail
} // namespace grpc_core

@ -16,6 +16,7 @@
#define GRPC_SRC_CORE_LIB_TRANSPORT_CALL_FILTERS_H
#include <cstdint>
#include <limits>
#include <memory>
#include <ostream>
#include <type_traits>
@ -35,6 +36,7 @@
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/transport/call_final_info.h"
#include "src/core/lib/transport/call_state.h"
#include "src/core/lib/transport/message.h"
#include "src/core/lib/transport/metadata.h"
@ -120,6 +122,10 @@ struct NoInterceptor {};
namespace filters_detail {
// Returns `base` advanced by `amt` bytes (byte-wise pointer offset).
inline void* Offset(void* base, size_t amt) {
  char* bytes = static_cast<char*>(base);
  return bytes + amt;
}
// One call filter constructor
// Contains enough information to allocate and initialize the
// call data for one filter.
@ -870,6 +876,17 @@ struct StackData {
// (to capture ownership of channel data)
std::vector<ChannelDataDestructor> channel_data_destructors;
  // True iff this stack registers nothing at all -- no constructors,
  // destructors, per-direction operations, finalizers, or channel data
  // destructors -- i.e. attaching it to a call would be a no-op.
  bool empty() const {
    return filter_constructor.empty() && filter_destructor.empty() &&
           client_initial_metadata.ops.empty() &&
           server_initial_metadata.ops.empty() &&
           client_to_server_messages.ops.empty() &&
           client_to_server_half_close.empty() &&
           server_to_client_messages.ops.empty() &&
           server_trailing_metadata.empty() && finalizers.empty() &&
           channel_data_destructors.empty();
  }
// Add one filter to the list of filters, and update alignment.
// Returns the offset of the call data for this filter.
// Specifically does not update any of the layouts or finalizers.
@ -1099,244 +1116,6 @@ class OperationExecutor {
const Operator<T>* end_ops_;
};
// State machine tracking the progress of one call through the filter
// stack. Five small per-direction machines are packed into bit-fields
// (3+3+4+3+3 = 16 bits), and each machine has an IntraActivityWaiter so
// promise activities can park until progress is possible; a
// static_assert at the definition site keeps sizeof(CallState) <= 16.
//
// Naming convention: Begin*/Push* record that data has been offered,
// Poll* are promise-style poll functions, Finish* acknowledge that a
// pulled item has been fully processed.
class CallState {
 public:
  CallState();
  // Start the call: allows pulls to proceed
  void Start();
  // PUSH: client -> server
  void BeginPushClientToServerMessage();
  Poll<StatusFlag> PollPushClientToServerMessage();
  void ClientToServerHalfClose();
  // PULL: client -> server
  void BeginPullClientInitialMetadata();
  void FinishPullClientInitialMetadata();
  Poll<ValueOrFailure<bool>> PollPullClientToServerMessageAvailable();
  void FinishPullClientToServerMessage();
  // PUSH: server -> client
  StatusFlag PushServerInitialMetadata();
  void BeginPushServerToClientMessage();
  Poll<StatusFlag> PollPushServerToClientMessage();
  bool PushServerTrailingMetadata(bool cancel);
  // PULL: server -> client
  Poll<bool> PollPullServerInitialMetadataAvailable();
  void FinishPullServerInitialMetadata();
  Poll<ValueOrFailure<bool>> PollPullServerToClientMessageAvailable();
  void FinishPullServerToClientMessage();
  Poll<Empty> PollServerTrailingMetadataAvailable();
  void FinishPullServerTrailingMetadata();
  Poll<bool> PollWasCancelled();
  // Debug
  std::string DebugString() const;
  friend std::ostream& operator<<(std::ostream& out,
                                  const CallState& call_state) {
    return out << call_state.DebugString();
  }

 private:
  // Pull-side state for the client->server direction.
  enum class ClientToServerPullState : uint16_t {
    // Ready to read: client initial metadata is there, but not yet processed
    kBegin,
    // Processing client initial metadata
    kProcessingClientInitialMetadata,
    // Main call loop: not reading
    kIdle,
    // Main call loop: reading but no message available
    kReading,
    // Main call loop: processing one message
    kProcessingClientToServerMessage,
    // Processing complete
    kTerminated,
  };
  // Debug stringizer for ClientToServerPullState.
  static const char* ClientToServerPullStateString(
      ClientToServerPullState state) {
    switch (state) {
      case ClientToServerPullState::kBegin:
        return "Begin";
      case ClientToServerPullState::kProcessingClientInitialMetadata:
        return "ProcessingClientInitialMetadata";
      case ClientToServerPullState::kIdle:
        return "Idle";
      case ClientToServerPullState::kReading:
        return "Reading";
      case ClientToServerPullState::kProcessingClientToServerMessage:
        return "ProcessingClientToServerMessage";
      case ClientToServerPullState::kTerminated:
        return "Terminated";
    }
  }
  template <typename Sink>
  friend void AbslStringify(Sink& out, ClientToServerPullState state) {
    out.Append(ClientToServerPullStateString(state));
  }
  friend std::ostream& operator<<(std::ostream& out,
                                  ClientToServerPullState state) {
    return out << ClientToServerPullStateString(state);
  }
  // Push-side state for the client->server direction: which of
  // {message, half-close} have been pushed but not yet consumed.
  enum class ClientToServerPushState : uint16_t {
    kIdle,
    kPushedMessage,
    kPushedHalfClose,
    kPushedMessageAndHalfClosed,
    kFinished,
  };
  // Debug stringizer for ClientToServerPushState.
  static const char* ClientToServerPushStateString(
      ClientToServerPushState state) {
    switch (state) {
      case ClientToServerPushState::kIdle:
        return "Idle";
      case ClientToServerPushState::kPushedMessage:
        return "PushedMessage";
      case ClientToServerPushState::kPushedHalfClose:
        return "PushedHalfClose";
      case ClientToServerPushState::kPushedMessageAndHalfClosed:
        return "PushedMessageAndHalfClosed";
      case ClientToServerPushState::kFinished:
        return "Finished";
    }
  }
  template <typename Sink>
  friend void AbslStringify(Sink& out, ClientToServerPushState state) {
    out.Append(ClientToServerPushStateString(state));
  }
  friend std::ostream& operator<<(std::ostream& out,
                                  ClientToServerPushState state) {
    return out << ClientToServerPushStateString(state);
  }
  // Pull-side state for the server->client direction. The *Reading
  // variants remember that a read was requested before the call started
  // or while server initial metadata was still being processed.
  enum class ServerToClientPullState : uint16_t {
    // Not yet started: cannot read
    kUnstarted,
    kUnstartedReading,
    kStarted,
    kStartedReading,
    // Processing server initial metadata
    kProcessingServerInitialMetadata,
    kProcessingServerInitialMetadataReading,
    // Main call loop: not reading
    kIdle,
    // Main call loop: reading but no message available
    kReading,
    // Main call loop: processing one message
    kProcessingServerToClientMessage,
    // Processing server trailing metadata
    kProcessingServerTrailingMetadata,
    kTerminated,
  };
  // Debug stringizer for ServerToClientPullState.
  static const char* ServerToClientPullStateString(
      ServerToClientPullState state) {
    switch (state) {
      case ServerToClientPullState::kUnstarted:
        return "Unstarted";
      case ServerToClientPullState::kUnstartedReading:
        return "UnstartedReading";
      case ServerToClientPullState::kStarted:
        return "Started";
      case ServerToClientPullState::kStartedReading:
        return "StartedReading";
      case ServerToClientPullState::kProcessingServerInitialMetadata:
        return "ProcessingServerInitialMetadata";
      case ServerToClientPullState::kProcessingServerInitialMetadataReading:
        return "ProcessingServerInitialMetadataReading";
      case ServerToClientPullState::kIdle:
        return "Idle";
      case ServerToClientPullState::kReading:
        return "Reading";
      case ServerToClientPullState::kProcessingServerToClientMessage:
        return "ProcessingServerToClientMessage";
      case ServerToClientPullState::kProcessingServerTrailingMetadata:
        return "ProcessingServerTrailingMetadata";
      case ServerToClientPullState::kTerminated:
        return "Terminated";
    }
  }
  template <typename Sink>
  friend void AbslStringify(Sink& out, ServerToClientPullState state) {
    out.Append(ServerToClientPullStateString(state));
  }
  friend std::ostream& operator<<(std::ostream& out,
                                  ServerToClientPullState state) {
    return out << ServerToClientPullStateString(state);
  }
  // Push-side state for the server->client direction, covering initial
  // metadata, messages, and the trailers-only case.
  enum class ServerToClientPushState : uint16_t {
    kStart,
    kPushedServerInitialMetadata,
    kPushedServerInitialMetadataAndPushedMessage,
    kTrailersOnly,
    kIdle,
    kPushedMessage,
    kFinished,
  };
  // Debug stringizer for ServerToClientPushState.
  static const char* ServerToClientPushStateString(
      ServerToClientPushState state) {
    switch (state) {
      case ServerToClientPushState::kStart:
        return "Start";
      case ServerToClientPushState::kPushedServerInitialMetadata:
        return "PushedServerInitialMetadata";
      case ServerToClientPushState::
          kPushedServerInitialMetadataAndPushedMessage:
        return "PushedServerInitialMetadataAndPushedMessage";
      case ServerToClientPushState::kTrailersOnly:
        return "TrailersOnly";
      case ServerToClientPushState::kIdle:
        return "Idle";
      case ServerToClientPushState::kPushedMessage:
        return "PushedMessage";
      case ServerToClientPushState::kFinished:
        return "Finished";
    }
  }
  template <typename Sink>
  friend void AbslStringify(Sink& out, ServerToClientPushState state) {
    out.Append(ServerToClientPushStateString(state));
  }
  friend std::ostream& operator<<(std::ostream& out,
                                  ServerToClientPushState state) {
    return out << ServerToClientPushStateString(state);
  }
  // Lifecycle of server trailing metadata: pushed (with or without the
  // cancel bit) and then pulled by the reader.
  enum class ServerTrailingMetadataState : uint16_t {
    kNotPushed,
    kPushed,
    kPushedCancel,
    kPulled,
    kPulledCancel,
  };
  // Debug stringizer for ServerTrailingMetadataState.
  static const char* ServerTrailingMetadataStateString(
      ServerTrailingMetadataState state) {
    switch (state) {
      case ServerTrailingMetadataState::kNotPushed:
        return "NotPushed";
      case ServerTrailingMetadataState::kPushed:
        return "Pushed";
      case ServerTrailingMetadataState::kPushedCancel:
        return "PushedCancel";
      case ServerTrailingMetadataState::kPulled:
        return "Pulled";
      case ServerTrailingMetadataState::kPulledCancel:
        return "PulledCancel";
    }
  }
  template <typename Sink>
  friend void AbslStringify(Sink& out, ServerTrailingMetadataState state) {
    out.Append(ServerTrailingMetadataStateString(state));
  }
  friend std::ostream& operator<<(std::ostream& out,
                                  ServerTrailingMetadataState state) {
    return out << ServerTrailingMetadataStateString(state);
  }
  // Bit-field widths are sized to each enum's value count; together the
  // five fields (3+3+4+3+3 bits) pack into a single 16-bit word.
  ClientToServerPullState client_to_server_pull_state_ : 3;
  ClientToServerPushState client_to_server_push_state_ : 3;
  ServerToClientPullState server_to_client_pull_state_ : 4;
  ServerToClientPushState server_to_client_push_state_ : 3;
  ServerTrailingMetadataState server_trailing_metadata_state_ : 3;
  // One waiter per state machine; woken whenever the corresponding
  // state advances so parked promise activities re-poll.
  IntraActivityWaiter client_to_server_pull_waiter_;
  IntraActivityWaiter server_to_client_pull_waiter_;
  IntraActivityWaiter client_to_server_push_waiter_;
  IntraActivityWaiter server_to_client_push_waiter_;
  IntraActivityWaiter server_trailing_metadata_waiter_;
};
template <typename Fn>
class ServerTrailingMetadataInterceptor {
public:
@ -1505,7 +1284,11 @@ class CallFilters {
CallFilters(CallFilters&&) = delete;
CallFilters& operator=(CallFilters&&) = delete;
void SetStack(RefCountedPtr<Stack> stack);
void AddStack(RefCountedPtr<Stack> stack) {
if (stack->data_.empty()) return;
stacks_.emplace_back(std::move(stack));
}
void Start();
// Access client initial metadata before it's processed
ClientMetadata* unprocessed_client_initial_metadata() {
@ -1513,47 +1296,72 @@ class CallFilters {
}
private:
template <typename Output, void (filters_detail::CallState::*on_done)(),
typename Input>
Poll<ValueOrFailure<Output>> FinishStep(
Poll<filters_detail::ResultOr<Input>> p) {
auto* r = p.value_if_ready();
if (r == nullptr) return Pending{};
(call_state_.*on_done)();
if (r->ok != nullptr) {
return ValueOrFailure<Output>{std::move(r->ok)};
}
PushServerTrailingMetadata(std::move(r->error));
return Failure{};
}
template <typename Output, typename Input,
Input(CallFilters::*input_location),
filters_detail::Layout<Input>(filters_detail::StackData::*layout),
void (filters_detail::CallState::*on_done)()>
auto RunExecutor() {
DCHECK_NE((this->*input_location).get(), nullptr);
filters_detail::OperationExecutor<Input> executor;
return [this, executor = std::move(executor)]() mutable {
if ((this->*input_location) != nullptr) {
return FinishStep<Output, on_done>(
executor.Start(&(stack_->data_.*layout),
std::move(this->*input_location), call_data_));
void (CallState::*on_done)(), typename StackIterator>
class Executor {
public:
Executor(CallFilters* filters, StackIterator stack_begin,
StackIterator stack_end)
: stack_current_(stack_begin),
stack_end_(stack_end),
filters_(filters) {
DCHECK_NE((filters_->*input_location).get(), nullptr);
}
Poll<ValueOrFailure<Output>> operator()() {
if ((filters_->*input_location) != nullptr) {
if (stack_current_ == stack_end_) {
DCHECK_NE((filters_->*input_location).get(), nullptr);
(filters_->call_state_.*on_done)();
return Output(std::move(filters_->*input_location));
}
return FinishStep(executor_.Start(
&(stack_current_->stack->data_.*layout),
std::move(filters_->*input_location), filters_->call_data_));
} else {
return FinishStep(executor_.Step(filters_->call_data_));
}
return FinishStep<Output, on_done>(executor.Step(call_data_));
};
}
}
private:
Poll<ValueOrFailure<Output>> FinishStep(
Poll<filters_detail::ResultOr<Input>> p) {
auto* r = p.value_if_ready();
if (r == nullptr) return Pending{};
if (r->ok != nullptr) {
++stack_current_;
if (stack_current_ == stack_end_) {
(filters_->call_state_.*on_done)();
return ValueOrFailure<Output>{std::move(r->ok)};
}
return FinishStep(
executor_.Start(&(stack_current_->stack->data_.*layout),
std::move(r->ok), filters_->call_data_));
}
(filters_->call_state_.*on_done)();
filters_->PushServerTrailingMetadata(std::move(r->error));
return Failure{};
}
StackIterator stack_current_;
StackIterator stack_end_;
CallFilters* filters_;
filters_detail::OperationExecutor<Input> executor_;
};
public:
// Client: Fetch client initial metadata
// Returns a promise that resolves to ValueOrFailure<ClientMetadataHandle>
GRPC_MUST_USE_RESULT auto PullClientInitialMetadata() {
call_state_.BeginPullClientInitialMetadata();
return RunExecutor<
ClientMetadataHandle, ClientMetadataHandle,
&CallFilters::push_client_initial_metadata_,
&filters_detail::StackData::client_initial_metadata,
&filters_detail::CallState::FinishPullClientInitialMetadata>();
return Executor<ClientMetadataHandle, ClientMetadataHandle,
&CallFilters::push_client_initial_metadata_,
&filters_detail::StackData::client_initial_metadata,
&CallState::FinishPullClientInitialMetadata,
StacksVector::const_iterator>(this, stacks_.cbegin(),
stacks_.cend());
}
// Server: Push server initial metadata
// Returns a promise that resolves to a StatusFlag indicating success
@ -1573,13 +1381,14 @@ class CallFilters {
has_server_initial_metadata,
[this]() {
return Map(
RunExecutor<
Executor<
absl::optional<ServerMetadataHandle>,
ServerMetadataHandle,
&CallFilters::push_server_initial_metadata_,
&filters_detail::StackData::server_initial_metadata,
&filters_detail::CallState::
FinishPullServerInitialMetadata>(),
&CallState::FinishPullServerInitialMetadata,
StacksVector::const_reverse_iterator>(
this, stacks_.crbegin(), stacks_.crend()),
[](ValueOrFailure<absl::optional<ServerMetadataHandle>> r) {
if (r.ok()) return std::move(*r);
return absl::optional<ServerMetadataHandle>{};
@ -1612,12 +1421,13 @@ class CallFilters {
return If(
message_available,
[this]() {
return RunExecutor<
return Executor<
absl::optional<MessageHandle>, MessageHandle,
&CallFilters::push_client_to_server_message_,
&filters_detail::StackData::client_to_server_messages,
&filters_detail::CallState::
FinishPullClientToServerMessage>();
&CallState::FinishPullClientToServerMessage,
StacksVector::const_iterator>(this, stacks_.cbegin(),
stacks_.cend());
},
[]() -> ValueOrFailure<absl::optional<MessageHandle>> {
return absl::optional<MessageHandle>();
@ -1642,12 +1452,13 @@ class CallFilters {
return If(
message_available,
[this]() {
return RunExecutor<
return Executor<
absl::optional<MessageHandle>, MessageHandle,
&CallFilters::push_server_to_client_message_,
&filters_detail::StackData::server_to_client_messages,
&filters_detail::CallState::
FinishPullServerToClientMessage>();
&CallState::FinishPullServerToClientMessage,
StacksVector::const_reverse_iterator>(
this, stacks_.crbegin(), stacks_.crend());
},
[]() -> ValueOrFailure<absl::optional<MessageHandle>> {
return absl::optional<MessageHandle>();
@ -1665,12 +1476,17 @@ class CallFilters {
return Map(
[this]() { return call_state_.PollServerTrailingMetadataAvailable(); },
[this](Empty) {
auto result = std::move(push_server_trailing_metadata_);
auto value = std::move(push_server_trailing_metadata_);
if (call_data_ != nullptr) {
for (auto it = stacks_.crbegin(); it != stacks_.crend(); ++it) {
value = filters_detail::RunServerTrailingMetadata(
it->stack->data_.server_trailing_metadata,
filters_detail::Offset(call_data_, it->call_data_offset),
std::move(value));
}
}
call_state_.FinishPullServerTrailingMetadata();
if (call_data_ == nullptr) return result;
return filters_detail::RunServerTrailingMetadata(
stack_->data_.server_trailing_metadata, call_data_,
std::move(result));
return value;
});
}
// Server: Wait for server trailing metadata to have been sent
@ -1689,9 +1505,19 @@ class CallFilters {
private:
void CancelDueToFailedPipeOperation(SourceLocation but_where = {});
RefCountedPtr<Stack> stack_;
  // One filter stack added to this call, paired with the byte offset of
  // its per-call data within the call data allocation.
  struct AddedStack {
    explicit AddedStack(RefCountedPtr<Stack> stack)
        // The offset is unknown until the overall call-data layout is
        // computed; size_t max serves as a "not yet assigned" sentinel.
        : call_data_offset(std::numeric_limits<size_t>::max()),
          stack(std::move(stack)) {}
    size_t call_data_offset;
    RefCountedPtr<Stack> stack;
  };
using StacksVector = absl::InlinedVector<AddedStack, 2>;
StacksVector stacks_;
filters_detail::CallState call_state_;
CallState call_state_;
void* call_data_;
ClientMetadataHandle push_client_initial_metadata_;

@ -407,14 +407,12 @@ class UnstartedCallHandler {
return spine_->UnprocessedClientInitialMetadata();
}
// Helper for the very common situation in tests where we want to start a call
// with an empty filter stack.
CallHandler StartWithEmptyFilterStack() {
return StartCall(CallFilters::StackBuilder().Build());
  // Append a filter stack to this call's filter chain (may be called
  // multiple times before StartCall()).
  void AddCallStack(RefCountedPtr<CallFilters::Stack> call_filters) {
    spine_->call_filters().AddStack(std::move(call_filters));
  }
CallHandler StartCall(RefCountedPtr<CallFilters::Stack> call_filters) {
spine_->call_filters().SetStack(std::move(call_filters));
  // Start the call with whatever stacks were added so far, consuming
  // this handler and returning a started CallHandler for the spine.
  CallHandler StartCall() {
    spine_->call_filters().Start();
    return CallHandler(std::move(spine_));
  }

@ -0,0 +1,39 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/transport/call_state.h"
namespace grpc_core {
// Renders all five state machines and their waiters into one
// space-separated string for trace logging.
std::string CallState::DebugString() const {
  return absl::StrCat(
      "client_to_server_pull_state:", client_to_server_pull_state_,
      " client_to_server_push_state:", client_to_server_push_state_,
      " server_to_client_pull_state:", server_to_client_pull_state_,
      " server_to_client_message_push_state:", server_to_client_push_state_,
      " server_trailing_metadata_state:", server_trailing_metadata_state_,
      // Fix: this waiter previously had no label, so its DebugString()
      // was jammed directly onto the preceding state with no separator.
      " client_to_server_push_waiter:",
      client_to_server_push_waiter_.DebugString(),
      " server_to_client_push_waiter:",
      server_to_client_push_waiter_.DebugString(),
      " client_to_server_pull_waiter:",
      client_to_server_pull_waiter_.DebugString(),
      " server_to_client_pull_waiter:",
      server_to_client_pull_waiter_.DebugString(),
      " server_trailing_metadata_waiter:",
      server_trailing_metadata_waiter_.DebugString());
}
static_assert(sizeof(CallState) <= 16, "CallState too large");
} // namespace grpc_core

@ -0,0 +1,957 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_CALL_STATE_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_CALL_STATE_H
#include "absl/types/optional.h"
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/status_flag.h"
namespace grpc_core {
class CallState {
public:
CallState();
// Start the call: allows pulls to proceed
void Start();
// PUSH: client -> server
void BeginPushClientToServerMessage();
Poll<StatusFlag> PollPushClientToServerMessage();
void ClientToServerHalfClose();
// PULL: client -> server
void BeginPullClientInitialMetadata();
void FinishPullClientInitialMetadata();
Poll<ValueOrFailure<bool>> PollPullClientToServerMessageAvailable();
void FinishPullClientToServerMessage();
// PUSH: server -> client
StatusFlag PushServerInitialMetadata();
void BeginPushServerToClientMessage();
Poll<StatusFlag> PollPushServerToClientMessage();
bool PushServerTrailingMetadata(bool cancel);
// PULL: server -> client
Poll<bool> PollPullServerInitialMetadataAvailable();
void FinishPullServerInitialMetadata();
Poll<ValueOrFailure<bool>> PollPullServerToClientMessageAvailable();
void FinishPullServerToClientMessage();
Poll<Empty> PollServerTrailingMetadataAvailable();
void FinishPullServerTrailingMetadata();
Poll<bool> PollWasCancelled();
// Debug
std::string DebugString() const;
friend std::ostream& operator<<(std::ostream& out,
const CallState& call_state) {
return out << call_state.DebugString();
}
private:
enum class ClientToServerPullState : uint16_t {
// Ready to read: client initial metadata is there, but not yet processed
kBegin,
// Processing client initial metadata
kProcessingClientInitialMetadata,
// Main call loop: not reading
kIdle,
// Main call loop: reading but no message available
kReading,
// Main call loop: processing one message
kProcessingClientToServerMessage,
// Processing complete
kTerminated,
};
static const char* ClientToServerPullStateString(
ClientToServerPullState state) {
switch (state) {
case ClientToServerPullState::kBegin:
return "Begin";
case ClientToServerPullState::kProcessingClientInitialMetadata:
return "ProcessingClientInitialMetadata";
case ClientToServerPullState::kIdle:
return "Idle";
case ClientToServerPullState::kReading:
return "Reading";
case ClientToServerPullState::kProcessingClientToServerMessage:
return "ProcessingClientToServerMessage";
case ClientToServerPullState::kTerminated:
return "Terminated";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ClientToServerPullState state) {
out.Append(ClientToServerPullStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ClientToServerPullState state) {
return out << ClientToServerPullStateString(state);
}
enum class ClientToServerPushState : uint16_t {
kIdle,
kPushedMessage,
kPushedHalfClose,
kPushedMessageAndHalfClosed,
kFinished,
};
static const char* ClientToServerPushStateString(
ClientToServerPushState state) {
switch (state) {
case ClientToServerPushState::kIdle:
return "Idle";
case ClientToServerPushState::kPushedMessage:
return "PushedMessage";
case ClientToServerPushState::kPushedHalfClose:
return "PushedHalfClose";
case ClientToServerPushState::kPushedMessageAndHalfClosed:
return "PushedMessageAndHalfClosed";
case ClientToServerPushState::kFinished:
return "Finished";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ClientToServerPushState state) {
out.Append(ClientToServerPushStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ClientToServerPushState state) {
return out << ClientToServerPushStateString(state);
}
enum class ServerToClientPullState : uint16_t {
// Not yet started: cannot read
kUnstarted,
kUnstartedReading,
kStarted,
kStartedReading,
// Processing server initial metadata
kProcessingServerInitialMetadata,
kProcessingServerInitialMetadataReading,
// Main call loop: not reading
kIdle,
// Main call loop: reading but no message available
kReading,
// Main call loop: processing one message
kProcessingServerToClientMessage,
// Processing server trailing metadata
kProcessingServerTrailingMetadata,
kTerminated,
};
static const char* ServerToClientPullStateString(
ServerToClientPullState state) {
switch (state) {
case ServerToClientPullState::kUnstarted:
return "Unstarted";
case ServerToClientPullState::kUnstartedReading:
return "UnstartedReading";
case ServerToClientPullState::kStarted:
return "Started";
case ServerToClientPullState::kStartedReading:
return "StartedReading";
case ServerToClientPullState::kProcessingServerInitialMetadata:
return "ProcessingServerInitialMetadata";
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
return "ProcessingServerInitialMetadataReading";
case ServerToClientPullState::kIdle:
return "Idle";
case ServerToClientPullState::kReading:
return "Reading";
case ServerToClientPullState::kProcessingServerToClientMessage:
return "ProcessingServerToClientMessage";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
return "ProcessingServerTrailingMetadata";
case ServerToClientPullState::kTerminated:
return "Terminated";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerToClientPullState state) {
out.Append(ServerToClientPullStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerToClientPullState state) {
return out << ServerToClientPullStateString(state);
}
enum class ServerToClientPushState : uint16_t {
kStart,
kPushedServerInitialMetadata,
kPushedServerInitialMetadataAndPushedMessage,
kTrailersOnly,
kIdle,
kPushedMessage,
kFinished,
};
static const char* ServerToClientPushStateString(
ServerToClientPushState state) {
switch (state) {
case ServerToClientPushState::kStart:
return "Start";
case ServerToClientPushState::kPushedServerInitialMetadata:
return "PushedServerInitialMetadata";
case ServerToClientPushState::
kPushedServerInitialMetadataAndPushedMessage:
return "PushedServerInitialMetadataAndPushedMessage";
case ServerToClientPushState::kTrailersOnly:
return "TrailersOnly";
case ServerToClientPushState::kIdle:
return "Idle";
case ServerToClientPushState::kPushedMessage:
return "PushedMessage";
case ServerToClientPushState::kFinished:
return "Finished";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerToClientPushState state) {
out.Append(ServerToClientPushStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerToClientPushState state) {
return out << ServerToClientPushStateString(state);
}
enum class ServerTrailingMetadataState : uint16_t {
kNotPushed,
kPushed,
kPushedCancel,
kPulled,
kPulledCancel,
};
static const char* ServerTrailingMetadataStateString(
ServerTrailingMetadataState state) {
switch (state) {
case ServerTrailingMetadataState::kNotPushed:
return "NotPushed";
case ServerTrailingMetadataState::kPushed:
return "Pushed";
case ServerTrailingMetadataState::kPushedCancel:
return "PushedCancel";
case ServerTrailingMetadataState::kPulled:
return "Pulled";
case ServerTrailingMetadataState::kPulledCancel:
return "PulledCancel";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerTrailingMetadataState state) {
out.Append(ServerTrailingMetadataStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerTrailingMetadataState state) {
return out << ServerTrailingMetadataStateString(state);
}
  // Per-direction state machines, packed into bit-fields to keep CallState
  // small. Each width must be able to represent every enumerator of its
  // enum (e.g. ServerToClientPullState has 11 states and so needs 4 bits).
  ClientToServerPullState client_to_server_pull_state_ : 3;
  ClientToServerPushState client_to_server_push_state_ : 3;
  ServerToClientPullState server_to_client_pull_state_ : 4;
  ServerToClientPushState server_to_client_push_state_ : 3;
  ServerTrailingMetadataState server_trailing_metadata_state_ : 3;
  // Waiters record the party blocked on the matching state machine; each is
  // woken (Wake()) whenever that state machine advances.
  IntraActivityWaiter client_to_server_pull_waiter_;
  IntraActivityWaiter server_to_client_pull_waiter_;
  IntraActivityWaiter client_to_server_push_waiter_;
  IntraActivityWaiter server_to_client_push_waiter_;
  IntraActivityWaiter server_trailing_metadata_waiter_;
};
// Initializes each state machine to its pre-call resting state: nothing
// pulled, nothing pushed, trailing metadata absent.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline CallState::CallState()
    : client_to_server_pull_state_(ClientToServerPullState::kBegin),
      client_to_server_push_state_(ClientToServerPushState::kIdle),
      server_to_client_pull_state_(ServerToClientPullState::kUnstarted),
      server_to_client_push_state_(ServerToClientPushState::kStart),
      server_trailing_metadata_state_(ServerTrailingMetadataState::kNotPushed) {
}
// Marks the call as started, moving the server->client pull state out of its
// unstarted phase and waking any pull waiter. Fatal if called twice; a no-op
// once trailing metadata is being processed or the call has terminated.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void CallState::Start() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] Start: "
      << GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
  switch (server_to_client_pull_state_) {
    case ServerToClientPullState::kUnstarted:
      server_to_client_pull_state_ = ServerToClientPullState::kStarted;
      server_to_client_pull_waiter_.Wake();
      break;
    case ServerToClientPullState::kUnstartedReading:
      // A read was already requested before Start; keep that fact in the
      // started state.
      server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
      server_to_client_pull_waiter_.Wake();
      break;
    case ServerToClientPullState::kStarted:
    case ServerToClientPullState::kStartedReading:
    case ServerToClientPullState::kProcessingServerInitialMetadata:
    case ServerToClientPullState::kProcessingServerInitialMetadataReading:
    case ServerToClientPullState::kIdle:
    case ServerToClientPullState::kReading:
    case ServerToClientPullState::kProcessingServerToClientMessage:
      LOG(FATAL) << "Start called twice";
    case ServerToClientPullState::kProcessingServerTrailingMetadata:
    case ServerToClientPullState::kTerminated:
      break;
  }
}
// Records that a client->server message has been pushed and wakes any pending
// puller. Fatal on a concurrent push or a push after half-close; silently
// ignored once the push path has finished (e.g. after cancellation).
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::BeginPushClientToServerMessage() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] BeginPushClientToServerMessage: "
      << GRPC_DUMP_ARGS(this, client_to_server_push_state_);
  switch (client_to_server_push_state_) {
    case ClientToServerPushState::kIdle:
      client_to_server_push_state_ = ClientToServerPushState::kPushedMessage;
      client_to_server_push_waiter_.Wake();
      break;
    case ClientToServerPushState::kPushedMessage:
    case ClientToServerPushState::kPushedMessageAndHalfClosed:
      LOG(FATAL) << "PushClientToServerMessage called twice concurrently";
      break;
    case ClientToServerPushState::kPushedHalfClose:
      LOG(FATAL) << "PushClientToServerMessage called after half-close";
      break;
    case ClientToServerPushState::kFinished:
      break;
  }
}
// Polls for completion of the in-flight client->server message push.
// Resolves Success when no message is outstanding (it was consumed, or
// half-close already absorbed it), pends while a message awaits the puller,
// and resolves Failure if the push path was torn down.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<StatusFlag>
CallState::PollPushClientToServerMessage() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PollPushClientToServerMessage: "
      << GRPC_DUMP_ARGS(this, client_to_server_push_state_);
  switch (client_to_server_push_state_) {
    case ClientToServerPushState::kIdle:
    case ClientToServerPushState::kPushedHalfClose:
      return Success{};
    case ClientToServerPushState::kPushedMessage:
    case ClientToServerPushState::kPushedMessageAndHalfClosed:
      return client_to_server_push_waiter_.pending();
    case ClientToServerPushState::kFinished:
      return Failure{};
  }
  Crash("Unreachable");
}
// Marks the client->server direction half-closed: no further messages will
// be pushed. If a message is still in flight the half-close is folded into
// its state so the puller observes the message first. Fatal if called twice;
// ignored after the push path has finished.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::ClientToServerHalfClose() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] ClientToServerHalfClose: "
      << GRPC_DUMP_ARGS(this, client_to_server_push_state_);
  switch (client_to_server_push_state_) {
    case ClientToServerPushState::kIdle:
      client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
      client_to_server_push_waiter_.Wake();
      break;
    case ClientToServerPushState::kPushedMessage:
      client_to_server_push_state_ =
          ClientToServerPushState::kPushedMessageAndHalfClosed;
      break;
    case ClientToServerPushState::kPushedHalfClose:
    case ClientToServerPushState::kPushedMessageAndHalfClosed:
      LOG(FATAL) << "ClientToServerHalfClose called twice";
      break;
    case ClientToServerPushState::kFinished:
      break;
  }
}
// Begins consumption of the client's initial metadata (the first pull
// operation of the client->server path). Fatal if called twice; a no-op
// after termination.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::BeginPullClientInitialMetadata() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] BeginPullClientInitialMetadata: "
      << GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
  switch (client_to_server_pull_state_) {
    case ClientToServerPullState::kBegin:
      client_to_server_pull_state_ =
          ClientToServerPullState::kProcessingClientInitialMetadata;
      break;
    case ClientToServerPullState::kProcessingClientInitialMetadata:
    case ClientToServerPullState::kIdle:
    case ClientToServerPullState::kReading:
    case ClientToServerPullState::kProcessingClientToServerMessage:
      LOG(FATAL) << "BeginPullClientInitialMetadata called twice";
      break;
    case ClientToServerPullState::kTerminated:
      break;
  }
}
// Completes consumption of the client's initial metadata, moving the pull
// path to idle so messages may be pulled, and wakes any pull waiter. Fatal
// out of order; a no-op after termination.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullClientInitialMetadata() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] FinishPullClientInitialMetadata: "
      << GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
  switch (client_to_server_pull_state_) {
    case ClientToServerPullState::kBegin:
      LOG(FATAL) << "FinishPullClientInitialMetadata called before Begin";
      break;
    case ClientToServerPullState::kProcessingClientInitialMetadata:
      client_to_server_pull_state_ = ClientToServerPullState::kIdle;
      client_to_server_pull_waiter_.Wake();
      break;
    case ClientToServerPullState::kIdle:
    case ClientToServerPullState::kReading:
    case ClientToServerPullState::kProcessingClientToServerMessage:
      LOG(FATAL) << "Out of order FinishPullClientInitialMetadata";
      break;
    case ClientToServerPullState::kTerminated:
      break;
  }
}
// Polls for the next client->server message. Resolves true when a message is
// available for processing, false when the client half-closed with no further
// messages, Failure after termination; otherwise pends on the relevant
// waiter. First consults the pull state (are we ready to read?), then the
// push state (is anything there to read?).
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<ValueOrFailure<bool>>
CallState::PollPullClientToServerMessageAvailable() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PollPullClientToServerMessageAvailable: "
      << GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
                        client_to_server_push_state_);
  switch (client_to_server_pull_state_) {
    case ClientToServerPullState::kBegin:
    case ClientToServerPullState::kProcessingClientInitialMetadata:
      // Initial metadata must be consumed before messages can be read.
      return client_to_server_pull_waiter_.pending();
    case ClientToServerPullState::kIdle:
      client_to_server_pull_state_ = ClientToServerPullState::kReading;
      ABSL_FALLTHROUGH_INTENDED;
    case ClientToServerPullState::kReading:
      break;
    case ClientToServerPullState::kProcessingClientToServerMessage:
      LOG(FATAL) << "PollPullClientToServerMessageAvailable called while "
                    "processing a message";
      break;
    case ClientToServerPullState::kTerminated:
      return Failure{};
  }
  DCHECK_EQ(client_to_server_pull_state_, ClientToServerPullState::kReading);
  switch (client_to_server_push_state_) {
    case ClientToServerPushState::kIdle:
      // Nothing pushed yet: wait for the pusher.
      return client_to_server_push_waiter_.pending();
    case ClientToServerPushState::kPushedMessage:
    case ClientToServerPushState::kPushedMessageAndHalfClosed:
      client_to_server_pull_state_ =
          ClientToServerPullState::kProcessingClientToServerMessage;
      return true;
    case ClientToServerPushState::kPushedHalfClose:
      // Clean end of the client->server message stream.
      return false;
    case ClientToServerPushState::kFinished:
      client_to_server_pull_state_ = ClientToServerPullState::kTerminated;
      return Failure{};
  }
  Crash("Unreachable");
}
// Completes consumption of a client->server message: returns the pull state
// to idle and releases the pusher. If the message was pushed together with a
// half-close, the half-close is restored so the next poll observes it. Fatal
// out of order; tolerant of a terminated/finished call.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullClientToServerMessage() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] FinishPullClientToServerMessage: "
      << GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
                        client_to_server_push_state_);
  switch (client_to_server_pull_state_) {
    case ClientToServerPullState::kBegin:
    case ClientToServerPullState::kProcessingClientInitialMetadata:
      LOG(FATAL) << "FinishPullClientToServerMessage called before Begin";
      break;
    case ClientToServerPullState::kIdle:
      LOG(FATAL) << "FinishPullClientToServerMessage called twice";
      break;
    case ClientToServerPullState::kReading:
      LOG(FATAL) << "FinishPullClientToServerMessage called before "
                    "PollPullClientToServerMessageAvailable";
      break;
    case ClientToServerPullState::kProcessingClientToServerMessage:
      client_to_server_pull_state_ = ClientToServerPullState::kIdle;
      client_to_server_pull_waiter_.Wake();
      break;
    case ClientToServerPullState::kTerminated:
      break;
  }
  switch (client_to_server_push_state_) {
    case ClientToServerPushState::kPushedMessage:
      client_to_server_push_state_ = ClientToServerPushState::kIdle;
      client_to_server_push_waiter_.Wake();
      break;
    case ClientToServerPushState::kIdle:
    case ClientToServerPushState::kPushedHalfClose:
      LOG(FATAL) << "FinishPullClientToServerMessage called without a message";
      break;
    case ClientToServerPushState::kPushedMessageAndHalfClosed:
      // The message is consumed; surface the pending half-close.
      client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
      client_to_server_push_waiter_.Wake();
      break;
    case ClientToServerPushState::kFinished:
      break;
  }
}
// Pushes server initial metadata toward the client. Fails if trailing
// metadata has already been pushed (the call is closing, so the metadata
// would never be delivered); otherwise the push path must still be at its
// start. Wakes any party waiting on the server->client push state.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline StatusFlag
CallState::PushServerInitialMetadata() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PushServerInitialMetadata: "
      << GRPC_DUMP_ARGS(this, server_to_client_push_state_,
                        server_trailing_metadata_state_);
  if (server_trailing_metadata_state_ !=
      ServerTrailingMetadataState::kNotPushed) {
    return Failure{};
  }
  CHECK_EQ(server_to_client_push_state_, ServerToClientPushState::kStart);
  server_to_client_push_state_ =
      ServerToClientPushState::kPushedServerInitialMetadata;
  server_to_client_push_waiter_.Wake();
  return Success{};
}
// Records that a server->client message has been pushed. Fatal if called
// before PushServerInitialMetadata or concurrently with another push; in the
// trailers-only state the push is accepted here and rejected by the
// subsequent poll; ignored once the push path is finished.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::BeginPushServerToClientMessage() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] BeginPushServerToClientMessage: "
      << GRPC_DUMP_ARGS(this, server_to_client_push_state_);
  switch (server_to_client_push_state_) {
    case ServerToClientPushState::kStart:
      LOG(FATAL) << "BeginPushServerToClientMessage called before "
                    "PushServerInitialMetadata";
      break;
    case ServerToClientPushState::kPushedServerInitialMetadata:
      // Metadata not yet consumed: remember both pushes together.
      server_to_client_push_state_ =
          ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage;
      break;
    case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
    case ServerToClientPushState::kPushedMessage:
      LOG(FATAL) << "BeginPushServerToClientMessage called twice concurrently";
      break;
    case ServerToClientPushState::kTrailersOnly:
      // Will fail in poll.
      break;
    case ServerToClientPushState::kIdle:
      server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
      server_to_client_push_waiter_.Wake();
      break;
    case ServerToClientPushState::kFinished:
      break;
  }
}
// Polls for completion of the in-flight server->client message push.
// Resolves false for a trailers-only response (the message cannot be
// delivered), Success once the message has been consumed, Failure after the
// push path finished; pends while the message awaits the puller.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<StatusFlag>
CallState::PollPushServerToClientMessage() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PollPushServerToClientMessage: "
      << GRPC_DUMP_ARGS(this, server_to_client_push_state_);
  switch (server_to_client_push_state_) {
    case ServerToClientPushState::kStart:
    case ServerToClientPushState::kPushedServerInitialMetadata:
      LOG(FATAL) << "PollPushServerToClientMessage called before "
                 << "PushServerInitialMetadata";
    case ServerToClientPushState::kTrailersOnly:
      return false;
    case ServerToClientPushState::kPushedMessage:
    case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
      return server_to_client_push_waiter_.pending();
    case ServerToClientPushState::kIdle:
      return Success{};
    case ServerToClientPushState::kFinished:
      return Failure{};
  }
  Crash("Unreachable");
}
// Pushes server trailing metadata, closing the call from the server side.
// Returns false if trailing metadata was already pushed (first push wins),
// true otherwise. Also drives both push paths toward their terminal states:
// trailers before initial metadata becomes a trailers-only response, a
// cancellation finishes the server->client path outright, and the
// client->server push path is finished in every non-terminal state.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline bool
CallState::PushServerTrailingMetadata(bool cancel) {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PushServerTrailingMetadata: "
      << GRPC_DUMP_ARGS(this, cancel, server_trailing_metadata_state_,
                        server_to_client_push_state_,
                        client_to_server_push_state_,
                        server_trailing_metadata_waiter_.DebugString());
  if (server_trailing_metadata_state_ !=
      ServerTrailingMetadataState::kNotPushed) {
    return false;
  }
  server_trailing_metadata_state_ =
      cancel ? ServerTrailingMetadataState::kPushedCancel
             : ServerTrailingMetadataState::kPushed;
  server_trailing_metadata_waiter_.Wake();
  switch (server_to_client_push_state_) {
    case ServerToClientPushState::kStart:
      // No initial metadata was ever pushed: trailers-only response.
      server_to_client_push_state_ = ServerToClientPushState::kTrailersOnly;
      server_to_client_push_waiter_.Wake();
      break;
    case ServerToClientPushState::kPushedServerInitialMetadata:
    case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
    case ServerToClientPushState::kPushedMessage:
    case ServerToClientPushState::kIdle:
      // A cancellation abandons any in-flight pushes.
      if (cancel) {
        server_to_client_push_state_ = ServerToClientPushState::kFinished;
        server_to_client_push_waiter_.Wake();
      }
      break;
    case ServerToClientPushState::kFinished:
    case ServerToClientPushState::kTrailersOnly:
      break;
  }
  switch (client_to_server_push_state_) {
    case ClientToServerPushState::kIdle:
    case ClientToServerPushState::kPushedMessage:
    case ClientToServerPushState::kPushedMessageAndHalfClosed:
      client_to_server_push_state_ = ClientToServerPushState::kFinished;
      client_to_server_push_waiter_.Wake();
      break;
    case ClientToServerPushState::kPushedHalfClose:
    case ClientToServerPushState::kFinished:
      break;
  }
  return true;
}
// Polls for the arrival of server initial metadata. Resolves true once the
// metadata may be processed, false for trailers-only / finished / terminated
// calls; otherwise pends. The `reading` flag preserves whether a message
// read was already requested, so the correct processing state is chosen.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<bool>
CallState::PollPullServerInitialMetadataAvailable() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PollPullServerInitialMetadataAvailable: "
      << GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
                        server_to_client_push_state_);
  bool reading;
  switch (server_to_client_pull_state_) {
    case ServerToClientPullState::kUnstarted:
    case ServerToClientPullState::kUnstartedReading:
      if (server_to_client_push_state_ ==
          ServerToClientPushState::kTrailersOnly) {
        server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
        return false;
      }
      // Register interest in push-state changes too; the result of pending()
      // is deliberately discarded.
      server_to_client_push_waiter_.pending();
      return server_to_client_pull_waiter_.pending();
    case ServerToClientPullState::kStartedReading:
      reading = true;
      break;
    case ServerToClientPullState::kStarted:
      reading = false;
      break;
    case ServerToClientPullState::kProcessingServerInitialMetadata:
    case ServerToClientPullState::kProcessingServerInitialMetadataReading:
    case ServerToClientPullState::kIdle:
    case ServerToClientPullState::kReading:
    case ServerToClientPullState::kProcessingServerToClientMessage:
    case ServerToClientPullState::kProcessingServerTrailingMetadata:
      LOG(FATAL) << "PollPullServerInitialMetadataAvailable called twice";
    case ServerToClientPullState::kTerminated:
      return false;
  }
  DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kStarted ||
         server_to_client_pull_state_ ==
             ServerToClientPullState::kStartedReading)
      << server_to_client_pull_state_;
  switch (server_to_client_push_state_) {
    case ServerToClientPushState::kStart:
      return server_to_client_push_waiter_.pending();
    case ServerToClientPushState::kPushedServerInitialMetadata:
    case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
      server_to_client_pull_state_ =
          reading
              ? ServerToClientPullState::kProcessingServerInitialMetadataReading
              : ServerToClientPullState::kProcessingServerInitialMetadata;
      server_to_client_pull_waiter_.Wake();
      return true;
    case ServerToClientPushState::kIdle:
    case ServerToClientPushState::kPushedMessage:
      LOG(FATAL)
          << "PollPullServerInitialMetadataAvailable after metadata processed";
    case ServerToClientPushState::kFinished:
      server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
      server_to_client_pull_waiter_.Wake();
      return false;
    case ServerToClientPushState::kTrailersOnly:
      return false;
  }
  Crash("Unreachable");
}
// Completes consumption of server initial metadata, advancing the pull path
// to idle (or straight to reading if a read was already requested) and
// releasing the pusher. For trailers-only responses this is a checked no-op.
// Fatal out of order.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullServerInitialMetadata() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] FinishPullServerInitialMetadata: "
      << GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
  switch (server_to_client_pull_state_) {
    case ServerToClientPullState::kUnstarted:
    case ServerToClientPullState::kUnstartedReading:
      LOG(FATAL) << "FinishPullServerInitialMetadata called before Start";
    case ServerToClientPullState::kStarted:
    case ServerToClientPullState::kStartedReading:
      // Only legal here for trailers-only responses: nothing to release.
      CHECK_EQ(server_to_client_push_state_,
               ServerToClientPushState::kTrailersOnly);
      return;
    case ServerToClientPullState::kProcessingServerInitialMetadata:
      server_to_client_pull_state_ = ServerToClientPullState::kIdle;
      server_to_client_pull_waiter_.Wake();
      break;
    case ServerToClientPullState::kProcessingServerInitialMetadataReading:
      server_to_client_pull_state_ = ServerToClientPullState::kReading;
      server_to_client_pull_waiter_.Wake();
      break;
    case ServerToClientPullState::kIdle:
    case ServerToClientPullState::kReading:
    case ServerToClientPullState::kProcessingServerToClientMessage:
    case ServerToClientPullState::kProcessingServerTrailingMetadata:
      LOG(FATAL) << "Out of order FinishPullServerInitialMetadata";
    case ServerToClientPullState::kTerminated:
      return;
  }
  DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kIdle ||
         server_to_client_pull_state_ == ServerToClientPullState::kReading)
      << server_to_client_pull_state_;
  switch (server_to_client_push_state_) {
    case ServerToClientPushState::kStart:
      LOG(FATAL) << "FinishPullServerInitialMetadata called before initial "
                    "metadata consumed";
    case ServerToClientPushState::kPushedServerInitialMetadata:
      server_to_client_push_state_ = ServerToClientPushState::kIdle;
      server_to_client_push_waiter_.Wake();
      break;
    case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
      // A message was pushed along with the metadata: keep it pending.
      server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
      server_to_client_push_waiter_.Wake();
      break;
    case ServerToClientPushState::kIdle:
    case ServerToClientPushState::kPushedMessage:
    case ServerToClientPushState::kTrailersOnly:
    case ServerToClientPushState::kFinished:
      LOG(FATAL) << "FinishPullServerInitialMetadata called twice";
  }
}
// Polls for the next server->client message. Resolves true when a message
// is ready to process, false when trailing metadata ends the stream, and
// Failure after termination; otherwise pends. If called before Start or
// before initial metadata is consumed, the "reading" intent is recorded in
// the pull state so it survives those transitions.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<ValueOrFailure<bool>>
CallState::PollPullServerToClientMessageAvailable() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PollPullServerToClientMessageAvailable: "
      << GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
                        server_to_client_push_state_,
                        server_trailing_metadata_state_);
  switch (server_to_client_pull_state_) {
    case ServerToClientPullState::kUnstarted:
      server_to_client_pull_state_ = ServerToClientPullState::kUnstartedReading;
      return server_to_client_pull_waiter_.pending();
    case ServerToClientPullState::kProcessingServerInitialMetadata:
      server_to_client_pull_state_ =
          ServerToClientPullState::kProcessingServerInitialMetadataReading;
      return server_to_client_pull_waiter_.pending();
    case ServerToClientPullState::kUnstartedReading:
    case ServerToClientPullState::kProcessingServerInitialMetadataReading:
      return server_to_client_pull_waiter_.pending();
    case ServerToClientPullState::kStarted:
      server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
      ABSL_FALLTHROUGH_INTENDED;
    case ServerToClientPullState::kStartedReading:
      if (server_to_client_push_state_ ==
          ServerToClientPushState::kTrailersOnly) {
        return false;
      }
      return server_to_client_pull_waiter_.pending();
    case ServerToClientPullState::kIdle:
      server_to_client_pull_state_ = ServerToClientPullState::kReading;
      ABSL_FALLTHROUGH_INTENDED;
    case ServerToClientPullState::kReading:
      break;
    case ServerToClientPullState::kProcessingServerToClientMessage:
      LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
                    "processing a message";
    case ServerToClientPullState::kProcessingServerTrailingMetadata:
      LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
                    "processing trailing metadata";
    case ServerToClientPullState::kTerminated:
      return Failure{};
  }
  DCHECK_EQ(server_to_client_pull_state_, ServerToClientPullState::kReading);
  switch (server_to_client_push_state_) {
    case ServerToClientPushState::kStart:
    case ServerToClientPushState::kPushedServerInitialMetadata:
    case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
      return server_to_client_push_waiter_.pending();
    case ServerToClientPushState::kIdle:
      if (server_trailing_metadata_state_ !=
          ServerTrailingMetadataState::kNotPushed) {
        return false;
      }
      // Register interest so a later trailing-metadata push re-polls us;
      // the result of pending() is deliberately discarded.
      server_trailing_metadata_waiter_.pending();
      return server_to_client_push_waiter_.pending();
    case ServerToClientPushState::kTrailersOnly:
      DCHECK_NE(server_trailing_metadata_state_,
                ServerTrailingMetadataState::kNotPushed);
      return false;
    case ServerToClientPushState::kPushedMessage:
      server_to_client_pull_state_ =
          ServerToClientPullState::kProcessingServerToClientMessage;
      server_to_client_pull_waiter_.Wake();
      return true;
    case ServerToClientPushState::kFinished:
      server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
      server_to_client_pull_waiter_.Wake();
      return Failure{};
  }
  Crash("Unreachable");
}
// Completes consumption of a server->client message, returning the pull
// state to idle and releasing the pusher. Fatal out of order; tolerant of a
// terminated/finished call.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullServerToClientMessage() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] FinishPullServerToClientMessage: "
      << GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
                        server_to_client_push_state_);
  switch (server_to_client_pull_state_) {
    case ServerToClientPullState::kUnstarted:
    case ServerToClientPullState::kUnstartedReading:
    case ServerToClientPullState::kStarted:
    case ServerToClientPullState::kStartedReading:
    case ServerToClientPullState::kProcessingServerInitialMetadata:
    case ServerToClientPullState::kProcessingServerInitialMetadataReading:
      LOG(FATAL)
          << "FinishPullServerToClientMessage called before metadata available";
    case ServerToClientPullState::kIdle:
      LOG(FATAL) << "FinishPullServerToClientMessage called twice";
    case ServerToClientPullState::kReading:
      LOG(FATAL) << "FinishPullServerToClientMessage called before "
                 << "PollPullServerToClientMessageAvailable";
    case ServerToClientPullState::kProcessingServerToClientMessage:
      server_to_client_pull_state_ = ServerToClientPullState::kIdle;
      server_to_client_pull_waiter_.Wake();
      break;
    case ServerToClientPullState::kProcessingServerTrailingMetadata:
      LOG(FATAL) << "FinishPullServerToClientMessage called while processing "
                    "trailing metadata";
    case ServerToClientPullState::kTerminated:
      break;
  }
  switch (server_to_client_push_state_) {
    case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
    case ServerToClientPushState::kPushedServerInitialMetadata:
    case ServerToClientPushState::kStart:
      LOG(FATAL) << "FinishPullServerToClientMessage called before initial "
                    "metadata consumed";
    case ServerToClientPushState::kTrailersOnly:
      LOG(FATAL) << "FinishPullServerToClientMessage called after "
                    "PushServerTrailingMetadata";
    case ServerToClientPushState::kPushedMessage:
      server_to_client_push_state_ = ServerToClientPushState::kIdle;
      server_to_client_push_waiter_.Wake();
      break;
    case ServerToClientPushState::kIdle:
      LOG(FATAL) << "FinishPullServerToClientMessage called without a message";
    case ServerToClientPushState::kFinished:
      break;
  }
}
// Polls for trailing metadata becoming pullable. Resolves Empty once the
// pull path may process trailing metadata (or the call already terminated);
// pends while other pull operations are mid-flight or while the push side
// may still deliver data that must be drained first. Fatal if called twice.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<Empty>
CallState::PollServerTrailingMetadataAvailable() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PollServerTrailingMetadataAvailable: "
      << GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
                        server_to_client_push_state_,
                        server_trailing_metadata_state_,
                        server_trailing_metadata_waiter_.DebugString());
  switch (server_to_client_pull_state_) {
    case ServerToClientPullState::kProcessingServerInitialMetadata:
    case ServerToClientPullState::kProcessingServerToClientMessage:
    case ServerToClientPullState::kProcessingServerInitialMetadataReading:
    case ServerToClientPullState::kUnstartedReading:
      // Another pull operation is in progress; wait for it to finish.
      return server_to_client_pull_waiter_.pending();
    case ServerToClientPullState::kStartedReading:
    case ServerToClientPullState::kReading:
      switch (server_to_client_push_state_) {
        case ServerToClientPushState::kTrailersOnly:
        case ServerToClientPushState::kIdle:
        case ServerToClientPushState::kStart:
        case ServerToClientPushState::kFinished:
          if (server_trailing_metadata_state_ !=
              ServerTrailingMetadataState::kNotPushed) {
            server_to_client_pull_state_ =
                ServerToClientPullState::kProcessingServerTrailingMetadata;
            server_to_client_pull_waiter_.Wake();
            return Empty{};
          }
          ABSL_FALLTHROUGH_INTENDED;
        case ServerToClientPushState::kPushedServerInitialMetadata:
        case ServerToClientPushState::
            kPushedServerInitialMetadataAndPushedMessage:
        case ServerToClientPushState::kPushedMessage:
          // Register interest in push-state changes too; the result of
          // pending() is deliberately discarded.
          server_to_client_push_waiter_.pending();
          return server_to_client_pull_waiter_.pending();
      }
      break;
    case ServerToClientPullState::kStarted:
    case ServerToClientPullState::kUnstarted:
    case ServerToClientPullState::kIdle:
      if (server_trailing_metadata_state_ !=
          ServerTrailingMetadataState::kNotPushed) {
        server_to_client_pull_state_ =
            ServerToClientPullState::kProcessingServerTrailingMetadata;
        server_to_client_pull_waiter_.Wake();
        return Empty{};
      }
      return server_trailing_metadata_waiter_.pending();
    case ServerToClientPullState::kProcessingServerTrailingMetadata:
      LOG(FATAL) << "PollServerTrailingMetadataAvailable called twice";
    case ServerToClientPullState::kTerminated:
      return Empty{};
  }
  Crash("Unreachable");
}
// Completes consumption of trailing metadata, recording (via the pulled
// state) whether the close was a cancellation so PollWasCancelled can report
// it. Fatal if called before PollServerTrailingMetadataAvailable or twice.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullServerTrailingMetadata() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] FinishPullServerTrailingMetadata: "
      << GRPC_DUMP_ARGS(this, server_trailing_metadata_state_,
                        server_trailing_metadata_waiter_.DebugString());
  switch (server_trailing_metadata_state_) {
    case ServerTrailingMetadataState::kNotPushed:
      LOG(FATAL) << "FinishPullServerTrailingMetadata called before "
                    "PollServerTrailingMetadataAvailable";
    case ServerTrailingMetadataState::kPushed:
      server_trailing_metadata_state_ = ServerTrailingMetadataState::kPulled;
      server_trailing_metadata_waiter_.Wake();
      break;
    case ServerTrailingMetadataState::kPushedCancel:
      server_trailing_metadata_state_ =
          ServerTrailingMetadataState::kPulledCancel;
      server_trailing_metadata_waiter_.Wake();
      break;
    case ServerTrailingMetadataState::kPulled:
    case ServerTrailingMetadataState::kPulledCancel:
      LOG(FATAL) << "FinishPullServerTrailingMetadata called twice";
  }
}
// Polls for the final disposition of the call: resolves true if the call
// ended in cancellation, false if it completed normally. Pends until the
// trailing metadata has been pulled by the other side.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<bool>
CallState::PollWasCancelled() {
  GRPC_TRACE_LOG(call_state, INFO)
      << "[call_state] PollWasCancelled: "
      << GRPC_DUMP_ARGS(this, server_trailing_metadata_state_);
  switch (server_trailing_metadata_state_) {
    case ServerTrailingMetadataState::kNotPushed:
    case ServerTrailingMetadataState::kPushed:
    case ServerTrailingMetadataState::kPushedCancel: {
      return server_trailing_metadata_waiter_.pending();
    }
    case ServerTrailingMetadataState::kPulled:
      return false;
    case ServerTrailingMetadataState::kPulledCancel:
      return true;
  }
  Crash("Unreachable");
}
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_TRANSPORT_CALL_STATE_H

@ -58,7 +58,8 @@ class CallStarter final : public UnstartedCallDestination {
}
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
destination_->HandleCall(unstarted_call_handler.StartCall(stack_));
unstarted_call_handler.AddCallStack(stack_);
destination_->HandleCall(unstarted_call_handler.StartCall());
}
private:
@ -79,16 +80,8 @@ class TerminalInterceptor final : public UnstartedCallDestination {
}
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
unstarted_call_handler.SpawnGuarded(
"start_call",
Map(interception_chain_detail::HijackCall(unstarted_call_handler,
destination_, stack_),
[](ValueOrFailure<HijackedCall> hijacked_call) -> StatusFlag {
if (!hijacked_call.ok()) return Failure{};
ForwardCall(hijacked_call.value().original_call_handler(),
hijacked_call.value().MakeLastCall());
return Success{};
}));
unstarted_call_handler.AddCallStack(stack_);
destination_->StartCall(unstarted_call_handler);
}
private:

@ -64,56 +64,64 @@ class HijackedCall final {
CallHandler call_handler_;
};
namespace interception_chain_detail {
inline auto HijackCall(UnstartedCallHandler unstarted_call_handler,
RefCountedPtr<UnstartedCallDestination> destination,
RefCountedPtr<CallFilters::Stack> stack) {
auto call_handler = unstarted_call_handler.StartCall(stack);
return Map(
call_handler.PullClientInitialMetadata(),
[call_handler,
destination](ValueOrFailure<ClientMetadataHandle> metadata) mutable
-> ValueOrFailure<HijackedCall> {
if (!metadata.ok()) return Failure{};
return HijackedCall(std::move(metadata.value()), std::move(destination),
std::move(call_handler));
});
}
} // namespace interception_chain_detail
// A delegating UnstartedCallDestination for use as a hijacking filter.
//
// This class provides the final StartCall method, and delegates to the
// InterceptCall() method for the actual interception. It has the same semantics
// as StartCall, but affords the implementation the ability to prepare the
// UnstartedCallHandler appropriately.
//
// Implementations may look at the unprocessed initial metadata
// and decide to do one of two things:
// and decide to do one of three things:
//
// 1. It can hijack the call. Returns a HijackedCall object that can
// be used to start new calls with the same metadata.
//
// 2. It can consume the call by calling `Consume`.
//
// 3. It can pass the call through to the next interceptor by calling
// `PassThrough`.
//
// Upon the StartCall call the UnstartedCallHandler will be from the last
// *Interceptor* in the call chain (without having been processed by any
// intervening filters) -- note that this is commonly not useful (not enough
// guarantees), and so it's usually better to Hijack and examine the metadata.
class Interceptor : public UnstartedCallDestination {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) final {
unstarted_call_handler.AddCallStack(filter_stack_);
InterceptCall(std::move(unstarted_call_handler));
}
protected:
virtual void InterceptCall(UnstartedCallHandler unstarted_call_handler) = 0;
// Returns a promise that resolves to a HijackedCall instance.
// Hijacking is the process of taking over a call and starting one or more new
// ones.
auto Hijack(UnstartedCallHandler unstarted_call_handler) {
return interception_chain_detail::HijackCall(
std::move(unstarted_call_handler), wrapped_destination_, filter_stack_);
auto call_handler = unstarted_call_handler.StartCall();
return Map(call_handler.PullClientInitialMetadata(),
[call_handler, destination = wrapped_destination_](
ValueOrFailure<ClientMetadataHandle> metadata) mutable
-> ValueOrFailure<HijackedCall> {
if (!metadata.ok()) return Failure{};
return HijackedCall(std::move(metadata.value()),
std::move(destination),
std::move(call_handler));
});
}
// Consume this call - it will not be passed on to any further filters.
CallHandler Consume(UnstartedCallHandler unstarted_call_handler) {
return unstarted_call_handler.StartCall(filter_stack_);
return unstarted_call_handler.StartCall();
}
// TODO(ctiller): Consider a Passthrough() method that allows the call to be
// passed on to the next filter in the chain without any interception by the
// current filter.
// Pass through this call to the next filter.
void PassThrough(UnstartedCallHandler unstarted_call_handler) {
wrapped_destination_->StartCall(std::move(unstarted_call_handler));
}
private:
friend class InterceptionChainBuilder;

@ -76,6 +76,8 @@ class SubchannelInterface : public DualRefCounted<SubchannelInterface> {
// Cancels a connectivity state watch.
// If the watcher has already been destroyed, this is a no-op.
// TODO(roth): This interface has an ABA issue. Fix this before we
// make this API public.
virtual void CancelConnectivityStateWatch(
ConnectivityStateWatcherInterface* watcher) = 0;
@ -96,6 +98,8 @@ class SubchannelInterface : public DualRefCounted<SubchannelInterface> {
std::unique_ptr<DataWatcherInterface> watcher) = 0;
// Cancels a data watch.
// TODO(roth): This interface has an ABA issue. Fix this before we
// make this API public.
virtual void CancelDataWatcher(DataWatcherInterface* watcher) = 0;
protected:

@ -419,7 +419,10 @@ class Channel(_base_channel.Channel):
# Locate ones created by `aio.Call`.
frame = stack[0]
candidate = frame.f_locals.get("self")
if candidate:
# Check explicitly against None instead of the more pythonic
# 'if candidate:', since truthiness testing would invoke the object's
# '__bool__', which a coroutine may not implement.
if candidate is not None:
if isinstance(candidate, _base_call.Call):
if hasattr(candidate, "_channel"):
# For intercepted Call object

@ -692,6 +692,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/transport/call_filters.cc',
'src/core/lib/transport/call_final_info.cc',
'src/core/lib/transport/call_spine.cc',
'src/core/lib/transport/call_state.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/interception_chain.cc',

@ -136,7 +136,7 @@ void BM_Unary(benchmark::State& state) {
// back a response.
auto unstarted_handler = helper.TakeHandler();
unstarted_handler.SpawnInfallible("run_handler", [&]() mutable {
auto handler = unstarted_handler.StartWithEmptyFilterStack();
auto handler = unstarted_handler.StartCall();
handler.PushServerInitialMetadata(Arena::MakePooled<ServerMetadata>());
auto response =
Arena::MakePooled<Message>(SliceBuffer(response_payload.Copy()), 0);

@ -117,7 +117,7 @@ class ClientCallTest : public YodelTest {
void Orphaned() override {}
void StartCall(UnstartedCallHandler handler) override {
CHECK(!test_->handler_.has_value());
test_->handler_.emplace(handler.StartWithEmptyFilterStack());
test_->handler_.emplace(handler.StartCall());
}
private:

@ -49,7 +49,7 @@ class ServerCallTest : public YodelTest {
SimpleArenaAllocator()->MakeArena());
call.initiator.SpawnGuarded(
"initial_metadata",
[this, handler = call.handler.StartWithEmptyFilterStack()]() mutable {
[this, handler = call.handler.StartCall()]() mutable {
return TrySeq(
handler.PullClientInitialMetadata(),
[this,

@ -127,7 +127,7 @@ class ClientChannelTest : public YodelTest {
class TestCallDestination final : public UnstartedCallDestination {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
handlers_.push(unstarted_call_handler.StartWithEmptyFilterStack());
handlers_.push(unstarted_call_handler.StartCall());
}
absl::optional<CallHandler> PopHandler() {

@ -75,7 +75,7 @@ class LoadBalancedCallDestinationTest : public YodelTest {
class TestCallDestination final : public UnstartedCallDestination {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
handlers_.push(unstarted_call_handler.StartWithEmptyFilterStack());
handlers_.push(unstarted_call_handler.StartCall());
}
absl::optional<CallHandler> PopHandler() {

@ -317,6 +317,7 @@ const NoInterceptor TestFilter1::Call::OnServerToClientMessage;
const NoInterceptor TestFilter1::Call::OnFinalize;
TEST(ChannelInitTest, CanCreateFilterWithCall) {
grpc::testing::TestGrpcScope g;
ChannelInit::Builder b;
b.RegisterFilter<TestFilter1>(GRPC_CLIENT_CHANNEL);
auto init = b.Build();

@ -87,6 +87,21 @@ grpc_cc_test(
],
)
# Unit tests for CallState (//src/core:call_state). The tests drive the
# state machine synchronously with a mock activity, so neither the event
# engine nor polling infrastructure is required.
grpc_cc_test(
name = "call_state_test",
srcs = ["call_state_test.cc"],
external_deps = [
"gtest",
],
language = "C++",
uses_event_engine = False,
uses_polling = False,
deps = [
"//src/core:call_state",
"//test/core/promise:poll_matcher",
],
)
grpc_cc_test(
name = "connectivity_state_test",
srcs = ["connectivity_state_test.cc"],

@ -27,7 +27,7 @@ class CallSpineFixture {
BenchmarkCall MakeCall() {
auto p = MakeCallPair(Arena::MakePooled<ClientMetadata>(),
event_engine_.get(), arena_allocator_->MakeArena());
return {std::move(p.initiator), p.handler.StartCall(stack_)};
return {std::move(p.initiator), p.handler.StartCall()};
}
ServerMetadataHandle MakeServerInitialMetadata() {
@ -48,8 +48,6 @@ class CallSpineFixture {
ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator(
"test-allocator"),
1024);
RefCountedPtr<CallFilters::Stack> stack_ =
CallFilters::StackBuilder().Build();
};
GRPC_CALL_SPINE_BENCHMARK(CallSpineFixture);
@ -61,7 +59,7 @@ class ForwardCallFixture {
auto p2 = MakeCallPair(Arena::MakePooled<ClientMetadata>(),
event_engine_.get(), arena_allocator_->MakeArena());
p1.handler.SpawnInfallible("initial_metadata", [&]() {
auto p1_handler = p1.handler.StartCall(stack_);
auto p1_handler = p1.handler.StartCall();
return Map(
p1_handler.PullClientInitialMetadata(),
[p1_handler, &p2](ValueOrFailure<ClientMetadataHandle> md) mutable {
@ -72,7 +70,7 @@ class ForwardCallFixture {
});
absl::optional<CallHandler> p2_handler;
p2.handler.SpawnInfallible("start", [&]() {
p2_handler = p2.handler.StartCall(stack_);
p2_handler = p2.handler.StartCall();
return Empty{};
});
return {std::move(p1.initiator), std::move(*p2_handler)};
@ -96,8 +94,6 @@ class ForwardCallFixture {
ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator(
"test-allocator"),
1024);
RefCountedPtr<CallFilters::Stack> stack_ =
CallFilters::StackBuilder().Build();
};
GRPC_CALL_SPINE_BENCHMARK(ForwardCallFixture);

@ -27,9 +27,6 @@ using testing::StrictMock;
namespace grpc_core {
namespace {
// Offset a void pointer by a given amount
void* Offset(void* base, size_t amt) { return static_cast<char*>(base) + amt; }
// A mock activity that can be activated and deactivated.
class MockActivity : public Activity, public Wakeable {
public:
@ -1102,246 +1099,6 @@ TEST(OperationExecutorTest, PromiseTwo) {
gpr_free_aligned(call_data1);
}
///////////////////////////////////////////////////////////////////////////////
// CallState
TEST(CallStateTest, NoOp) { CallState state; }
TEST(CallStateTest, StartTwiceCrashes) {
CallState state;
state.Start();
EXPECT_DEATH(state.Start(), "");
}
TEST(CallStateTest, PullServerInitialMetadataBlocksUntilStart) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady());
}
TEST(CallStateTest, PullClientInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_DEATH(state.FinishPullClientInitialMetadata(), "");
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
}
TEST(CallStateTest, ClientToServerMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPullClientInitialMetadata();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientInitialMetadata());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
}
TEST(CallStateTest, RepeatedClientToServerMessagesWithHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 3: push before polling and half close
state.BeginPushClientToServerMessage();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// ... and now we should see the half close
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
TEST(CallStateTest, ImmediateClientToServerHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
TEST(CallStateTest, ServerToClientMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity,
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(),
IsReady(true)));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerInitialMetadata());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
TEST(CallStateTest, RepeatedServerToClientMessages) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.PushServerInitialMetadata();
state.Start();
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
state.FinishPullServerInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 3: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
TEST(CallStateTest, ReceiveTrailersOnly) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
state.FinishPullServerTrailingMetadata();
}
TEST(CallStateTest, ReceiveTrailersOnlySkipsInitialMetadataOnUnstartedCalls) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
state.FinishPullServerTrailingMetadata();
}
TEST(CallStateTest, RecallNoCancellation) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
EXPECT_THAT(state.PollWasCancelled(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
EXPECT_THAT(state.PollWasCancelled(), IsReady(false));
}
TEST(CallStateTest, RecallCancellation) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(true);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
EXPECT_THAT(state.PollWasCancelled(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
EXPECT_THAT(state.PollWasCancelled(), IsReady(true));
}
TEST(CallStateTest, ReceiveTrailingMetadataAfterMessageRead) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerInitialMetadata();
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerTrailingMetadata(false));
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(false));
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
}
} // namespace filters_detail
///////////////////////////////////////////////////////////////////////////////
@ -1405,7 +1162,101 @@ TEST(CallFiltersTest, UnaryCall) {
builder.Add(&f2);
auto arena = SimpleArenaAllocator()->MakeArena();
CallFilters filters(Arena::MakePooled<ClientMetadata>());
filters.SetStack(builder.Build());
filters.AddStack(builder.Build());
filters.Start();
promise_detail::Context<Arena> ctx(arena.get());
StrictMock<MockActivity> activity;
activity.Activate();
// Pull client initial metadata
auto pull_client_initial_metadata = filters.PullClientInitialMetadata();
EXPECT_THAT(pull_client_initial_metadata(), IsReady());
Mock::VerifyAndClearExpectations(&activity);
// Push client to server message
auto push_client_to_server_message = filters.PushClientToServerMessage(
Arena::MakePooled<Message>(SliceBuffer(), 0));
EXPECT_THAT(push_client_to_server_message(), IsPending());
auto pull_client_to_server_message = filters.PullClientToServerMessage();
// Pull client to server message, expect a wakeup
EXPECT_WAKEUP(activity,
EXPECT_THAT(pull_client_to_server_message(), IsReady()));
// Push should be done
EXPECT_THAT(push_client_to_server_message(), IsReady(Success{}));
// Push server initial metadata
filters.PushServerInitialMetadata(Arena::MakePooled<ServerMetadata>());
auto pull_server_initial_metadata = filters.PullServerInitialMetadata();
// Pull server initial metadata
EXPECT_THAT(pull_server_initial_metadata(), IsReady());
Mock::VerifyAndClearExpectations(&activity);
// Push server to client message
auto push_server_to_client_message = filters.PushServerToClientMessage(
Arena::MakePooled<Message>(SliceBuffer(), 0));
EXPECT_THAT(push_server_to_client_message(), IsPending());
auto pull_server_to_client_message = filters.PullServerToClientMessage();
// Pull server to client message, expect a wakeup
EXPECT_WAKEUP(activity,
EXPECT_THAT(pull_server_to_client_message(), IsReady()));
// Push should be done
EXPECT_THAT(push_server_to_client_message(), IsReady(Success{}));
// Push server trailing metadata
filters.PushServerTrailingMetadata(Arena::MakePooled<ServerMetadata>());
// Pull server trailing metadata
auto pull_server_trailing_metadata = filters.PullServerTrailingMetadata();
// Should be done
EXPECT_THAT(pull_server_trailing_metadata(), IsReady());
filters.Finalize(nullptr);
EXPECT_THAT(steps,
::testing::ElementsAre(
"f1:OnClientInitialMetadata", "f2:OnClientInitialMetadata",
"f1:OnClientToServerMessage", "f2:OnClientToServerMessage",
"f2:OnServerInitialMetadata", "f1:OnServerInitialMetadata",
"f2:OnServerToClientMessage", "f1:OnServerToClientMessage",
"f2:OnServerTrailingMetadata", "f1:OnServerTrailingMetadata",
"f1:OnFinalize", "f2:OnFinalize"));
}
TEST(CallFiltersTest, UnaryCallWithMultiStack) {
struct Filter {
struct Call {
void OnClientInitialMetadata(ClientMetadata&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnClientInitialMetadata"));
}
void OnServerInitialMetadata(ServerMetadata&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnServerInitialMetadata"));
}
void OnClientToServerMessage(Message&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnClientToServerMessage"));
}
void OnClientToServerHalfClose(Filter* f) {
f->steps.push_back(
absl::StrCat(f->label, ":OnClientToServerHalfClose"));
}
void OnServerToClientMessage(Message&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnServerToClientMessage"));
}
void OnServerTrailingMetadata(ServerMetadata&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnServerTrailingMetadata"));
}
void OnFinalize(const grpc_call_final_info*, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnFinalize"));
}
std::unique_ptr<int> i = std::make_unique<int>(3);
};
const std::string label;
std::vector<std::string>& steps;
};
std::vector<std::string> steps;
Filter f1{"f1", steps};
Filter f2{"f2", steps};
CallFilters::StackBuilder builder1;
CallFilters::StackBuilder builder2;
builder1.Add(&f1);
builder2.Add(&f2);
auto arena = SimpleArenaAllocator()->MakeArena();
CallFilters filters(Arena::MakePooled<ClientMetadata>());
filters.AddStack(builder1.Build());
filters.AddStack(builder2.Build());
filters.Start();
promise_detail::Context<Arena> ctx(arena.get());
StrictMock<MockActivity> activity;
activity.Activate();

@ -239,7 +239,8 @@ class FilterFixture {
BenchmarkCall MakeCall() {
auto p = MakeCallPair(traits_.MakeClientInitialMetadata(),
event_engine_.get(), arena_allocator_->MakeArena());
return {std::move(p.initiator), p.handler.StartCall(stack_)};
p.handler.AddCallStack(stack_);
return {std::move(p.initiator), p.handler.StartCall()};
}
ServerMetadataHandle MakeServerInitialMetadata() {
@ -284,7 +285,7 @@ class UnstartedCallDestinationFixture {
absl::optional<CallHandler> started_handler;
Notification started;
handler.SpawnInfallible("handler_setup", [&]() {
started_handler = handler.StartCall(stack_);
started_handler = handler.StartCall();
started.Notify();
return Empty{};
});
@ -296,7 +297,6 @@ class UnstartedCallDestinationFixture {
~UnstartedCallDestinationFixture() {
// TODO(ctiller): entire destructor can be deleted once ExecCtx is gone.
ExecCtx exec_ctx;
stack_.reset();
top_destination_.reset();
bottom_destination_.reset();
arena_allocator_.reset();
@ -353,8 +353,6 @@ class UnstartedCallDestinationFixture {
MakeRefCounted<SinkDestination>();
RefCountedPtr<UnstartedCallDestination> top_destination_ =
traits_->CreateCallDestination(bottom_destination_);
RefCountedPtr<CallFilters::Stack> stack_ =
CallFilters::StackBuilder().Build();
};
} // namespace grpc_core

@ -145,13 +145,13 @@ void CallSpineTest::UnaryRequest(CallInitiator initiator, CallHandler handler) {
CALL_SPINE_TEST(UnaryRequest) {
auto call = MakeCall(MakeClientInitialMetadata());
UnaryRequest(call.initiator, call.handler.StartWithEmptyFilterStack());
UnaryRequest(call.initiator, call.handler.StartCall());
WaitForAllPendingWork();
}
CALL_SPINE_TEST(UnaryRequestThroughForwardCall) {
auto call1 = MakeCall(MakeClientInitialMetadata());
auto handler = call1.handler.StartWithEmptyFilterStack();
auto handler = call1.handler.StartCall();
SpawnTestSeq(
call1.initiator, "initiator",
[handler]() mutable { return handler.PullClientInitialMetadata(); },
@ -160,7 +160,7 @@ CALL_SPINE_TEST(UnaryRequestThroughForwardCall) {
EXPECT_TRUE(md.ok());
auto call2 = MakeCall(std::move(md.value()));
ForwardCall(handler, call2.initiator);
UnaryRequest(initiator, call2.handler.StartWithEmptyFilterStack());
UnaryRequest(initiator, call2.handler.StartCall());
return Empty{};
});
WaitForAllPendingWork();
@ -168,7 +168,7 @@ CALL_SPINE_TEST(UnaryRequestThroughForwardCall) {
CALL_SPINE_TEST(UnaryRequestThroughForwardCallWithServerTrailingMetadataHook) {
auto call1 = MakeCall(MakeClientInitialMetadata());
auto handler = call1.handler.StartWithEmptyFilterStack();
auto handler = call1.handler.StartCall();
bool got_md = false;
SpawnTestSeq(
call1.initiator, "initiator",
@ -179,7 +179,7 @@ CALL_SPINE_TEST(UnaryRequestThroughForwardCallWithServerTrailingMetadataHook) {
auto call2 = MakeCall(std::move(md.value()));
ForwardCall(handler, call2.initiator,
[&got_md](ServerMetadata&) { got_md = true; });
UnaryRequest(initiator, call2.handler.StartWithEmptyFilterStack());
UnaryRequest(initiator, call2.handler.StartCall());
return Empty{};
});
WaitForAllPendingWork();

@ -0,0 +1,310 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/transport/call_state.h"
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "test/core/promise/poll_matcher.h"
using testing::Mock;
using testing::StrictMock;
namespace grpc_core {
namespace {
// A mock activity that can be activated and deactivated.
// Implements both Activity and Wakeable, funneling every wakeup path into
// the single WakeupRequested() mock method so tests can assert that a given
// CallState transition woke up pending pollers.
class MockActivity : public Activity, public Wakeable {
public:
MOCK_METHOD(void, WakeupRequested, ());
// All wakeup entry points delegate to the mock so expectations can be set
// uniformly regardless of which path triggered the wakeup.
void ForceImmediateRepoll(WakeupMask /*mask*/) override { WakeupRequested(); }
void Orphan() override {}
Waker MakeOwningWaker() override { return Waker(this, 0); }
Waker MakeNonOwningWaker() override { return Waker(this, 0); }
void Wakeup(WakeupMask /*mask*/) override { WakeupRequested(); }
void WakeupAsync(WakeupMask /*mask*/) override { WakeupRequested(); }
void Drop(WakeupMask /*mask*/) override {}
std::string DebugTag() const override { return "MockActivity"; }
std::string ActivityDebugTag(WakeupMask /*mask*/) const override {
return DebugTag();
}
// Installs this activity as the current activity context. Idempotent: a
// second call while already active is a no-op.
void Activate() {
if (scoped_activity_ == nullptr) {
scoped_activity_ = std::make_unique<ScopedActivity>(this);
}
}
// Removes this activity from the current context.
void Deactivate() { scoped_activity_.reset(); }
private:
std::unique_ptr<ScopedActivity> scoped_activity_;
};
// Asserts that executing `statement` requests at least one wakeup on
// `activity`, then verifies and clears the expectation so subsequent
// statements start from a clean mock.
#define EXPECT_WAKEUP(activity, statement) \
EXPECT_CALL((activity), WakeupRequested()).Times(::testing::AtLeast(1)); \
statement; \
Mock::VerifyAndClearExpectations(&(activity));
} // namespace
TEST(CallStateTest, NoOp) { CallState state; }
// Start() may be called at most once; a second call must trip a fatal
// debug assertion.
TEST(CallStateTest, StartTwiceCrashes) {
CallState state;
state.Start();
EXPECT_DEATH(state.Start(), "");
}
// Server initial metadata cannot be pulled before the call is started:
// even after metadata has been pushed, the pull stays pending until
// Start() is called.
TEST(CallStateTest, PullServerInitialMetadataBlocksUntilStart) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady());
}
// Finish without a matching Begin is a fatal error; Begin followed by
// Finish is the valid protocol for pulling client initial metadata.
TEST(CallStateTest, PullClientInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_DEATH(state.FinishPullClientInitialMetadata(), "");
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
}
// A client->server message pushed before client initial metadata has been
// pulled stays invisible to the pull side; once the metadata pull finishes,
// the message becomes pullable, and the push resolves only after the pull
// side finishes consuming it.
TEST(CallStateTest, ClientToServerMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPullClientInitialMetadata();
// Still blocked: the metadata pull has begun but not finished.
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientInitialMetadata());
// Metadata done: message becomes pullable; push resolves after the pull
// side finishes with the message.
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
}
// Streams four client->server messages (exercising both poll-then-push and
// push-then-poll orderings), with a half close queued behind the final
// message; end-of-stream (pull ready false) surfaces only after the last
// message has been consumed.
TEST(CallStateTest, RepeatedClientToServerMessagesWithHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 3: push before polling and half close
state.BeginPushClientToServerMessage();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// ... and now we should see the half close
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
// A half close with no messages in flight is visible immediately as
// end-of-stream: the message pull resolves ready(false).
TEST(CallStateTest, ImmediateClientToServerHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
// A server->client message becomes available only after Start(), the push
// of server initial metadata, AND the completion of the metadata pull; the
// push itself resolves only once the message pull finishes.
TEST(CallStateTest, ServerToClientMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
// Observing the metadata itself wakes pollers, but the message stays
// pending until the metadata pull is finished.
EXPECT_WAKEUP(activity,
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(),
IsReady(true)));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerInitialMetadata());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
// Streams four server->client messages (exercising both poll-then-push and
// push-then-poll orderings); each push resolves only after the
// corresponding pull has finished.
TEST(CallStateTest, RepeatedServerToClientMessages) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.PushServerInitialMetadata();
state.Start();
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
state.FinishPullServerInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 3: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
// Trailers-only response on a started call: no server initial metadata is
// ever pushed, so pulling initial metadata resolves to false and trailing
// metadata becomes available immediately afterwards.
TEST(CallStateTest, ReceiveTrailersOnly) {
  StrictMock<MockActivity> activity;
  activity.Activate();
  CallState state;
  state.Start();
  // `false` here is the not-cancelled flag (compare RecallCancellation, which
  // passes true and later observes PollWasCancelled() == true).
  state.PushServerTrailingMetadata(false);
  EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
  state.FinishPullServerInitialMetadata();
  EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
  state.FinishPullServerTrailingMetadata();
}
// Same trailers-only flow as ReceiveTrailersOnly, but without calling
// Start(): trailing metadata pushed onto an unstarted call must still let the
// initial-metadata pull resolve (to false) rather than block.
TEST(CallStateTest, ReceiveTrailersOnlySkipsInitialMetadataOnUnstartedCalls) {
  StrictMock<MockActivity> activity;
  activity.Activate();
  CallState state;
  state.PushServerTrailingMetadata(false);
  EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
  state.FinishPullServerInitialMetadata();
  EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
  state.FinishPullServerTrailingMetadata();
}
// PollWasCancelled() must stay pending until the trailing metadata has been
// fully pulled, then report the flag given to PushServerTrailingMetadata —
// here false (not cancelled).
TEST(CallStateTest, RecallNoCancellation) {
  StrictMock<MockActivity> activity;
  activity.Activate();
  CallState state;
  state.Start();
  state.PushServerTrailingMetadata(false);
  EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
  state.FinishPullServerInitialMetadata();
  EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
  // Pending until FinishPullServerTrailingMetadata() runs, which also wakes
  // the polling activity.
  EXPECT_THAT(state.PollWasCancelled(), IsPending());
  EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
  EXPECT_THAT(state.PollWasCancelled(), IsReady(false));
}
// Mirror of RecallNoCancellation with the cancellation flag set: after the
// trailing metadata is pulled, PollWasCancelled() resolves to true.
TEST(CallStateTest, RecallCancellation) {
  StrictMock<MockActivity> activity;
  activity.Activate();
  CallState state;
  state.Start();
  state.PushServerTrailingMetadata(true);
  EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
  state.FinishPullServerInitialMetadata();
  EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
  // Pending until FinishPullServerTrailingMetadata() runs, which also wakes
  // the polling activity.
  EXPECT_THAT(state.PollWasCancelled(), IsPending());
  EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
  EXPECT_THAT(state.PollWasCancelled(), IsReady(true));
}
// A pull blocked waiting for the next server->client message must be woken
// when trailing metadata arrives instead, and resolve to false (no further
// messages) with the trailing metadata then available.
TEST(CallStateTest, ReceiveTrailingMetadataAfterMessageRead) {
  StrictMock<MockActivity> activity;
  activity.Activate();
  CallState state;
  state.Start();
  state.PushServerInitialMetadata();
  EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
  state.FinishPullServerInitialMetadata();
  // No message has been pushed: the pull parks pending...
  EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
  // ...and pushing trailing metadata wakes it up.
  EXPECT_WAKEUP(activity, state.PushServerTrailingMetadata(false));
  EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(false));
  EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
}
} // namespace grpc_core
// Test entry point: initialize GoogleTest (consuming its command-line flags)
// and the gRPC tracer registry before running all tests.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  grpc_tracer_init();
  return RUN_ALL_TESTS();
}

@ -188,7 +188,7 @@ TEST_F(ClientTransportTest, AddOneStreamWithWriteFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
call.initiator.SpawnGuarded("test-send",
[initiator = call.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);
@ -232,7 +232,7 @@ TEST_F(ClientTransportTest, AddOneStreamWithReadFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
call.initiator.SpawnGuarded("test-send",
[initiator = call.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);
@ -284,9 +284,9 @@ TEST_F(ClientTransportTest, AddMultipleStreamWithWriteFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call1 = MakeCall(TestInitialMetadata());
transport->StartCall(call1.handler.StartWithEmptyFilterStack());
transport->StartCall(call1.handler.StartCall());
auto call2 = MakeCall(TestInitialMetadata());
transport->StartCall(call2.handler.StartWithEmptyFilterStack());
transport->StartCall(call2.handler.StartCall());
call1.initiator.SpawnGuarded(
"test-send-1", [initiator = call1.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);
@ -352,9 +352,9 @@ TEST_F(ClientTransportTest, AddMultipleStreamWithReadFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call1 = MakeCall(TestInitialMetadata());
transport->StartCall(call1.handler.StartWithEmptyFilterStack());
transport->StartCall(call1.handler.StartCall());
auto call2 = MakeCall(TestInitialMetadata());
transport->StartCall(call2.handler.StartWithEmptyFilterStack());
transport->StartCall(call2.handler.StartCall());
call1.initiator.SpawnGuarded(
"test-send", [initiator = call1.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);

@ -120,7 +120,7 @@ TEST_F(TransportTest, AddOneStream) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
StrictMock<MockFunction<void()>> on_done;
EXPECT_CALL(on_done, Call());
control_endpoint.ExpectWrite(
@ -206,7 +206,7 @@ TEST_F(TransportTest, AddOneStreamMultipleMessages) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
StrictMock<MockFunction<void()>> on_done;
EXPECT_CALL(on_done, Call());
control_endpoint.ExpectWrite(

@ -125,7 +125,7 @@ TEST_F(TransportTest, ReadAndWriteOneMessage) {
.get_pointer(HttpPathMetadata())
->as_string_view(),
"/demo.Service/Step");
auto handler = unstarted_call_handler.StartWithEmptyFilterStack();
auto handler = unstarted_call_handler.StartCall();
handler.SpawnInfallible("test-io", [&on_done, handler]() mutable {
return Seq(
handler.PullClientInitialMetadata(),

@ -156,7 +156,7 @@ const NoInterceptor FailsToInstantiateFilter<I>::Call::OnFinalize;
template <int I>
class TestConsumingInterceptor final : public Interceptor {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
Consume(std::move(unstarted_call_handler))
.PushServerTrailingMetadata(
ServerMetadataFromStatus(absl::InternalError("👊 consumed")));
@ -169,13 +169,30 @@ class TestConsumingInterceptor final : public Interceptor {
}
};
///////////////////////////////////////////////////////////////////////////////
// Test call interceptor - passes through calls
// Interceptor that forwards every call unchanged via PassThrough(); the <I>
// parameter only distinguishes instantiations (it is logged by
// MaybeLogCreation in Create). NOTE(review): this block sits inside an added
// diff hunk; confirm against the full file before relying on context.
template <int I>
class TestPassThroughInterceptor final : public Interceptor {
 public:
  // Hand the unstarted call straight to the next element in the chain.
  void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
    PassThrough(std::move(unstarted_call_handler));
  }
  void Orphaned() override {}
  // Factory used by InterceptionChainBuilder::Add<>; records the creation
  // (channel args + index I) and never fails.
  static absl::StatusOr<RefCountedPtr<TestPassThroughInterceptor<I>>> Create(
      const ChannelArgs& channel_args, ChannelFilter::Args filter_args) {
    MaybeLogCreation(channel_args, filter_args, I);
    return MakeRefCounted<TestPassThroughInterceptor<I>>();
  }
};
///////////////////////////////////////////////////////////////////////////////
// Test call interceptor - fails to instantiate
template <int I>
class TestFailingInterceptor final : public Interceptor {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
Crash("unreachable");
}
void Orphaned() override {}
@ -192,7 +209,7 @@ class TestFailingInterceptor final : public Interceptor {
template <int I>
class TestHijackingInterceptor final : public Interceptor {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
unstarted_call_handler.SpawnInfallible(
"hijack", [this, unstarted_call_handler]() mutable {
return Map(Hijack(std::move(unstarted_call_handler)),
@ -293,6 +310,20 @@ TEST_F(InterceptionChainTest, Empty) {
EXPECT_NE(finished_call.client_metadata, nullptr);
}
// A chain containing only a pass-through interceptor must behave like the
// empty chain: the call reaches the test destination, which (per RunCall,
// defined elsewhere in this file) yields INTERNAL / "👊 cancelled" while the
// client metadata still arrives intact.
TEST_F(InterceptionChainTest, PassThrough) {
  auto r = InterceptionChainBuilder(ChannelArgs())
               .Add<TestPassThroughInterceptor<1>>()
               .Build(destination());
  ASSERT_TRUE(r.ok()) << r.status();
  auto finished_call = RunCall(r.value().get());
  EXPECT_EQ(finished_call.server_metadata->get(GrpcStatusMetadata()),
            GRPC_STATUS_INTERNAL);
  EXPECT_EQ(finished_call.server_metadata->get_pointer(GrpcMessageMetadata())
                ->as_string_view(),
            "👊 cancelled");
  EXPECT_NE(finished_call.client_metadata, nullptr);
}
TEST_F(InterceptionChainTest, Consumed) {
auto r = InterceptionChainBuilder(ChannelArgs())
.Add<TestConsumingInterceptor<1>>()

@ -34,7 +34,7 @@ CallInitiator TransportTest::CreateCall(
call.handler.SpawnInfallible(
"start-call", [this, handler = call.handler]() mutable {
transport_pair_.client->client_transport()->StartCall(
handler.StartWithEmptyFilterStack());
handler.StartCall());
return Empty{};
});
return std::move(call.initiator);
@ -54,7 +54,7 @@ CallHandler TransportTest::TickUntilServerCall() {
void TransportTest::ServerCallDestination::StartCall(
UnstartedCallHandler handler) {
handlers_.push(handler.StartWithEmptyFilterStack());
handlers_.push(handler.StartCall());
}
absl::optional<CallHandler> TransportTest::ServerCallDestination::PopHandler() {

@ -779,6 +779,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -779,6 +779,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \
@ -2699,6 +2700,8 @@ src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_final_info.h \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_spine.h \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/call_state.h \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/connectivity_state.h \
src/core/lib/transport/custom_metadata.h \

@ -786,6 +786,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -786,6 +786,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \
@ -2472,6 +2473,8 @@ src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_final_info.h \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_spine.h \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/call_state.h \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/connectivity_state.h \
src/core/lib/transport/custom_metadata.h \

@ -777,6 +777,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -777,6 +777,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -777,6 +777,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -1525,6 +1525,30 @@
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "call_state_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,

@ -101,7 +101,6 @@ DEPRECATED_FUNCTION_TEMP_ALLOW_LIST = {
"./src/core/lib/event_engine/posix_engine/timer_manager.cc",
"./src/core/lib/event_engine/windows/windows_endpoint.cc",
"./src/core/lib/event_engine/windows/windows_engine.cc",
"./src/core/lib/experiments/config.cc",
"./src/core/lib/gprpp/time.h",
"./src/core/lib/gprpp/work_serializer.cc",
"./src/core/lib/iomgr/call_combiner.cc",

Loading…
Cancel
Save