Merge branch 'no-infallible-seq' into step-by-step

pull/36985/head
Craig Tiller 8 months ago
commit aeaca27a6d
82 files changed (changed line counts in parentheses):
  1. BUILD (5)
  2. CMakeLists.txt (59)
  3. Makefile (1)
  4. Package.swift (2)
  5. TROUBLESHOOTING.md (57)
  6. build_autogenerated.yaml (54)
  7. config.m4 (1)
  8. config.w32 (1)
  9. doc/environment_variables.md (16)
  10. doc/trace_flags.md (1)
  11. examples/cpp/retry/BUILD (38)
  12. examples/cpp/retry/CMakeLists.txt (73)
  13. examples/cpp/retry/README.md (69)
  14. examples/cpp/retry/client.cc (98)
  15. examples/cpp/retry/server.cc (86)
  16. gRPC-C++.podspec (2)
  17. gRPC-Core.podspec (3)
  18. grpc.gemspec (2)
  19. package.xml (2)
  20. src/core/BUILD (21)
  21. src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc (71)
  22. src/core/ext/transport/chaotic_good/client/chaotic_good_connector.h (2)
  23. src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc (49)
  24. src/core/ext/transport/chaotic_good/server/chaotic_good_server.h (2)
  25. src/core/ext/transport/chttp2/client/chttp2_connector.cc (96)
  26. src/core/ext/transport/chttp2/client/chttp2_connector.h (2)
  27. src/core/ext/transport/chttp2/server/chttp2_server.cc (175)
  28. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (374)
  29. src/core/ext/transport/chttp2/transport/chttp2_transport.h (4)
  30. src/core/ext/transport/chttp2/transport/frame_settings.cc (2)
  31. src/core/ext/transport/chttp2/transport/internal.h (5)
  32. src/core/ext/transport/chttp2/transport/parsing.cc (2)
  33. src/core/ext/transport/chttp2/transport/writing.cc (2)
  34. src/core/handshaker/endpoint_info/endpoint_info_handshaker.cc (16)
  35. src/core/handshaker/handshaker.cc (238)
  36. src/core/handshaker/handshaker.h (94)
  37. src/core/handshaker/http_connect/http_connect_handshaker.cc (233)
  38. src/core/handshaker/security/secure_endpoint.cc (55)
  39. src/core/handshaker/security/secure_endpoint.h (8)
  40. src/core/handshaker/security/security_handshaker.cc (214)
  41. src/core/handshaker/tcp_connect/tcp_connect_handshaker.cc (85)
  42. src/core/lib/debug/trace_flags.cc (2)
  43. src/core/lib/debug/trace_flags.h (1)
  44. src/core/lib/debug/trace_flags.yaml (4)
  45. src/core/lib/iomgr/endpoint.h (2)
  46. src/core/lib/transport/call_filters.cc (695)
  47. src/core/lib/transport/call_filters.h (298)
  48. src/core/lib/transport/call_state.cc (39)
  49. src/core/lib/transport/call_state.h (957)
  50. src/core/lib/transport/interception_chain.h (12)
  51. src/core/load_balancing/grpclb/grpclb.cc (110)
  52. src/core/load_balancing/pick_first/pick_first.cc (291)
  53. src/core/load_balancing/priority/priority.cc (128)
  54. src/core/load_balancing/ring_hash/ring_hash.cc (16)
  55. src/core/load_balancing/rls/rls.cc (195)
  56. src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc (117)
  57. src/core/load_balancing/xds/cds.cc (19)
  58. src/core/load_balancing/xds/xds_cluster_manager.cc (7)
  59. src/core/util/http_client/httpcli.cc (54)
  60. src/core/util/http_client/httpcli.h (7)
  61. src/python/grpcio/grpc/aio/_channel.py (5)
  62. src/python/grpcio/grpc_core_dependencies.py (1)
  63. test/core/bad_client/bad_client.cc (2)
  64. test/core/bad_connection/close_fd_test.cc (11)
  65. test/core/end2end/fixtures/sockpair_fixture.h (13)
  66. test/core/end2end/fuzzers/client_fuzzer.cc (6)
  67. test/core/end2end/tests/max_connection_idle.cc (4)
  68. test/core/handshake/readahead_handshaker_server_ssl.cc (56)
  69. test/core/security/secure_endpoint_test.cc (22)
  70. test/core/security/ssl_server_fuzzer.cc (30)
  71. test/core/surface/channel_init_test.cc (1)
  72. test/core/transport/BUILD (15)
  73. test/core/transport/call_filters_test.cc (240)
  74. test/core/transport/call_state_test.cc (310)
  75. test/core/transport/chttp2/graceful_shutdown_test.cc (6)
  76. test/core/transport/chttp2/ping_configuration_test.cc (24)
  77. test/cpp/microbenchmarks/fullstack_fixtures.h (9)
  78. test/cpp/performance/writes_per_rpc_test.cc (18)
  79. tools/doxygen/Doxyfile.c++.internal (2)
  80. tools/doxygen/Doxyfile.core.internal (2)
  81. tools/run_tests/generated/tests.json (24)
  82. tools/run_tests/sanity/banned_functions.py (9)

@ -2285,6 +2285,7 @@ grpc_cc_library(
external_deps = [
"absl/base:core_headers",
"absl/container:inlined_vector",
"absl/functional:any_invocable",
"absl/log:check",
"absl/log:log",
"absl/status",
@ -2309,6 +2310,7 @@ grpc_cc_library(
"grpc_trace",
"handshaker",
"iomgr",
"orphanable",
"promise",
"ref_counted_ptr",
"resource_quota_api",
@ -3192,9 +3194,11 @@ grpc_cc_library(
external_deps = [
"absl/base:core_headers",
"absl/container:inlined_vector",
"absl/functional:any_invocable",
"absl/log:check",
"absl/log:log",
"absl/status",
"absl/status:statusor",
"absl/strings:str_format",
],
language = "c++",
@ -3211,6 +3215,7 @@ grpc_cc_library(
"grpc_public_hdrs",
"grpc_trace",
"iomgr",
"orphanable",
"ref_counted_ptr",
"//src/core:channel_args",
"//src/core:closure",

CMakeLists.txt (59 changes, generated)

@ -991,6 +991,7 @@ if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx call_spine_test)
endif()
add_dependencies(buildtests_cxx call_state_test)
add_dependencies(buildtests_cxx call_tracer_test)
add_dependencies(buildtests_cxx call_utils_test)
add_dependencies(buildtests_cxx cancel_after_accept_test)
@ -2526,6 +2527,7 @@ add_library(grpc
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -3280,6 +3282,7 @@ add_library(grpc_unsecure
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -5403,6 +5406,7 @@ add_library(grpc_authorization_provider
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -8861,6 +8865,7 @@ add_executable(call_filters_test
src/core/lib/surface/channel_stack_type.cc
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/message.cc
src/core/lib/transport/metadata.cc
@ -9087,6 +9092,58 @@ endif()
endif()
if(gRPC_BUILD_TESTS)
add_executable(call_state_test
src/core/lib/debug/trace.cc
src/core/lib/debug/trace_flags.cc
src/core/lib/gprpp/dump_args.cc
src/core/lib/gprpp/glob.cc
src/core/lib/promise/activity.cc
src/core/lib/transport/call_state.cc
test/core/transport/call_state_test.cc
)
if(WIN32 AND MSVC)
if(BUILD_SHARED_LIBS)
target_compile_definitions(call_state_test
PRIVATE
"GPR_DLL_IMPORTS"
)
endif()
endif()
target_compile_features(call_state_test PUBLIC cxx_std_14)
target_include_directories(call_state_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(call_state_test
${_gRPC_ALLTARGETS_LIBRARIES}
gtest
absl::config
absl::flat_hash_map
absl::hash
absl::type_traits
absl::statusor
gpr
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(call_tracer_test
test/core/telemetry/call_tracer_test.cc
test/core/test_util/fake_stats_plugin.cc
@ -9338,6 +9395,7 @@ add_executable(call_utils_test
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc
@ -18808,6 +18866,7 @@ add_executable(interception_chain_test
src/core/lib/transport/call_filters.cc
src/core/lib/transport/call_final_info.cc
src/core/lib/transport/call_spine.cc
src/core/lib/transport/call_state.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/interception_chain.cc

Makefile (1 change, generated)

@ -1343,6 +1343,7 @@ LIBGRPC_SRC = \
src/core/lib/transport/call_filters.cc \
src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
src/core/lib/transport/interception_chain.cc \

Package.swift (2 changes, generated)

@ -1694,6 +1694,8 @@ let package = Package(
"src/core/lib/transport/call_final_info.h",
"src/core/lib/transport/call_spine.cc",
"src/core/lib/transport/call_spine.h",
"src/core/lib/transport/call_state.cc",
"src/core/lib/transport/call_state.h",
"src/core/lib/transport/connectivity_state.cc",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/custom_metadata.h",

@ -7,44 +7,47 @@ This guide is for troubleshooting gRPC implementations based on C core library (
Extra logging can be very useful for diagnosing problems. It can be used to increase the amount of information
that gets printed to stderr.
## GRPC_VERBOSITY
## Setting Logging Severity and Verbosity
<!-- BEGIN_GOOGLE_INTERNAL_DOCUMENTATION
GRPC_VERBOSITY has been disabled for internal usage and will not work anymore.
If you want to debug, set verbose logs using absl.
END_GOOGLE_INTERNAL_DOCUMENTATION -->
[gRPC uses absl logging](https://abseil.io/docs/cpp/guides/logging).
Verbosity can be set using absl flags such as
`--minloglevel`, `--v` and `--vmodule`.
<!-- BEGIN_OPEN_SOURCE_DOCUMENTATION -->
`GRPC_VERBOSITY` is used to set the minimum level of log messages printed by gRPC (supported values are `DEBUG`, `INFO` and `ERROR`). If this environment variable is unset, only `ERROR` logs will be printed. `ERROR` is recommended for production systems.
<!-- END_OPEN_SOURCE_DOCUMENTATION -->
These can also be programmatically set using
[these absl APIs.](https://github.com/abseil/abseil-cpp/blob/master/absl/log/globals.h)
## GRPC_TRACE
Example
```
# Disable all logs other than FATAL for the entire application
./helloworld_application_using_grpc --v=-1 --minloglevel=3
```
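The same settings can also be applied programmatically. A minimal sketch using the absl globals API (`absl/log/globals.h`), equivalent in spirit to the flags above:
```c++
#include "absl/log/globals.h"
#include "absl/log/log.h"

int main() {
  // Programmatic counterpart of `--v=-1 --minloglevel=3`: disable all
  // VLOG output and let only FATAL messages through.
  absl::SetGlobalVLogLevel(-1);
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kFatal);
  LOG(INFO) << "this is suppressed";
  return 0;
}
```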
## GRPC_VERBOSITY (DEPRECATED)
<!-- BEGIN_GOOGLE_INTERNAL_DOCUMENTATION
GRPC_VERBOSITY has been disabled for internal usage and will not work anymore.
If you want to debug, set verbose logs using absl.
END_GOOGLE_INTERNAL_DOCUMENTATION -->
[Environment Variables Overview](doc/environment_variables.md)
`GRPC_TRACE` can be used to enable extra logging for some internal gRPC components. Enabling the right traces can be invaluable
for diagnosing what is going wrong when things aren't working as intended. Possible values for `GRPC_TRACE` are listed in [Environment Variables Overview](doc/environment_variables.md).
## GRPC_TRACE
`GRPC_TRACE` can be used to enable extra logging for specific internal gRPC components. Enabling the right traces can be invaluable
for diagnosing what is going wrong when things aren't working as intended. Possible values for `GRPC_TRACE` are [listed here](doc/trace_flags.md).
Multiple traces can be enabled at once (use comma as separator).
```
# Enable debug logs for an application
GRPC_VERBOSITY=debug ./helloworld_application_using_grpc
# Enable debug logs for the entire application
./helloworld_application_using_grpc --v=2 --minloglevel=0
```
```
# Print information about invocations of low-level C core API.
# Note that trace logs of log level DEBUG won't be displayed.
# Also note that most tracers use log level INFO, so without setting
# GRPC_VERBOSITY accordingly, no traces will be printed.
GRPC_VERBOSITY=info GRPC_TRACE=api ./helloworld_application_using_grpc
# Note that trace logs that use `VLOG` won't be displayed.
# Many tracers use log level INFO.
# So unless absl settings are correct, no traces will be printed.
GRPC_TRACE=api ./helloworld_application_using_grpc --v=-1 --minloglevel=0
```
```
# Print info from 3 different tracers, including tracing logs with log level DEBUG
GRPC_VERBOSITY=debug GRPC_TRACE=tcp,http,api ./helloworld_application_using_grpc
# Print info from 3 different tracers, including tracing logs
GRPC_TRACE=tcp,http,api ./helloworld_application_using_grpc --v=2 --minloglevel=0
```
Known limitations: `GRPC_TRACE=tcp` is currently not implemented for Windows (you won't see any tcp traces).
@ -52,3 +55,11 @@ Known limitations: `GPRC_TRACE=tcp` is currently not implemented for Windows (yo
Please note that the `GRPC_TRACE` environment variable has nothing to do with gRPC's "tracing" feature (i.e., tracing RPCs in a
microservices environment to gain insight into how requests are processed by a deployment); it is merely used to enable printing
of extra logs.
## Preventing gRPC Log Noise
Log noise can consume significant resources. We recommend tuning logging settings for production systems very carefully.
* Avoid the `GRPC_VERBOSITY` environment variable; it has been deprecated. If the value of this flag is anything other than "ERROR" or "NONE", it will cause log noise.
* Always avoid setting `--v` and `--vmodule` to anything other than -1 for production systems.
* Avoid setting `--minloglevel=0` for production systems. Anything greater than 0 should be fine.
* If this does not eliminate your log noise, search your entire codebase, and any libraries/components/configs you use, for instances of the flags `--v` and `--vmodule` and the functions `absl::SetVLogLevel` and `absl::SetMinLogLevel`. A sketch of quiet production settings follows below.
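For example (using the same absl globals API as above; the exact thresholds are an assumption, adjust to taste):
```c++
#include "absl/log/globals.h"

// Production-leaning defaults per the recommendations above: no VLOG
// output, and only ERROR or worse reaches the log.
void QuietLogsForProduction() {
  absl::SetGlobalVLogLevel(-1);
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kError);
}
```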

@ -1099,6 +1099,7 @@ libs:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -1908,6 +1909,7 @@ libs:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -2604,6 +2606,7 @@ libs:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -3027,6 +3030,7 @@ libs:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -4701,6 +4705,7 @@ libs:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -5001,6 +5006,7 @@ libs:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -6584,6 +6590,7 @@ targets:
- src/core/lib/surface/channel_stack_type.h
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
- src/core/lib/transport/http2_errors.h
@ -6651,6 +6658,7 @@ targets:
- src/core/lib/surface/channel_stack_type.cc
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/message.cc
- src/core/lib/transport/metadata.cc
@ -6782,6 +6790,48 @@ targets:
- linux
- posix
uses_polling: false
- name: call_state_test
gtest: true
build: test
language: c++
headers:
- src/core/lib/debug/trace.h
- src/core/lib/debug/trace_flags.h
- src/core/lib/debug/trace_impl.h
- src/core/lib/event_engine/event_engine_context.h
- src/core/lib/gprpp/atomic_utils.h
- src/core/lib/gprpp/down_cast.h
- src/core/lib/gprpp/dump_args.h
- src/core/lib/gprpp/glob.h
- src/core/lib/gprpp/orphanable.h
- src/core/lib/gprpp/ref_counted.h
- src/core/lib/gprpp/ref_counted_ptr.h
- src/core/lib/promise/activity.h
- src/core/lib/promise/context.h
- src/core/lib/promise/detail/promise_factory.h
- src/core/lib/promise/detail/promise_like.h
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/status_flag.h
- src/core/lib/transport/call_state.h
- test/core/promise/poll_matcher.h
src:
- src/core/lib/debug/trace.cc
- src/core/lib/debug/trace_flags.cc
- src/core/lib/gprpp/dump_args.cc
- src/core/lib/gprpp/glob.cc
- src/core/lib/promise/activity.cc
- src/core/lib/transport/call_state.cc
- test/core/transport/call_state_test.cc
deps:
- gtest
- absl/base:config
- absl/container:flat_hash_map
- absl/hash:hash
- absl/meta:type_traits
- absl/status:statusor
- gpr
uses_polling: false
- name: call_tracer_test
gtest: true
build: test
@ -7065,6 +7115,7 @@ targets:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -7333,6 +7384,7 @@ targets:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc
@ -12788,6 +12840,7 @@ targets:
- src/core/lib/transport/call_filters.h
- src/core/lib/transport/call_final_info.h
- src/core/lib/transport/call_spine.h
- src/core/lib/transport/call_state.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/custom_metadata.h
- src/core/lib/transport/error_utils.h
@ -13057,6 +13110,7 @@ targets:
- src/core/lib/transport/call_filters.cc
- src/core/lib/transport/call_final_info.cc
- src/core/lib/transport/call_spine.cc
- src/core/lib/transport/call_state.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/interception_chain.cc

config.m4 (1 change, generated)

@ -718,6 +718,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/transport/call_filters.cc \
src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
src/core/lib/transport/interception_chain.cc \

config.w32 (1 change, generated)

@ -683,6 +683,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\transport\\call_filters.cc " +
"src\\core\\lib\\transport\\call_final_info.cc " +
"src\\core\\lib\\transport\\call_spine.cc " +
"src\\core\\lib\\transport\\call_state.cc " +
"src\\core\\lib\\transport\\connectivity_state.cc " +
"src\\core\\lib\\transport\\error_utils.cc " +
"src\\core\\lib\\transport\\interception_chain.cc " +

@ -46,14 +46,13 @@ some configuration as environment variables that can be set.
Available tracers and their usage can be found in
[gRPC Trace Flags](trace_flags.md)
* GRPC_VERBOSITY
<!-- BEGIN_GOOGLE_INTERNAL_DOCUMENTATION
GRPC_VERBOSITY has been disabled for internal usage and will not work anymore.
If you want to debug, set verbose logs using absl.
END_GOOGLE_INTERNAL_DOCUMENTATION -->
* GRPC_VERBOSITY (DEPRECATED)
<!-- BEGIN_OPEN_SOURCE_DOCUMENTATION -->
`GRPC_VERBOSITY` is used to set the minimum level of log messages printed by gRPC (supported values are `DEBUG`, `INFO` and `ERROR`). If this environment variable is unset, only `ERROR` logs will be printed.
`GRPC_VERBOSITY` is used to set the minimum level of log messages printed. Supported values are `DEBUG`, `INFO`, `ERROR` and `NONE`.
We only support this flag for legacy reasons. If this environment variable is set, gRPC will call `absl::SetMinLogLevel` and `absl::SetVLogLevel`. This alters the log settings of the entire application, not just gRPC code, and for that reason it is not recommended. Our recommendation is to avoid this flag and [set log verbosity using absl](https://abseil.io/docs/cpp/guides/logging).
gRPC logging verbosity - one of:
- DEBUG - log INFO, WARNING, ERROR and FATAL messages. Also enables absl VLOG(2) logging. This is not recommended for production systems. It will be expensive for staging environments too, so use it only when you need to debug a specific issue.
- INFO - log INFO, WARNING, ERROR and FATAL messages. This is not recommended for production systems. This may be slightly expensive for staging environments too. We recommend that you use your discretion for staging environments.
@ -65,9 +64,8 @@ END_GOOGLE_INTERNAL_DOCUMENTATION -->
- If the external application does not set anything either, the default set by absl will be honoured.
<!-- END_OPEN_SOURCE_DOCUMENTATION -->
* GRPC_STACKTRACE_MINLOGLEVEL
Minimum loglevel to print the stack-trace - one of DEBUG, INFO, ERROR, and NONE.
NONE is the default value.
* GRPC_STACKTRACE_MINLOGLEVEL (DEPRECATED)
This will not work anymore.
* GRPC_TRACE_FUZZER
If set, the fuzzers will output traces (these are usually suppressed).

doc/trace_flags.md (1 change, generated)

@ -90,6 +90,7 @@ accomplished by invoking `bazel build --config=dbg <target>`
- auth_context_refcount - Auth context refcounting.
- call_combiner - Call combiner state.
- call_refcount - Refcount on call.
- call_state - Traces transitions through the call spine state machine.
- closure - Legacy closure creation, scheduling, and completion.
- combiner - Combiner lock state.
- cq_refcount - Completion queue refcounting.

@ -0,0 +1,38 @@
# Copyright 2024 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
licenses(["notice"])
cc_binary(
name = "client",
srcs = ["client.cc"],
defines = ["BAZEL_BUILD"],
deps = [
"//:grpc++",
"//examples/protos:helloworld_cc_grpc",
"@com_google_absl//absl/strings:string_view",
],
)
cc_binary(
name = "server",
srcs = ["server.cc"],
defines = ["BAZEL_BUILD"],
deps = [
"//:grpc++",
"//:grpc++_reflection",
"//examples/protos:helloworld_cc_grpc",
"@com_google_absl//absl/strings:str_format",
],
)

@ -0,0 +1,73 @@
# Copyright 2024 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cmake build file for C++ retry example.
# Assumes protobuf and gRPC have been installed using cmake.
# See cmake_externalproject/CMakeLists.txt for all-in-one cmake build
# that automatically builds all the dependencies before building retry.
cmake_minimum_required(VERSION 3.8)
project(Retry C CXX)
include(../cmake/common.cmake)
# Proto file
get_filename_component(hw_proto "../../protos/helloworld.proto" ABSOLUTE)
get_filename_component(hw_proto_path "${hw_proto}" PATH)
# Generated sources
set(hw_proto_srcs "${CMAKE_CURRENT_BINARY_DIR}/helloworld.pb.cc")
set(hw_proto_hdrs "${CMAKE_CURRENT_BINARY_DIR}/helloworld.pb.h")
set(hw_grpc_srcs "${CMAKE_CURRENT_BINARY_DIR}/helloworld.grpc.pb.cc")
set(hw_grpc_hdrs "${CMAKE_CURRENT_BINARY_DIR}/helloworld.grpc.pb.h")
add_custom_command(
OUTPUT "${hw_proto_srcs}" "${hw_proto_hdrs}" "${hw_grpc_srcs}" "${hw_grpc_hdrs}"
COMMAND ${_PROTOBUF_PROTOC}
ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}"
--cpp_out "${CMAKE_CURRENT_BINARY_DIR}"
-I "${hw_proto_path}"
--plugin=protoc-gen-grpc="${_GRPC_CPP_PLUGIN_EXECUTABLE}"
"${hw_proto}"
DEPENDS "${hw_proto}")
# Include generated *.pb.h files
include_directories("${CMAKE_CURRENT_BINARY_DIR}")
# hw_grpc_proto
add_library(hw_grpc_proto
${hw_grpc_srcs}
${hw_grpc_hdrs}
${hw_proto_srcs}
${hw_proto_hdrs})
target_link_libraries(hw_grpc_proto
absl::check
${_REFLECTION}
${_GRPC_GRPCPP}
${_PROTOBUF_LIBPROTOBUF})
# Targets (client|server)
foreach(_target
client server)
add_executable(${_target} "${_target}.cc")
target_link_libraries(${_target}
hw_grpc_proto
absl::check
absl::flags
absl::flags_parse
absl::log
${_REFLECTION}
${_GRPC_GRPCPP}
${_PROTOBUF_LIBPROTOBUF})
endforeach()

@ -0,0 +1,69 @@
# Retry
This example shows how to enable and configure retry on gRPC clients.
## Documentation
[gRFC for client-side retry support](https://github.com/grpc/proposal/blob/master/A6-client-retries.md)
## Try it
This example includes a service implementation that fails requests three times with status
code `Unavailable`, then succeeds on the fourth attempt. The client is configured to make up to four
attempts when it receives an `Unavailable` status code.
First start the server:
```bash
$ ./server
```
Then run the client:
```bash
$ ./client
```
Expected server output:
```
Server listening on 0.0.0.0:50052
return UNAVAILABLE
return UNAVAILABLE
return UNAVAILABLE
return OK
```
Expected client output:
```
Greeter received: Hello world
```
## Usage
### Define your retry policy
Retry is enabled via the service config, which can be provided by the name resolver or
a [GRPC_ARG_SERVICE_CONFIG](https://github.com/grpc/grpc/blob/master/include/grpc/impl/channel_arg_names.h#L207-L209) channel argument. In the config below, we set the retry policy for the "helloworld.Greeter" service.
* `maxAttempts`: how many times to attempt the RPC before failing.
* `initialBackoff`, `maxBackoff`, `backoffMultiplier`: configure the delay between attempts.
* `retryableStatusCodes`: retry only when receiving these status codes.
```c++
constexpr absl::string_view kRetryPolicy =
"{\"methodConfig\" : [{"
" \"name\" : [{\"service\": \"helloworld.Greeter\"}],"
" \"waitForReady\": true,"
" \"retryPolicy\": {"
" \"maxAttempts\": 4,"
" \"initialBackoff\": \"1s\","
" \"maxBackoff\": \"120s\","
" \"backoffMultiplier\": 1.0,"
" \"retryableStatusCodes\": [\"UNAVAILABLE\"]"
" }"
"}]}";
```
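To install the policy on a channel programmatically, pass it through `ChannelArguments`; the snippet below is condensed from `client.cc` in this example:
```c++
#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>

// Create a channel whose calls are retried per the JSON policy above.
std::shared_ptr<grpc::Channel> MakeRetryingChannel(
    const std::string& target, const std::string& retry_policy_json) {
  grpc::ChannelArguments args;
  args.SetServiceConfigJSON(retry_policy_json);
  return grpc::CreateCustomChannel(target,
                                   grpc::InsecureChannelCredentials(), args);
}
```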

@ -0,0 +1,98 @@
/*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include <grpcpp/grpcpp.h>
#include <grpcpp/support/status.h>
#ifdef BAZEL_BUILD
#include "examples/protos/helloworld.grpc.pb.h"
#else
#include "helloworld.grpc.pb.h"
#endif
using grpc::Channel;
using grpc::ClientContext;
using grpc::Status;
using helloworld::Greeter;
using helloworld::HelloReply;
using helloworld::HelloRequest;
constexpr absl::string_view kTargetAddress = "localhost:50052";
// clang-format off
constexpr absl::string_view kRetryPolicy =
"{\"methodConfig\" : [{"
" \"name\" : [{\"service\": \"helloworld.Greeter\"}],"
" \"waitForReady\": true,"
" \"retryPolicy\": {"
" \"maxAttempts\": 4,"
" \"initialBackoff\": \"1s\","
" \"maxBackoff\": \"120s\","
" \"backoffMultiplier\": 1.0,"
" \"retryableStatusCodes\": [\"UNAVAILABLE\"]"
" }"
"}]}";
// clang-format on
class GreeterClient {
public:
GreeterClient(std::shared_ptr<Channel> channel)
: stub_(Greeter::NewStub(channel)) {}
// Assembles the client's payload, sends it and presents the response back
// from the server.
std::string SayHello(const std::string& user) {
// Data we are sending to the server.
HelloRequest request;
request.set_name(user);
// Container for the data we expect from the server.
HelloReply reply;
// Context for the client. It could be used to convey extra information to
// the server and/or tweak certain RPC behaviors.
ClientContext context;
// The actual RPC.
Status status = stub_->SayHello(&context, request, &reply);
// Act upon its status.
if (status.ok()) {
return reply.message();
} else {
std::cout << status.error_code() << ": " << status.error_message()
<< std::endl;
return "RPC failed";
}
}
private:
std::unique_ptr<Greeter::Stub> stub_;
};
int main() {
auto channel_args = grpc::ChannelArguments();
channel_args.SetServiceConfigJSON(std::string(kRetryPolicy));
GreeterClient greeter(grpc::CreateCustomChannel(
std::string(kTargetAddress), grpc::InsecureChannelCredentials(),
channel_args));
std::string user("world");
std::string reply = greeter.SayHello(user);
std::cout << "Greeter received: " << reply << std::endl;
return 0;
}

@ -0,0 +1,86 @@
/*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include <string>
#include <grpcpp/ext/proto_server_reflection_plugin.h>
#include <grpcpp/grpcpp.h>
#include <grpcpp/health_check_service_interface.h>
#ifdef BAZEL_BUILD
#include "examples/protos/helloworld.grpc.pb.h"
#else
#include "helloworld.grpc.pb.h"
#endif
using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::Status;
using grpc::StatusCode;
using helloworld::Greeter;
using helloworld::HelloReply;
using helloworld::HelloRequest;
// Logic and data behind the server's behavior.
class GreeterServiceImpl final : public Greeter::Service {
public:
Status SayHello(ServerContext* context, const HelloRequest* request,
HelloReply* reply) override {
if (++request_counter_ % request_modulo_ != 0) {
// Return an OK status for every request_modulo_ number of requests,
// return UNAVAILABLE otherwise.
std::cout << "return UNAVAILABLE" << std::endl;
return Status(StatusCode::UNAVAILABLE, "");
}
std::string prefix("Hello ");
reply->set_message(prefix + request->name());
std::cout << "return OK" << std::endl;
return Status::OK;
}
private:
static constexpr int request_modulo_ = 4;
int request_counter_ = 0;
};
void RunServer(uint16_t port) {
std::string server_address = absl::StrFormat("0.0.0.0:%d", port);
GreeterServiceImpl service;
grpc::EnableDefaultHealthCheckService(true);
grpc::reflection::InitProtoReflectionServerBuilderPlugin();
ServerBuilder builder;
// Listen on the given address without any authentication mechanism.
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
// Register "service" as the instance through which we'll communicate with
// clients. In this case it corresponds to a *synchronous* service.
builder.RegisterService(&service);
// Finally assemble the server.
std::unique_ptr<Server> server(builder.BuildAndStart());
std::cout << "Server listening on " << server_address << std::endl;
// Wait for the server to shut down. Note that some other thread must be
// responsible for shutting down the server for this call to ever return.
server->Wait();
}
int main(int argc, char** argv) {
RunServer(/*port=*/50052);
return 0;
}

gRPC-C++.podspec (2 changes, generated)

@ -1202,6 +1202,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',
@ -2476,6 +2477,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',

gRPC-Core.podspec (3 changes, generated)

@ -1809,6 +1809,8 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.cc',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.cc',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
@ -3248,6 +3250,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',

grpc.gemspec (2 changes, generated)

@ -1696,6 +1696,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/transport/call_final_info.h )
s.files += %w( src/core/lib/transport/call_spine.cc )
s.files += %w( src/core/lib/transport/call_spine.h )
s.files += %w( src/core/lib/transport/call_state.cc )
s.files += %w( src/core/lib/transport/call_state.h )
s.files += %w( src/core/lib/transport/connectivity_state.cc )
s.files += %w( src/core/lib/transport/connectivity_state.h )
s.files += %w( src/core/lib/transport/custom_metadata.h )

package.xml (2 changes, generated)

@ -1678,6 +1678,8 @@
<file baseinstalldir="/" name="src/core/lib/transport/call_final_info.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_spine.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_spine.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_state.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_state.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/custom_metadata.h" role="src" />

@ -1301,6 +1301,7 @@ grpc_cc_library(
],
external_deps = [
"absl/base:core_headers",
"absl/functional:any_invocable",
"absl/log:check",
"absl/status",
"absl/status:statusor",
@ -1343,6 +1344,7 @@ grpc_cc_library(
"handshaker/endpoint_info/endpoint_info_handshaker.h",
],
external_deps = [
"absl/functional:any_invocable",
"absl/status",
],
language = "c++",
@ -7436,6 +7438,24 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "call_state",
srcs = [
"lib/transport/call_state.cc",
],
hdrs = [
"lib/transport/call_state.h",
],
external_deps = ["absl/types:optional"],
deps = [
"activity",
"poll",
"status_flag",
"//:gpr",
"//:grpc_trace",
],
)
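The new `call_state` target leans on the promise layer's `activity`/`poll`/`status_flag` primitives. As a purely illustrative toy (not the real `CallState` API), the poll idiom those deps imply looks like this: an operation either returns a ready value or reports pending, and the caller re-polls when woken.
```c++
// Toy sketch of the Poll<T> idiom (illustrative only; the real
// primitives live under src/core/lib/promise/).
#include <iostream>
#include <optional>

template <typename T>
using Poll = std::optional<T>;  // nullopt == pending, value == ready

struct ToyCallState {
  bool started = false;
  Poll<bool> PollWaitForStart() {
    if (!started) return std::nullopt;  // pending: activity re-polls later
    return true;                        // ready
  }
};

int main() {
  ToyCallState state;
  std::cout << "ready? " << state.PollWaitForStart().has_value() << "\n";
  state.started = true;
  std::cout << "ready? " << state.PollWaitForStart().has_value() << "\n";
}
```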
grpc_cc_library(
name = "call_filters",
srcs = [
@ -7447,6 +7467,7 @@ grpc_cc_library(
external_deps = ["absl/log:check"],
deps = [
"call_final_info",
"call_state",
"dump_args",
"if",
"latch",

@ -256,20 +256,23 @@ void ChaoticGoodConnector::Connect(const Args& args, Result* result,
error);
return;
}
auto* p = self.release();
auto* chaotic_good_ext =
grpc_event_engine::experimental::QueryExtension<
grpc_event_engine::experimental::ChaoticGoodExtension>(
endpoint.value().get());
endpoint->get());
if (chaotic_good_ext != nullptr) {
chaotic_good_ext->EnableStatsCollection(/*is_control_channel=*/true);
chaotic_good_ext->UseMemoryQuota(
ResourceQuota::Default()->memory_quota());
}
auto* p = self.get();
p->handshake_mgr_->DoHandshake(
grpc_event_engine_endpoint_create(std::move(endpoint.value())),
OrphanablePtr<grpc_endpoint>(
grpc_event_engine_endpoint_create(std::move(*endpoint))),
p->args_.channel_args, p->args_.deadline, nullptr /* acceptor */,
OnHandshakeDone, p);
[self = std::move(self)](absl::StatusOr<HandshakerArgs*> result) {
self->OnHandshakeDone(std::move(result));
});
};
event_engine_->Connect(
std::move(on_connect), *resolved_addr_,
@ -280,45 +283,37 @@ void ChaoticGoodConnector::Connect(const Args& args, Result* result,
std::chrono::seconds(kTimeoutSecs));
}
void ChaoticGoodConnector::OnHandshakeDone(void* arg, grpc_error_handle error) {
auto* args = static_cast<HandshakerArgs*>(arg);
RefCountedPtr<ChaoticGoodConnector> self(
static_cast<ChaoticGoodConnector*>(args->user_data));
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
void ChaoticGoodConnector::OnHandshakeDone(
absl::StatusOr<HandshakerArgs*> result) {
// Start receiving settings frames.
{
MutexLock lock(&self->mu_);
if (!error.ok() || self->is_shutdown_) {
if (error.ok()) {
MutexLock lock(&mu_);
if (!result.ok() || is_shutdown_) {
absl::Status error = result.status();
if (result.ok()) {
error = GRPC_ERROR_CREATE("connector shutdown");
// We were shut down after handshaking completed successfully, so
// destroy the endpoint here.
if (args->endpoint != nullptr) {
grpc_endpoint_destroy(args->endpoint);
}
}
self->result_->Reset();
ExecCtx::Run(DEBUG_LOCATION, std::exchange(self->notify_, nullptr),
error);
result_->Reset();
ExecCtx::Run(DEBUG_LOCATION, std::exchange(notify_, nullptr), error);
return;
}
}
if (args->endpoint != nullptr) {
if ((*result)->endpoint != nullptr) {
CHECK(grpc_event_engine::experimental::grpc_is_event_engine_endpoint(
args->endpoint));
self->control_endpoint_ = PromiseEndpoint(
grpc_event_engine::experimental::
grpc_take_wrapped_event_engine_endpoint(args->endpoint),
SliceBuffer());
(*result)->endpoint.get()));
control_endpoint_ =
PromiseEndpoint(grpc_event_engine::experimental::
grpc_take_wrapped_event_engine_endpoint(
(*result)->endpoint.release()),
SliceBuffer());
auto activity = MakeActivity(
[self] {
[self = RefAsSubclass<ChaoticGoodConnector>()] {
return TrySeq(ControlEndpointWriteSettingsFrame(self),
ControlEndpointReadSettingsFrame(self),
[]() { return absl::OkStatus(); });
},
EventEngineWakeupScheduler(self->event_engine_),
[self](absl::Status status) {
EventEngineWakeupScheduler(event_engine_),
[self = RefAsSubclass<ChaoticGoodConnector>()](absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "ChaoticGoodConnector::OnHandshakeDone: %s",
status.ToString().c_str());
@ -338,17 +333,19 @@ void ChaoticGoodConnector::OnHandshakeDone(void* arg, grpc_error_handle error) {
status);
}
},
self->arena_, self->event_engine_.get());
MutexLock lock(&self->mu_);
if (!self->is_shutdown_) {
self->connect_activity_ = std::move(activity);
arena_, event_engine_.get());
MutexLock lock(&mu_);
if (!is_shutdown_) {
connect_activity_ = std::move(activity);
}
} else {
// Handshaking succeeded but there is no endpoint.
MutexLock lock(&self->mu_);
self->result_->Reset();
MutexLock lock(&mu_);
result_->Reset();
auto error = GRPC_ERROR_CREATE("handshake complete with empty endpoint.");
ExecCtx::Run(DEBUG_LOCATION, std::exchange(self->notify_, nullptr), error);
ExecCtx::Run(
DEBUG_LOCATION, std::exchange(notify_, nullptr),
absl::InternalError("handshake complete with empty endpoint."));
}
}
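The recurring shape in these connector and handshaker hunks is the move from a C-style completion callback (`void (*)(void* user_data, grpc_error_handle)`) plus manual ref juggling to a move-only callable receiving `absl::StatusOr<HandshakerArgs*>`, with the ref-counted `self` captured inside. A minimal self-contained sketch of that pattern (the types here are stand-ins, not the real gRPC declarations):
```c++
#include <iostream>
#include <memory>
#include <utility>

#include "absl/functional/any_invocable.h"
#include "absl/status/statusor.h"

struct FakeHandshakerArgs {  // stand-in for grpc_core::HandshakerArgs
  int endpoint_fd = 42;
};

// New-style entry point: the callback owns its captures, so a
// ref-counted self can travel inside it instead of via void* user_data.
void DoHandshakeSketch(
    absl::AnyInvocable<void(absl::StatusOr<FakeHandshakerArgs*>)> on_done) {
  static FakeHandshakerArgs args;
  std::move(on_done)(&args);  // or deliver an error absl::Status instead
}

int main() {
  auto self = std::make_shared<int>(7);  // plays the role of Ref()
  DoHandshakeSketch([self](absl::StatusOr<FakeHandshakerArgs*> result) {
    if (!result.ok()) return;
    std::cout << "fd=" << (*result)->endpoint_fd << ", self=" << *self << "\n";
  });
}
```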

@ -77,7 +77,7 @@ class ChaoticGoodConnector : public SubchannelConnector {
RefCountedPtr<ChaoticGoodConnector> self);
static auto WaitForDataEndpointSetup(
RefCountedPtr<ChaoticGoodConnector> self);
static void OnHandshakeDone(void* arg, grpc_error_handle error);
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
RefCountedPtr<Arena> arena_ = SimpleArenaAllocator()->MakeArena();
Mutex mu_;

@ -211,9 +211,12 @@ ChaoticGoodServerListener::ActiveConnection::HandshakingState::HandshakingState(
void ChaoticGoodServerListener::ActiveConnection::HandshakingState::Start(
std::unique_ptr<EventEngine::Endpoint> endpoint) {
handshake_mgr_->DoHandshake(
grpc_event_engine_endpoint_create(std::move(endpoint)),
connection_->args(), GetConnectionDeadline(), nullptr, OnHandshakeDone,
Ref().release());
OrphanablePtr<grpc_endpoint>(
grpc_event_engine_endpoint_create(std::move(endpoint))),
connection_->args(), GetConnectionDeadline(), nullptr,
[self = Ref()](absl::StatusOr<HandshakerArgs*> result) {
self->OnHandshakeDone(std::move(result));
});
}
auto ChaoticGoodServerListener::ActiveConnection::HandshakingState::
@ -384,33 +387,28 @@ auto ChaoticGoodServerListener::ActiveConnection::HandshakingState::
}
void ChaoticGoodServerListener::ActiveConnection::HandshakingState::
OnHandshakeDone(void* arg, grpc_error_handle error) {
auto* args = static_cast<HandshakerArgs*>(arg);
CHECK_NE(args, nullptr);
RefCountedPtr<HandshakingState> self(
static_cast<HandshakingState*>(args->user_data));
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
if (!error.ok()) {
self->connection_->Done(
absl::StrCat("Handshake failed: ", StatusToString(error)));
OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result) {
if (!result.ok()) {
connection_->Done(
absl::StrCat("Handshake failed: ", result.status().ToString()));
return;
}
if (args->endpoint == nullptr) {
self->connection_->Done("Server handshake done but has empty endpoint.");
CHECK_NE(*result, nullptr);
if ((*result)->endpoint == nullptr) {
connection_->Done("Server handshake done but has empty endpoint.");
return;
}
CHECK(grpc_event_engine::experimental::grpc_is_event_engine_endpoint(
args->endpoint));
(*result)->endpoint.get()));
auto ee_endpoint =
grpc_event_engine::experimental::grpc_take_wrapped_event_engine_endpoint(
args->endpoint);
(*result)->endpoint.release());
auto* chaotic_good_ext = grpc_event_engine::experimental::QueryExtension<
grpc_event_engine::experimental::ChaoticGoodExtension>(ee_endpoint.get());
self->connection_->endpoint_ =
connection_->endpoint_ =
PromiseEndpoint(std::move(ee_endpoint), SliceBuffer());
auto activity = MakeActivity(
[self, chaotic_good_ext]() {
[self = Ref(), chaotic_good_ext]() {
return TrySeq(
Race(EndpointReadSettingsFrame(self),
TrySeq(Sleep(Timestamp::Now() + kConnectionDeadline),
@ -430,8 +428,8 @@ void ChaoticGoodServerListener::ActiveConnection::HandshakingState::
return EndpointWriteSettingsFrame(self, is_control_endpoint);
});
},
EventEngineWakeupScheduler(self->connection_->listener_->event_engine_),
[self](absl::Status status) {
EventEngineWakeupScheduler(connection_->listener_->event_engine_),
[self = Ref()](absl::Status status) {
if (!status.ok()) {
self->connection_->Done(
absl::StrCat("Server setting frame handling failed: ",
@ -440,11 +438,10 @@ void ChaoticGoodServerListener::ActiveConnection::HandshakingState::
self->connection_->Done();
}
},
self->connection_->arena_.get(),
self->connection_->listener_->event_engine_.get());
MutexLock lock(&self->connection_->mu_);
if (self->connection_->orphaned_) return;
self->connection_->receive_settings_activity_ = std::move(activity);
connection_->arena_.get(), connection_->listener_->event_engine_.get());
MutexLock lock(&connection_->mu_);
if (connection_->orphaned_) return;
connection_->receive_settings_activity_ = std::move(activity);
}
Timestamp ChaoticGoodServerListener::ActiveConnection::HandshakingState::

@ -104,7 +104,7 @@ class ChaoticGoodServerListener final : public Server::ListenerInterface {
static auto DataEndpointWriteSettingsFrame(
RefCountedPtr<HandshakingState> self);
static void OnHandshakeDone(void* arg, grpc_error_handle error);
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
Timestamp GetConnectionDeadline();
const RefCountedPtr<ActiveConnection> connection_;
const RefCountedPtr<HandshakeManager> handshake_mgr_;

@ -120,10 +120,12 @@ void Chttp2Connector::Connect(const Args& args, Result* result,
CoreConfiguration::Get().handshaker_registry().AddHandshakers(
HANDSHAKER_CLIENT, channel_args, args_.interested_parties,
handshake_mgr_.get());
Ref().release(); // Ref held by OnHandshakeDone().
handshake_mgr_->DoHandshake(nullptr /* endpoint */, channel_args,
args.deadline, nullptr /* acceptor */,
OnHandshakeDone, this);
handshake_mgr_->DoHandshake(
/*endpoint=*/nullptr, channel_args, args.deadline, /*acceptor=*/nullptr,
[self = RefAsSubclass<Chttp2Connector>()](
absl::StatusOr<HandshakerArgs*> result) {
self->OnHandshakeDone(std::move(result));
});
}
void Chttp2Connector::Shutdown(grpc_error_handle error) {
@ -135,54 +137,42 @@ void Chttp2Connector::Shutdown(grpc_error_handle error) {
}
}
void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error_handle error) {
auto* args = static_cast<HandshakerArgs*>(arg);
Chttp2Connector* self = static_cast<Chttp2Connector*>(args->user_data);
{
MutexLock lock(&self->mu_);
if (!error.ok() || self->shutdown_) {
if (error.ok()) {
error = GRPC_ERROR_CREATE("connector shutdown");
// We were shut down after handshaking completed successfully, so
// destroy the endpoint here.
if (args->endpoint != nullptr) {
grpc_endpoint_destroy(args->endpoint);
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
}
}
self->result_->Reset();
NullThenSchedClosure(DEBUG_LOCATION, &self->notify_, error);
} else if (args->endpoint != nullptr) {
self->result_->transport =
grpc_create_chttp2_transport(args->args, args->endpoint, true);
CHECK_NE(self->result_->transport, nullptr);
self->result_->socket_node =
grpc_chttp2_transport_get_socket_node(self->result_->transport);
self->result_->channel_args = args->args;
self->Ref().release(); // Ref held by OnReceiveSettings()
GRPC_CLOSURE_INIT(&self->on_receive_settings_, OnReceiveSettings, self,
grpc_schedule_on_exec_ctx);
grpc_chttp2_transport_start_reading(
self->result_->transport, args->read_buffer,
&self->on_receive_settings_, self->args_.interested_parties, nullptr);
self->timer_handle_ = self->event_engine_->RunAfter(
self->args_.deadline - Timestamp::Now(),
[self = self->RefAsSubclass<Chttp2Connector>()] {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->OnTimeout();
});
} else {
// If the handshaking succeeded but there is no endpoint, then the
// handshaker may have handed off the connection to some external
// code. Just verify that exit_early flag is set.
DCHECK(args->exit_early);
NullThenSchedClosure(DEBUG_LOCATION, &self->notify_, error);
void Chttp2Connector::OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result) {
MutexLock lock(&mu_);
if (!result.ok() || shutdown_) {
if (result.ok()) {
result = GRPC_ERROR_CREATE("connector shutdown");
}
self->handshake_mgr_.reset();
result_->Reset();
NullThenSchedClosure(DEBUG_LOCATION, &notify_, result.status());
} else if ((*result)->endpoint != nullptr) {
result_->transport = grpc_create_chttp2_transport(
(*result)->args, std::move((*result)->endpoint), true);
CHECK_NE(result_->transport, nullptr);
result_->socket_node =
grpc_chttp2_transport_get_socket_node(result_->transport);
result_->channel_args = std::move((*result)->args);
Ref().release(); // Ref held by OnReceiveSettings()
GRPC_CLOSURE_INIT(&on_receive_settings_, OnReceiveSettings, this,
grpc_schedule_on_exec_ctx);
grpc_chttp2_transport_start_reading(
result_->transport, (*result)->read_buffer.c_slice_buffer(),
&on_receive_settings_, args_.interested_parties, nullptr);
timer_handle_ =
event_engine_->RunAfter(args_.deadline - Timestamp::Now(),
[self = RefAsSubclass<Chttp2Connector>()] {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->OnTimeout();
});
} else {
// If the handshaking succeeded but there is no endpoint, then the
// handshaker may have handed off the connection to some external
// code. Just verify that exit_early flag is set.
DCHECK((*result)->exit_early);
NullThenSchedClosure(DEBUG_LOCATION, &notify_, result.status());
}
self->Unref();
handshake_mgr_.reset();
}
void Chttp2Connector::OnReceiveSettings(void* arg, grpc_error_handle error) {
@ -380,12 +370,12 @@ grpc_channel* grpc_channel_create_from_fd(const char* target, int fd,
int flags = fcntl(fd, F_GETFL, 0);
CHECK_EQ(fcntl(fd, F_SETFL, flags | O_NONBLOCK), 0);
grpc_endpoint* client = grpc_tcp_create_from_fd(
grpc_core::OrphanablePtr<grpc_endpoint> client(grpc_tcp_create_from_fd(
grpc_fd_create(fd, "client", true),
grpc_event_engine::experimental::ChannelArgsEndpointConfig(final_args),
"fd-client");
"fd-client"));
grpc_core::Transport* transport =
grpc_create_chttp2_transport(final_args, client, true);
grpc_create_chttp2_transport(final_args, std::move(client), true);
CHECK(transport);
auto channel = grpc_core::ChannelCreate(
target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
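Several hunks above also switch raw `grpc_endpoint*` ownership to `OrphanablePtr<grpc_endpoint>`. A minimal sketch of the orphanable convention (illustrative; the real template lives in `src/core/lib/gprpp/orphanable.h`, and for `grpc_endpoint` the release step would presumably delegate to `grpc_endpoint_destroy()`):
```c++
#include <memory>

// unique_ptr whose deleter calls Orphan() instead of delete, letting the
// object defer its own destruction until async work has drained.
template <typename T>
struct OrphanableDelete {
  void operator()(T* p) { p->Orphan(); }
};
template <typename T>
using OrphanablePtr = std::unique_ptr<T, OrphanableDelete<T>>;

struct ToyEndpoint {
  void Orphan() { delete this; }  // a real endpoint may linger until idle
};

int main() {
  OrphanablePtr<ToyEndpoint> ep(new ToyEndpoint);
}  // ep->Orphan() runs here
```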

@ -41,7 +41,7 @@ class Chttp2Connector : public SubchannelConnector {
void Shutdown(grpc_error_handle error) override;
private:
static void OnHandshakeDone(void* arg, grpc_error_handle error);
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
static void OnReceiveSettings(void* arg, grpc_error_handle error);
void OnTimeout() ABSL_LOCKS_EXCLUDED(mu_);

@ -107,6 +107,13 @@ const char kUnixUriPrefix[] = "unix:";
const char kUnixAbstractUriPrefix[] = "unix-abstract:";
const char kVSockUriPrefix[] = "vsock:";
struct AcceptorDeleter {
void operator()(grpc_tcp_server_acceptor* acceptor) const {
gpr_free(acceptor);
}
};
using AcceptorPtr = std::unique_ptr<grpc_tcp_server_acceptor, AcceptorDeleter>;
class Chttp2ServerListener : public Server::ListenerInterface {
public:
static grpc_error_handle Create(Server* server, grpc_resolved_address* addr,
@ -167,15 +174,15 @@ class Chttp2ServerListener : public Server::ListenerInterface {
class HandshakingState : public InternallyRefCounted<HandshakingState> {
public:
HandshakingState(RefCountedPtr<ActiveConnection> connection_ref,
grpc_pollset* accepting_pollset,
grpc_tcp_server_acceptor* acceptor,
grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
const ChannelArgs& args);
~HandshakingState() override;
void Orphan() override;
void Start(grpc_endpoint* endpoint, const ChannelArgs& args);
void Start(OrphanablePtr<grpc_endpoint> endpoint,
const ChannelArgs& args);
// Needed to be able to grab an external ref in
// ActiveConnection::Start()
@ -184,10 +191,10 @@ class Chttp2ServerListener : public Server::ListenerInterface {
private:
void OnTimeout() ABSL_LOCKS_EXCLUDED(&connection_->mu_);
static void OnReceiveSettings(void* arg, grpc_error_handle /* error */);
static void OnHandshakeDone(void* arg, grpc_error_handle error);
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
RefCountedPtr<ActiveConnection> const connection_;
grpc_pollset* const accepting_pollset_;
grpc_tcp_server_acceptor* acceptor_;
AcceptorPtr acceptor_;
RefCountedPtr<HandshakeManager> handshake_mgr_
ABSL_GUARDED_BY(&connection_->mu_);
// State for enforcing handshake timeout on receiving HTTP/2 settings.
@ -198,8 +205,7 @@ class Chttp2ServerListener : public Server::ListenerInterface {
grpc_pollset_set* const interested_parties_;
};
ActiveConnection(grpc_pollset* accepting_pollset,
grpc_tcp_server_acceptor* acceptor,
ActiveConnection(grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
EventEngine* event_engine, const ChannelArgs& args,
MemoryOwner memory_owner);
~ActiveConnection() override;
@ -209,7 +215,7 @@ class Chttp2ServerListener : public Server::ListenerInterface {
void SendGoAway();
void Start(RefCountedPtr<Chttp2ServerListener> listener,
grpc_endpoint* endpoint, const ChannelArgs& args);
OrphanablePtr<grpc_endpoint> endpoint, const ChannelArgs& args);
// Needed to be able to grab an external ref in
// Chttp2ServerListener::OnAccept()
@ -367,11 +373,11 @@ Timestamp GetConnectionDeadline(const ChannelArgs& args) {
Chttp2ServerListener::ActiveConnection::HandshakingState::HandshakingState(
RefCountedPtr<ActiveConnection> connection_ref,
grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor,
grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
const ChannelArgs& args)
: connection_(std::move(connection_ref)),
accepting_pollset_(accepting_pollset),
acceptor_(acceptor),
acceptor_(std::move(acceptor)),
handshake_mgr_(MakeRefCounted<HandshakeManager>()),
deadline_(GetConnectionDeadline(args)),
interested_parties_(grpc_pollset_set_create()) {
@ -387,7 +393,6 @@ Chttp2ServerListener::ActiveConnection::HandshakingState::~HandshakingState() {
grpc_pollset_set_del_pollset(interested_parties_, accepting_pollset_);
}
grpc_pollset_set_destroy(interested_parties_);
gpr_free(acceptor_);
}
void Chttp2ServerListener::ActiveConnection::HandshakingState::Orphan() {
@ -401,16 +406,18 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::Orphan() {
}
void Chttp2ServerListener::ActiveConnection::HandshakingState::Start(
grpc_endpoint* endpoint, const ChannelArgs& channel_args) {
Ref().release(); // Held by OnHandshakeDone
OrphanablePtr<grpc_endpoint> endpoint, const ChannelArgs& channel_args) {
RefCountedPtr<HandshakeManager> handshake_mgr;
{
MutexLock lock(&connection_->mu_);
if (handshake_mgr_ == nullptr) return;
handshake_mgr = handshake_mgr_;
}
handshake_mgr->DoHandshake(endpoint, channel_args, deadline_, acceptor_,
OnHandshakeDone, this);
handshake_mgr->DoHandshake(
std::move(endpoint), channel_args, deadline_, acceptor_.get(),
[self = Ref()](absl::StatusOr<HandshakerArgs*> result) {
self->OnHandshakeDone(std::move(result));
});
}
void Chttp2ServerListener::ActiveConnection::HandshakingState::OnTimeout() {
@ -444,61 +451,50 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::
}
void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
void* arg, grpc_error_handle error) {
auto* args = static_cast<HandshakerArgs*>(arg);
HandshakingState* self = static_cast<HandshakingState*>(args->user_data);
absl::StatusOr<HandshakerArgs*> result) {
OrphanablePtr<HandshakingState> handshaking_state_ref;
RefCountedPtr<HandshakeManager> handshake_mgr;
bool cleanup_connection = false;
bool release_connection = false;
{
MutexLock connection_lock(&self->connection_->mu_);
if (!error.ok() || self->connection_->shutdown_) {
std::string error_str = StatusToString(error);
MutexLock connection_lock(&connection_->mu_);
if (!result.ok() || connection_->shutdown_) {
cleanup_connection = true;
release_connection = true;
if (error.ok() && args->endpoint != nullptr) {
// We were shut down or stopped serving after handshaking completed
// successfully, so destroy the endpoint here.
grpc_endpoint_destroy(args->endpoint);
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
}
} else {
// If the handshaking succeeded but there is no endpoint, then the
// handshaker may have handed off the connection to some external
// code, so we can just clean up here without creating a transport.
if (args->endpoint != nullptr) {
if ((*result)->endpoint != nullptr) {
RefCountedPtr<Transport> transport =
grpc_create_chttp2_transport(args->args, args->endpoint, false)
grpc_create_chttp2_transport((*result)->args,
std::move((*result)->endpoint), false)
->Ref();
grpc_error_handle channel_init_err =
self->connection_->listener_->server_->SetupTransport(
transport.get(), self->accepting_pollset_, args->args,
connection_->listener_->server_->SetupTransport(
transport.get(), accepting_pollset_, (*result)->args,
grpc_chttp2_transport_get_socket_node(transport.get()));
if (channel_init_err.ok()) {
// Use notify_on_receive_settings callback to enforce the
// handshake deadline.
self->connection_->transport_ =
connection_->transport_ =
DownCast<grpc_chttp2_transport*>(transport.get())->Ref();
self->Ref().release(); // Held by OnReceiveSettings().
GRPC_CLOSURE_INIT(&self->on_receive_settings_, OnReceiveSettings,
self, grpc_schedule_on_exec_ctx);
Ref().release(); // Held by OnReceiveSettings().
GRPC_CLOSURE_INIT(&on_receive_settings_, OnReceiveSettings, this,
grpc_schedule_on_exec_ctx);
// If the listener has been configured with a config fetcher, we
// need to watch for the transport being closed so that we can keep
// an updated list of active connections.
grpc_closure* on_close = nullptr;
if (self->connection_->listener_->config_fetcher_watcher_ !=
nullptr) {
if (connection_->listener_->config_fetcher_watcher_ != nullptr) {
// Ref held by OnClose()
self->connection_->Ref().release();
on_close = &self->connection_->on_close_;
connection_->Ref().release();
on_close = &connection_->on_close_;
} else {
// Remove the connection from the connections_ map since OnClose()
// will not be invoked when a config fetcher is set.
auto connection_quota =
self->connection_->listener_->connection_quota_->Ref()
.release();
connection_->listener_->connection_quota_->Ref().release();
auto on_close_transport = [](void* arg,
grpc_error_handle /*handle*/) {
ConnectionQuota* connection_quota =
@ -511,11 +507,10 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
cleanup_connection = true;
}
grpc_chttp2_transport_start_reading(
transport.get(), args->read_buffer, &self->on_receive_settings_,
nullptr, on_close);
self->timer_handle_ = self->connection_->event_engine_->RunAfter(
self->deadline_ - Timestamp::Now(),
[self = self->Ref()]() mutable {
transport.get(), (*result)->read_buffer.c_slice_buffer(),
&on_receive_settings_, nullptr, on_close);
timer_handle_ = connection_->event_engine_->RunAfter(
deadline_ - Timestamp::Now(), [self = Ref()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->OnTimeout();
@ -527,8 +522,6 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
LOG(ERROR) << "Failed to create channel: "
<< StatusToString(channel_init_err);
transport->Orphan();
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
cleanup_connection = true;
release_connection = true;
}
@ -541,25 +534,21 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
// shutdown the handshake when the listener needs to stop serving.
// Avoid calling the destructor of HandshakeManager and HandshakingState
// from within the critical region.
handshake_mgr = std::move(self->handshake_mgr_);
handshaking_state_ref = std::move(self->connection_->handshaking_state_);
handshake_mgr = std::move(handshake_mgr_);
handshaking_state_ref = std::move(connection_->handshaking_state_);
}
gpr_free(self->acceptor_);
self->acceptor_ = nullptr;
OrphanablePtr<ActiveConnection> connection;
if (cleanup_connection) {
MutexLock listener_lock(&self->connection_->listener_->mu_);
MutexLock listener_lock(&connection_->listener_->mu_);
if (release_connection) {
self->connection_->listener_->connection_quota_->ReleaseConnections(1);
connection_->listener_->connection_quota_->ReleaseConnections(1);
}
auto it = self->connection_->listener_->connections_.find(
self->connection_.get());
if (it != self->connection_->listener_->connections_.end()) {
auto it = connection_->listener_->connections_.find(connection_.get());
if (it != connection_->listener_->connections_.end()) {
connection = std::move(it->second);
self->connection_->listener_->connections_.erase(it);
connection_->listener_->connections_.erase(it);
}
}
self->Unref();
}
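Note how the explicit grpc_endpoint_destroy()/grpc_slice_buffer_destroy()/gpr_free() calls vanish from the error paths above: once the endpoint travels as an OrphanablePtr, every early return releases it automatically. A tiny stand-alone illustration, with std::unique_ptr standing in for OrphanablePtr:

#include <cstdio>
#include <memory>

struct Endpoint {
  ~Endpoint() { std::puts("endpoint destroyed"); }
};
using EndpointPtr = std::unique_ptr<Endpoint>;  // stand-in for OrphanablePtr

bool SetupTransport(EndpointPtr ep) {
  if (ep == nullptr) return false;  // error path: nothing leaks
  // The transport (here just a local) takes ownership by moving from ep;
  // any early return before this point would have destroyed the endpoint.
  EndpointPtr owned_by_transport = std::move(ep);
  return true;
}

int main() {
  SetupTransport(std::make_unique<Endpoint>());  // prints exactly once
}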
//
@ -567,11 +556,11 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
//
Chttp2ServerListener::ActiveConnection::ActiveConnection(
grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor,
grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
EventEngine* event_engine, const ChannelArgs& args,
MemoryOwner memory_owner)
: handshaking_state_(memory_owner.MakeOrphanable<HandshakingState>(
Ref(), accepting_pollset, acceptor, args)),
Ref(), accepting_pollset, std::move(acceptor), args)),
event_engine_(event_engine) {
GRPC_CLOSURE_INIT(&on_close_, ActiveConnection::OnClose, this,
grpc_schedule_on_exec_ctx);
@ -625,29 +614,24 @@ void Chttp2ServerListener::ActiveConnection::SendGoAway() {
}
void Chttp2ServerListener::ActiveConnection::Start(
RefCountedPtr<Chttp2ServerListener> listener, grpc_endpoint* endpoint,
const ChannelArgs& args) {
RefCountedPtr<HandshakingState> handshaking_state_ref;
RefCountedPtr<Chttp2ServerListener> listener,
OrphanablePtr<grpc_endpoint> endpoint, const ChannelArgs& args) {
listener_ = std::move(listener);
if (listener_->tcp_server_ != nullptr) {
grpc_tcp_server_ref(listener_->tcp_server_);
}
RefCountedPtr<HandshakingState> handshaking_state_ref;
{
ReleasableMutexLock lock(&mu_);
if (shutdown_) {
lock.Release();
// If the Connection is already shutdown at this point, it implies the
// owning Chttp2ServerListener and all associated ActiveConnections have
// been orphaned. The generated endpoints need to be shut down here to
// ensure the tcp connections are closed appropriately.
grpc_endpoint_destroy(endpoint);
return;
}
MutexLock lock(&mu_);
// If the Connection is already shutdown at this point, it implies the
// owning Chttp2ServerListener and all associated ActiveConnections have
// been orphaned.
if (shutdown_) return;
// Hold a ref to HandshakingState to allow starting the handshake outside
// the critical region.
handshaking_state_ref = handshaking_state_->Ref();
}
handshaking_state_ref->Start(endpoint, args);
handshaking_state_ref->Start(std::move(endpoint), args);
}
void Chttp2ServerListener::ActiveConnection::OnClose(
@ -841,48 +825,41 @@ void Chttp2ServerListener::AcceptConnectedEndpoint(
void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
grpc_pollset* accepting_pollset,
grpc_tcp_server_acceptor* acceptor) {
grpc_tcp_server_acceptor* server_acceptor) {
Chttp2ServerListener* self = static_cast<Chttp2ServerListener*>(arg);
ChannelArgs args = self->args_;
OrphanablePtr<grpc_endpoint> endpoint(tcp);
AcceptorPtr acceptor(server_acceptor);
RefCountedPtr<grpc_server_config_fetcher::ConnectionManager>
connection_manager;
{
MutexLock lock(&self->mu_);
connection_manager = self->connection_manager_;
}
auto endpoint_cleanup = [&]() {
grpc_endpoint_destroy(tcp);
gpr_free(acceptor);
};
if (!self->connection_quota_->AllowIncomingConnection(
self->memory_quota_, grpc_endpoint_get_peer(tcp))) {
endpoint_cleanup();
self->memory_quota_, grpc_endpoint_get_peer(endpoint.get()))) {
return;
}
if (self->config_fetcher_ != nullptr) {
if (connection_manager == nullptr) {
endpoint_cleanup();
return;
}
absl::StatusOr<ChannelArgs> args_result =
connection_manager->UpdateChannelArgsForConnection(args, tcp);
if (!args_result.ok()) {
endpoint_cleanup();
return;
}
grpc_error_handle error;
args = self->args_modifier_(*args_result, &error);
if (!error.ok()) {
endpoint_cleanup();
return;
}
}
auto memory_owner = self->memory_quota_->CreateMemoryOwner();
EventEngine* const event_engine = self->args_.GetObject<EventEngine>();
auto connection = memory_owner.MakeOrphanable<ActiveConnection>(
accepting_pollset, acceptor, event_engine, args, std::move(memory_owner));
// We no longer own acceptor
acceptor = nullptr;
accepting_pollset, std::move(acceptor), event_engine, args,
std::move(memory_owner));
// Hold a ref to connection to allow starting handshake outside the
// critical region
RefCountedPtr<ActiveConnection> connection_ref = connection->Ref();
@ -902,10 +879,8 @@ void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
self->connections_.emplace(connection.get(), std::move(connection));
}
}
if (connection != nullptr) {
endpoint_cleanup();
} else {
connection_ref->Start(std::move(listener_ref), tcp, args);
if (connection == nullptr) {
connection_ref->Start(std::move(listener_ref), std::move(endpoint), args);
}
}
@ -1161,15 +1136,17 @@ void grpc_server_add_channel_from_fd(grpc_server* server, int fd,
std::string name = absl::StrCat("fd:", fd);
auto memory_quota =
server_args.GetObject<grpc_core::ResourceQuota>()->memory_quota();
grpc_endpoint* server_endpoint = grpc_tcp_create_from_fd(
grpc_fd_create(fd, name.c_str(), true),
grpc_event_engine::experimental::ChannelArgsEndpointConfig(server_args),
name);
grpc_core::OrphanablePtr<grpc_endpoint> server_endpoint(
grpc_tcp_create_from_fd(
grpc_fd_create(fd, name.c_str(), true),
grpc_event_engine::experimental::ChannelArgsEndpointConfig(
server_args),
name));
for (grpc_pollset* pollset : core_server->pollsets()) {
grpc_endpoint_add_to_pollset(server_endpoint, pollset);
grpc_endpoint_add_to_pollset(server_endpoint.get(), pollset);
}
grpc_core::Transport* transport = grpc_create_chttp2_transport(
server_args, server_endpoint, false // is_client
server_args, std::move(server_endpoint), false // is_client
);
grpc_error_handle error =
core_server->SetupTransport(transport, nullptr, server_args, nullptr);

@ -53,7 +53,6 @@
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/time.h>
@ -84,6 +83,7 @@
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/event_engine_shims/endpoint.h"
@ -378,8 +378,6 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
channelz_socket.reset();
}
if (ep != nullptr) grpc_endpoint_destroy(ep);
grpc_slice_buffer_destroy(&qbuf);
grpc_error_handle error = GRPC_ERROR_CREATE("Transport destroyed");
@ -420,9 +418,9 @@ static void read_channel_args(grpc_chttp2_transport* t,
channel_args.GetInt(GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER).value_or(-1);
if (initial_sequence_number > 0) {
if ((t->next_stream_id & 1) != (initial_sequence_number & 1)) {
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER, t->next_stream_id & 1,
is_client ? "client" : "server");
LOG(ERROR) << GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER
<< ": low bit must be " << (t->next_stream_id & 1) << " on "
<< (is_client ? "client" : "server");
} else {
t->next_stream_id = static_cast<uint32_t>(initial_sequence_number);
}
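The hunks above and below migrate printf-style gpr_log() to Abseil streaming macros, with GPR_ERROR mapping to LOG(ERROR), GPR_INFO to LOG(INFO), and GPR_DEBUG to VLOG(2). A minimal sketch of the before/after, assuming Abseil logging is linked:

#include "absl/log/log.h"
#include "absl/strings/string_view.h"

void LogExample(absl::string_view peer, int bit, bool is_client) {
  // Old: gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
  //              std::string(peer).c_str(), bit,
  //              is_client ? "client" : "server");
  // New: any type with operator<< streams directly, so the
  // std::string(...).c_str() conversions disappear.
  LOG(ERROR) << peer << ": low bit must be " << bit << " on "
             << (is_client ? "client" : "server");
  VLOG(2) << "debug-level detail for " << peer;  // GPR_DEBUG -> VLOG(2)
}

int main() { LogExample("127.0.0.1:443", 1, true); }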
@ -495,7 +493,7 @@ static void read_channel_args(grpc_chttp2_transport* t,
.value_or(GRPC_ENABLE_CHANNELZ_DEFAULT)) {
t->channelz_socket =
grpc_core::MakeRefCounted<grpc_core::channelz::SocketNode>(
std::string(grpc_endpoint_get_local_address(t->ep)),
std::string(grpc_endpoint_get_local_address(t->ep.get())),
std::string(t->peer_string.as_string_view()),
absl::StrCat(t->GetTransportName(), " ",
t->peer_string.as_string_view()),
@ -528,8 +526,8 @@ static void read_channel_args(grpc_chttp2_transport* t,
t->max_concurrent_streams_policy.SetTarget(value);
}
} else if (channel_args.Contains(GRPC_ARG_MAX_CONCURRENT_STREAMS)) {
gpr_log(GPR_DEBUG, "%s is not available on clients",
GRPC_ARG_MAX_CONCURRENT_STREAMS);
VLOG(2) << GRPC_ARG_MAX_CONCURRENT_STREAMS
<< " is not available on clients";
}
value =
channel_args.GetInt(GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER).value_or(-1);
@ -589,11 +587,11 @@ using grpc_event_engine::experimental::QueryExtension;
using grpc_event_engine::experimental::TcpTraceExtension;
grpc_chttp2_transport::grpc_chttp2_transport(
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
bool is_client)
: ep(ep),
const grpc_core::ChannelArgs& channel_args,
grpc_core::OrphanablePtr<grpc_endpoint> endpoint, bool is_client)
: ep(std::move(endpoint)),
peer_string(
grpc_core::Slice::FromCopiedString(grpc_endpoint_get_peer(ep))),
grpc_core::Slice::FromCopiedString(grpc_endpoint_get_peer(ep.get()))),
memory_owner(channel_args.GetObject<grpc_core::ResourceQuota>()
->memory_quota()
->CreateMemoryOwner()),
@ -617,10 +615,11 @@ grpc_chttp2_transport::grpc_chttp2_transport(
context_list = new grpc_core::ContextList();
if (channel_args.GetBool(GRPC_ARG_TCP_TRACING_ENABLED).value_or(false) &&
grpc_event_engine::experimental::grpc_is_event_engine_endpoint(ep)) {
grpc_event_engine::experimental::grpc_is_event_engine_endpoint(
ep.get())) {
auto epte = QueryExtension<TcpTraceExtension>(
grpc_event_engine::experimental::grpc_get_wrapped_event_engine_endpoint(
ep));
ep.get()));
if (epte != nullptr) {
epte->InitializeAndReturnTcpTracer();
}
@ -763,17 +762,16 @@ static void close_transport_locked(grpc_chttp2_transport* t,
CHECK(t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE);
if (t->interested_parties_until_recv_settings != nullptr) {
grpc_endpoint_delete_from_pollset_set(
t->ep, t->interested_parties_until_recv_settings);
t->ep.get(), t->interested_parties_until_recv_settings);
t->interested_parties_until_recv_settings = nullptr;
}
grpc_core::MutexLock lock(&t->ep_destroy_mu);
grpc_endpoint_destroy(t->ep);
t->ep = nullptr;
t->ep.reset();
}
if (t->notify_on_receive_settings != nullptr) {
if (t->interested_parties_until_recv_settings != nullptr) {
grpc_endpoint_delete_from_pollset_set(
t->ep, t->interested_parties_until_recv_settings);
t->ep.get(), t->interested_parties_until_recv_settings);
t->interested_parties_until_recv_settings = nullptr;
}
grpc_core::ExecCtx::Run(DEBUG_LOCATION, t->notify_on_receive_settings,
@ -927,11 +925,11 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
static void set_write_state(grpc_chttp2_transport* t,
grpc_chttp2_write_state st, const char* reason) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "W:%p %s [%s] state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER",
std::string(t->peer_string.as_string_view()).c_str(),
write_state_name(t->write_state), write_state_name(st), reason));
GRPC_TRACE_LOG(http, INFO)
<< "W:" << t << " " << (t->is_client ? "CLIENT" : "SERVER") << " ["
<< t->peer_string.as_string_view() << "] state "
<< write_state_name(t->write_state) << " -> " << write_state_name(st)
<< " [" << reason << "]";
t->write_state = st;
// If the state is being reset back to idle, it means a write was just
// finished. Make sure all the run_after_write closures are scheduled.
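GRPC_TRACE_LOG(http, INFO) replaces the GRPC_CHTTP2_IF_TRACING(gpr_log(...)) wrapper here. A sketch of the conditional-stream idea behind it, an assumption about the macro's shape rather than its real definition, using Abseil's LOG_IF:

#include "absl/log/log.h"

bool http_trace_enabled = true;  // stand-in for the http trace flag

// Expand to LOG_IF so the streamed arguments are only evaluated when the
// trace flag is on (hypothetical shape, not gRPC's real macro).
#define MY_TRACE_LOG(severity) LOG_IF(severity, http_trace_enabled)

void Example(void* t, const char* from, const char* to, const char* reason) {
  MY_TRACE_LOG(INFO) << "W:" << t << " state " << from << " -> " << to
                     << " [" << reason << "]";
}

int main() {
  int dummy = 0;
  Example(&dummy, "IDLE", "WRITING", "begin writing");
}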
@ -1021,11 +1019,10 @@ static void write_action_begin_locked(
// We had paused reading, because we had many induced frames (SETTINGS
// ACK, PINGS ACK and RST_STREAMS) pending in t->qbuf. Now that we have
// been able to flush qbuf, we can resume reading.
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO,
"transport %p : Resuming reading after being paused due to too "
"many unwritten SETTINGS ACK, PINGS ACK and RST_STREAM frames",
t.get()));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t.get()
<< " : Resuming reading after being paused due to too many unwritten "
"SETTINGS ACK, PINGS ACK and RST_STREAM frames";
t->reading_paused_on_pending_induced_frames = false;
continue_read_action_locked(std::move(t));
}
@ -1061,7 +1058,7 @@ static void write_action(grpc_chttp2_transport* t) {
<< (t->is_client ? "CLIENT" : "SERVER") << "[" << t << "]: Write "
<< t->outbuf.Length() << " bytes";
t->write_size_policy.BeginWrite(t->outbuf.Length());
grpc_endpoint_write(t->ep, t->outbuf.c_slice_buffer(),
grpc_endpoint_write(t->ep.get(), t->outbuf.c_slice_buffer(),
grpc_core::InitTransportClosure<write_action_end>(
t->Ref(), &t->write_action_end_locked),
cl, max_frame_size);
@ -1152,15 +1149,15 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
static_cast<intptr_t>(goaway_error)),
grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE);
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "transport %p got goaway with last stream id %d", t,
last_stream_id));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t << " got goaway with last stream id "
<< last_stream_id;
// We want to log this irrespective of whether http tracing is enabled if we
// received a GOAWAY with a non-NO_ERROR code.
if (goaway_error != GRPC_HTTP2_NO_ERROR) {
gpr_log(GPR_INFO, "%s: Got goaway [%d] err=%s",
std::string(t->peer_string.as_string_view()).c_str(), goaway_error,
grpc_core::StatusToString(t->goaway_error).c_str());
LOG(INFO) << t->peer_string.as_string_view() << ": Got goaway ["
<< goaway_error
<< "] err=" << grpc_core::StatusToString(t->goaway_error);
}
if (t->is_client) {
cancel_unstarted_streams(t, t->goaway_error, false);
@ -1186,12 +1183,11 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
if (GPR_UNLIKELY(t->is_client &&
goaway_error == GRPC_HTTP2_ENHANCE_YOUR_CALM &&
goaway_text == "too_many_pings")) {
gpr_log(GPR_ERROR,
"%s: Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug "
"data equal to \"too_many_pings\". Current keepalive time (before "
"throttling): %s",
std::string(t->peer_string.as_string_view()).c_str(),
t->keepalive_time.ToString().c_str());
LOG(ERROR) << t->peer_string.as_string_view()
<< ": Received a GOAWAY with error code ENHANCE_YOUR_CALM and "
"debug data equal to \"too_many_pings\". Current keepalive "
"time (before throttling): "
<< t->keepalive_time.ToString();
constexpr int max_keepalive_time_millis =
INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER;
int64_t throttled_keepalive_time =
@ -1223,10 +1219,10 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
t->stream_map.size() < t->settings.peer().max_concurrent_streams() &&
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
// safe since we can't (legally) be parsing this stream yet
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO,
"HTTP:%s: Transport %p allocating new grpc_chttp2_stream %p to id %d",
t->is_client ? "CLI" : "SVR", t, s, t->next_stream_id));
GRPC_TRACE_LOG(http, INFO)
<< "HTTP:" << (t->is_client ? "CLI" : "SVR") << ": Transport " << t
<< " allocating new grpc_chttp2_stream " << s << " to id "
<< t->next_stream_id;
CHECK_EQ(s->id, 0u);
s->id = t->next_stream_id;
@ -1289,17 +1285,13 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s whence=%s:%d",
t, closure,
static_cast<int>(closure->next_data.scratch /
CLOSURE_BARRIER_FIRST_REF_BIT),
static_cast<int>(closure->next_data.scratch %
CLOSURE_BARRIER_FIRST_REF_BIT),
desc, grpc_core::StatusToString(error).c_str(),
write_state_name(t->write_state), whence.file(), whence.line());
LOG(INFO) << "complete_closure_step: t=" << t << " " << closure << " refs="
<< (closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT)
<< " flags="
<< (closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT)
<< " desc=" << desc << " err=" << grpc_core::StatusToString(error)
<< " write_state=" << write_state_name(t->write_state)
<< " whence=" << whence.file() << ":" << whence.line();
}
if (!error.ok()) {
@ -1342,7 +1334,7 @@ static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id,
const std::string prefix = absl::StrCat(
"HTTP:", id, is_initial ? ":HDR" : ":TRL", is_client ? ":CLI:" : ":SVR:");
md_batch->Log([&prefix](absl::string_view key, absl::string_view value) {
VLOG(2) << absl::StrCat(prefix, key, ": ", value);
VLOG(2) << prefix << key << ": " << value;
});
}
@ -1359,10 +1351,9 @@ static void perform_stream_op_locked(void* stream_op,
s->call_tracer = CallTracerIfSampled(s);
s->tcp_tracer = TcpTracerIfSampled(s);
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO,
"perform_stream_op_locked[s=%p; op=%p]: %s; on_complete = %p", s,
op, grpc_transport_stream_op_batch_string(op, false).c_str(),
op->on_complete);
LOG(INFO) << "perform_stream_op_locked[s=" << s << "; op=" << op
<< "]: " << grpc_transport_stream_op_batch_string(op, false)
<< "; on_complete = " << op->on_complete;
if (op->send_initial_metadata) {
log_metadata(op_payload->send_initial_metadata.send_initial_metadata,
s->id, t->is_client, true);
@ -1626,8 +1617,8 @@ void grpc_chttp2_transport::PerformStreamOp(
}
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "perform_stream_op[s=%p; op=%p]: %s", s, op,
grpc_transport_stream_op_batch_string(op, false).c_str());
LOG(INFO) << "perform_stream_op[s=" << s << "; op=" << op
<< "]: " << grpc_transport_stream_op_batch_string(op, false);
}
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
@ -1638,8 +1629,8 @@ void grpc_chttp2_transport::PerformStreamOp(
}
static void cancel_pings(grpc_chttp2_transport* t, grpc_error_handle error) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "%p CANCEL PINGS: %s", t,
grpc_core::StatusToString(error).c_str()));
GRPC_TRACE_LOG(http, INFO)
<< t << " CANCEL PINGS: " << grpc_core::StatusToString(error);
// callback remaining pings: they're not allowed to call into the transport,
// and maybe they hold resources that need to be freed
t->ping_callbacks.CancelAll(t->event_engine.get());
@ -1722,8 +1713,8 @@ static void retry_initiate_ping_locked(
void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id) {
if (!t->ping_callbacks.AckPing(id, t->event_engine.get())) {
gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64,
std::string(t->peer_string.as_string_view()).c_str(), id);
VLOG(2) << "Unknown ping response from " << t->peer_string.as_string_view()
<< ": " << id;
return;
}
if (t->ping_callbacks.ping_requested()) {
@ -1733,65 +1724,62 @@ void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id) {
void grpc_chttp2_keepalive_timeout(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
t->combiner->Run(
grpc_core::NewClosure([t](grpc_error_handle) {
gpr_log(GPR_INFO, "%s: Keepalive timeout. Closing transport.",
std::string(t->peer_string.as_string_view()).c_str());
send_goaway(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("keepalive_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("keepalive timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
t->combiner->Run(grpc_core::NewClosure([t](grpc_error_handle) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Keepalive timeout. Closing transport.";
send_goaway(t.get(),
grpc_error_set_int(
GRPC_ERROR_CREATE("keepalive_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(), grpc_error_set_int(
GRPC_ERROR_CREATE("keepalive timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
}
void grpc_chttp2_ping_timeout(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
t->combiner->Run(
grpc_core::NewClosure([t](grpc_error_handle) {
gpr_log(GPR_INFO, "%s: Ping timeout. Closing transport.",
std::string(t->peer_string.as_string_view()).c_str());
send_goaway(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("ping_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("ping timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
t->combiner->Run(grpc_core::NewClosure([t](grpc_error_handle) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Ping timeout. Closing transport.";
send_goaway(t.get(),
grpc_error_set_int(
GRPC_ERROR_CREATE("ping_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(), grpc_error_set_int(
GRPC_ERROR_CREATE("ping timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
}
void grpc_chttp2_settings_timeout(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
t->combiner->Run(
grpc_core::NewClosure([t](grpc_error_handle) {
gpr_log(GPR_INFO, "%s: Settings timeout. Closing transport.",
std::string(t->peer_string.as_string_view()).c_str());
send_goaway(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("settings_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_SETTINGS_TIMEOUT),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("settings timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
t->combiner->Run(grpc_core::NewClosure([t](grpc_error_handle) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Settings timeout. Closing transport.";
send_goaway(t.get(),
grpc_error_set_int(
GRPC_ERROR_CREATE("settings_timeout"),
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_SETTINGS_TIMEOUT),
/*immediate_disconnect_hint=*/true);
close_transport_locked(
t.get(), grpc_error_set_int(
GRPC_ERROR_CREATE("settings timeout"),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_UNAVAILABLE));
}),
absl::OkStatus());
}
namespace {
@ -1828,22 +1816,21 @@ class GracefulGoaway : public grpc_core::RefCounted<GracefulGoaway> {
return;
}
if (t_->destroying || !t_->closed_with_error.ok()) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO,
"transport:%p %s peer:%s Transport already shutting down. "
"Graceful GOAWAY abandoned.",
t_.get(), t_->is_client ? "CLIENT" : "SERVER",
std::string(t_->peer_string.as_string_view()).c_str()));
GRPC_TRACE_LOG(http, INFO) << "transport:" << t_.get() << " "
<< (t_->is_client ? "CLIENT" : "SERVER")
<< " peer:" << t_->peer_string.as_string_view()
<< " Transport already shutting down. "
"Graceful GOAWAY abandoned.";
return;
}
// Ping completed. Send final goaway.
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO,
"transport:%p %s peer:%s Graceful shutdown: Ping received. "
"Sending final GOAWAY with stream_id:%d",
t_.get(), t_->is_client ? "CLIENT" : "SERVER",
std::string(t_->peer_string.as_string_view()).c_str(),
t_->last_new_stream_id));
GRPC_TRACE_LOG(http, INFO)
<< "transport:" << t_.get() << " "
<< (t_->is_client ? "CLIENT" : "SERVER")
<< " peer:" << std::string(t_->peer_string.as_string_view())
<< " Graceful shutdown: Ping received. "
"Sending final GOAWAY with stream_id:"
<< t_->last_new_stream_id;
t_->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
grpc_chttp2_goaway_append(t_->last_new_stream_id, 0, grpc_empty_slice(),
&t_->qbuf);
@ -1887,10 +1874,10 @@ static void send_goaway(grpc_chttp2_transport* t, grpc_error_handle error,
} else if (t->sent_goaway_state == GRPC_CHTTP2_NO_GOAWAY_SEND ||
t->sent_goaway_state == GRPC_CHTTP2_GRACEFUL_GOAWAY) {
// We want to log this irrespective of whether http tracing is enabled
gpr_log(GPR_DEBUG, "%s %s: Sending goaway last_new_stream_id=%d err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
t->is_client ? "CLIENT" : "SERVER", t->last_new_stream_id,
grpc_core::StatusToString(error).c_str());
VLOG(2) << t->peer_string.as_string_view() << " "
<< (t->is_client ? "CLIENT" : "SERVER")
<< ": Sending goaway last_new_stream_id=" << t->last_new_stream_id
<< " err=" << grpc_core::StatusToString(error);
t->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
grpc_chttp2_goaway_append(
t->last_new_stream_id, static_cast<uint32_t>(http_error),
@ -1939,13 +1926,13 @@ static void perform_transport_op_locked(void* stream_op,
if (op->bind_pollset) {
if (t->ep != nullptr) {
grpc_endpoint_add_to_pollset(t->ep, op->bind_pollset);
grpc_endpoint_add_to_pollset(t->ep.get(), op->bind_pollset);
}
}
if (op->bind_pollset_set) {
if (t->ep != nullptr) {
grpc_endpoint_add_to_pollset_set(t->ep, op->bind_pollset_set);
grpc_endpoint_add_to_pollset_set(t->ep.get(), op->bind_pollset_set);
}
}
@ -1974,8 +1961,8 @@ static void perform_transport_op_locked(void* stream_op,
void grpc_chttp2_transport::PerformOp(grpc_transport_op* op) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "perform_transport_op[t=%p]: %s", this,
grpc_transport_op_string(op).c_str());
LOG(INFO) << "perform_transport_op[t=" << this
<< "]: " << grpc_transport_op_string(op);
}
op->handler_private.extra_arg = this;
Ref().release()->combiner->Run(
@ -2027,10 +2014,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
// exited out of at any point by returning.
[&]() {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG,
"maybe_complete_recv_message %p final_metadata_requested=%d "
"seen_error=%d",
s, s->final_metadata_requested, s->seen_error);
VLOG(2) << "maybe_complete_recv_message " << s
<< " final_metadata_requested=" << s->final_metadata_requested
<< " seen_error=" << s->seen_error;
}
if (s->final_metadata_requested && s->seen_error) {
grpc_slice_buffer_reset_and_unref(&s->frame_storage);
@ -2043,10 +2029,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
auto r = grpc_deframe_unprocessed_incoming_frames(
s, &min_progress_size, &**s->recv_message, s->recv_message_flags);
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG, "Deframe data frame: %s",
grpc_core::PollToString(r, [](absl::Status r) {
return r.ToString();
}).c_str());
VLOG(2) << "Deframe data frame: "
<< grpc_core::PollToString(
r, [](absl::Status r) { return r.ToString(); });
}
if (r.pending()) {
if (s->read_closed) {
@ -2099,12 +2084,11 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
grpc_chttp2_stream* s) {
grpc_chttp2_maybe_complete_recv_message(t, s);
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG,
"maybe_complete_recv_trailing_metadata cli=%d s=%p closure=%p "
"read_closed=%d "
"write_closed=%d %" PRIdPTR,
t->is_client, s, s->recv_trailing_metadata_finished, s->read_closed,
s->write_closed, s->frame_storage.length);
VLOG(2) << "maybe_complete_recv_trailing_metadata cli=" << t->is_client
<< " s=" << s << " closure=" << s->recv_trailing_metadata_finished
<< " read_closed=" << s->read_closed
<< " write_closed=" << s->write_closed << " "
<< s->frame_storage.length;
}
if (s->recv_trailing_metadata_finished != nullptr && s->read_closed &&
s->write_closed) {
@ -2310,12 +2294,13 @@ grpc_chttp2_transport::RemovedStreamHandle grpc_chttp2_mark_stream_closed(
int close_writes, grpc_error_handle error) {
grpc_chttp2_transport::RemovedStreamHandle rsh;
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(
GPR_DEBUG, "MARK_STREAM_CLOSED: t=%p s=%p(id=%d) %s [%s]", t, s, s->id,
(close_reads && close_writes)
? "read+write"
: (close_reads ? "read" : (close_writes ? "write" : "nothing??")),
grpc_core::StatusToString(error).c_str());
VLOG(2) << "MARK_STREAM_CLOSED: t=" << t << " s=" << s << "(id=" << s->id
<< ") "
<< ((close_reads && close_writes)
? "read+write"
: (close_reads ? "read"
: (close_writes ? "write" : "nothing??")))
<< " [" << grpc_core::StatusToString(error) << "]";
}
if (s->read_closed && s->write_closed) {
// already closed, but we should still fake the status if needed.
@ -2723,11 +2708,10 @@ static void read_action_parse_loop_locked(
if (keep_reading) {
if (t->num_pending_induced_frames >= DEFAULT_MAX_PENDING_INDUCED_FRAMES) {
t->reading_paused_on_pending_induced_frames = true;
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO,
"transport %p : Pausing reading due to too "
"many unwritten SETTINGS ACK and RST_STREAM frames",
t.get()));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t.get()
<< " : Pausing reading due to too many unwritten "
"SETTINGS ACK and RST_STREAM frames";
} else {
continue_read_action_locked(std::move(t));
}
@ -2742,9 +2726,8 @@ static void read_action_locked(
if (t->keepalive_ping_timeout_handle != TaskHandle::kInvalid) {
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO,
"%s[%p]: Clear keepalive timer because data was received",
t->is_client ? "CLIENT" : "SERVER", t.get());
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t.get()
<< "]: Clear keepalive timer because data was received";
}
t->event_engine->Cancel(
std::exchange(t->keepalive_ping_timeout_handle, TaskHandle::kInvalid));
@ -2763,7 +2746,7 @@ static void continue_read_action_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
const bool urgent = !t->goaway_error.ok();
auto* tp = t.get();
grpc_endpoint_read(tp->ep, &tp->read_buffer,
grpc_endpoint_read(tp->ep.get(), &tp->read_buffer,
grpc_core::InitTransportClosure<read_action>(
std::move(t), &tp->read_action_locked),
urgent, grpc_chttp2_min_read_progress_size(tp));
@ -2795,9 +2778,8 @@ static void start_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "%s: Start BDP ping err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
grpc_core::StatusToString(error).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Start BDP ping err=" << grpc_core::StatusToString(error);
}
if (!error.ok() || !t->closed_with_error.ok()) {
return;
@ -2822,9 +2804,8 @@ static void finish_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
grpc_core::StatusToString(error).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Complete BDP ping err=" << grpc_core::StatusToString(error);
}
if (!error.ok() || !t->closed_with_error.ok()) {
return;
@ -2967,8 +2948,8 @@ static void finish_keepalive_ping_locked(
if (error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO, "%s: Finish keepalive ping",
std::string(t->peer_string.as_string_view()).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Finish keepalive ping";
}
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
CHECK(t->keepalive_ping_timer_handle == TaskHandle::kInvalid);
@ -2989,8 +2970,8 @@ static void maybe_reset_keepalive_ping_timer_locked(grpc_chttp2_transport* t) {
// need to Ref or Unref here since we still hold the Ref.
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO, "%s: Keepalive ping cancelled. Resetting timer.",
std::string(t->peer_string.as_string_view()).c_str());
LOG(INFO) << t->peer_string.as_string_view()
<< ": Keepalive ping cancelled. Resetting timer.";
}
t->keepalive_ping_timer_handle =
t->event_engine->RunAfter(t->keepalive_time, [t = t->Ref()]() mutable {
@ -3009,9 +2990,9 @@ static void connectivity_state_set(grpc_chttp2_transport* t,
grpc_connectivity_state state,
const absl::Status& status,
const char* reason) {
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO, "transport %p set connectivity_state=%d; status=%s; reason=%s",
t, state, status.ToString().c_str(), reason));
GRPC_TRACE_LOG(http, INFO)
<< "transport " << t << " set connectivity_state=" << state
<< "; status=" << status.ToString() << "; reason=" << reason;
t->state_tracker.SetState(state, status, reason);
}
@ -3026,7 +3007,7 @@ void grpc_chttp2_transport::SetPollset(grpc_stream* /*gs*/,
// actually uses pollsets.
if (strcmp(grpc_get_poll_strategy_name(), "poll") != 0) return;
grpc_core::MutexLock lock(&ep_destroy_mu);
if (ep != nullptr) grpc_endpoint_add_to_pollset(ep, pollset);
if (ep != nullptr) grpc_endpoint_add_to_pollset(ep.get(), pollset);
}
void grpc_chttp2_transport::SetPollsetSet(grpc_stream* /*gs*/,
@ -3036,7 +3017,7 @@ void grpc_chttp2_transport::SetPollsetSet(grpc_stream* /*gs*/,
// actually uses pollsets.
if (strcmp(grpc_get_poll_strategy_name(), "poll") != 0) return;
grpc_core::MutexLock lock(&ep_destroy_mu);
if (ep != nullptr) grpc_endpoint_add_to_pollset_set(ep, pollset_set);
if (ep != nullptr) grpc_endpoint_add_to_pollset_set(ep.get(), pollset_set);
}
//
@ -3088,8 +3069,8 @@ static void benign_reclaimer_locked(
// Channel with no active streams: send a goaway to try and make it
// disconnect cleanly
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
std::string(t->peer_string.as_string_view()).c_str());
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - send goaway to free memory";
}
send_goaway(t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
@ -3097,11 +3078,9 @@ static void benign_reclaimer_locked(
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
} else if (error.ok() && GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
std::string(t->peer_string.as_string_view()).c_str(),
t->stream_map.size());
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - skip benign reclamation, there are still "
<< t->stream_map.size() << " streams";
}
t->benign_reclaimer_registered = false;
if (error != absl::CancelledError()) {
@ -3117,8 +3096,8 @@ static void destructive_reclaimer_locked(
// As stream_map is a hash map, this selects effectively a random stream.
grpc_chttp2_stream* s = t->stream_map.begin()->second;
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d",
std::string(t->peer_string.as_string_view()).c_str(), s->id);
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - abandon stream id " << s->id;
}
grpc_chttp2_cancel_stream(
t.get(), s,
@ -3215,9 +3194,9 @@ grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport) {
}
grpc_core::Transport* grpc_create_chttp2_transport(
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
bool is_client) {
return new grpc_chttp2_transport(channel_args, ep, is_client);
const grpc_core::ChannelArgs& channel_args,
grpc_core::OrphanablePtr<grpc_endpoint> ep, bool is_client) {
return new grpc_chttp2_transport(channel_args, std::move(ep), is_client);
}
void grpc_chttp2_transport_start_reading(
@ -3228,7 +3207,6 @@ void grpc_chttp2_transport_start_reading(
auto t = reinterpret_cast<grpc_chttp2_transport*>(transport)->Ref();
if (read_buffer != nullptr) {
grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer);
}
auto* tp = t.get();
tp->combiner->Run(
@ -3240,7 +3218,7 @@ void grpc_chttp2_transport_start_reading(
if (t->ep != nullptr &&
interested_parties_until_recv_settings != nullptr) {
grpc_endpoint_delete_from_pollset_set(
t->ep, interested_parties_until_recv_settings);
t->ep.get(), interested_parties_until_recv_settings);
}
grpc_core::ExecCtx::Run(DEBUG_LOCATION, notify_on_receive_settings,
t->closed_with_error);

@ -44,8 +44,8 @@
/// from the caller; if the caller still needs the resource_user after creating
/// a transport, the caller must take another ref.
grpc_core::Transport* grpc_create_chttp2_transport(
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
bool is_client);
const grpc_core::ChannelArgs& channel_args,
grpc_core::OrphanablePtr<grpc_endpoint> ep, bool is_client);
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport);

@ -110,7 +110,7 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
if (t->notify_on_receive_settings != nullptr) {
if (t->interested_parties_until_recv_settings != nullptr) {
grpc_endpoint_delete_from_pollset_set(
t->ep, t->interested_parties_until_recv_settings);
t->ep.get(), t->interested_parties_until_recv_settings);
t->interested_parties_until_recv_settings = nullptr;
}
grpc_core::ExecCtx::Run(DEBUG_LOCATION,

@ -226,7 +226,8 @@ typedef enum {
struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
public grpc_core::KeepsGrpcInitialized {
grpc_chttp2_transport(const grpc_core::ChannelArgs& channel_args,
grpc_endpoint* ep, bool is_client);
grpc_core::OrphanablePtr<grpc_endpoint> endpoint,
bool is_client);
~grpc_chttp2_transport() override;
void Orphan() override;
@ -257,7 +258,7 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
grpc_pollset_set* pollset_set) override;
void PerformOp(grpc_transport_op* op) override;
grpc_endpoint* ep;
grpc_core::OrphanablePtr<grpc_endpoint> ep;
grpc_core::Mutex ep_destroy_mu; // Guards endpoint destruction only.
grpc_core::Slice peer_string;
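With the raw grpc_endpoint* member replaced by an OrphanablePtr, close_transport_locked() can say t->ep.reset() instead of grpc_endpoint_destroy(). A self-contained sketch of the assumed Orphanable contract (disposal goes through Orphan() rather than delete, so the object can finish async work before freeing itself):

#include <cstdio>

class Orphanable {
 public:
  virtual void Orphan() = 0;

 protected:
  virtual ~Orphanable() = default;
};

template <typename T>
class OrphanablePtrLike {  // stand-in for grpc_core::OrphanablePtr<T>
 public:
  explicit OrphanablePtrLike(T* p) : p_(p) {}
  ~OrphanablePtrLike() { reset(); }
  void reset() {
    if (p_ != nullptr) {
      p_->Orphan();  // disposal goes through Orphan(), never delete
      p_ = nullptr;
    }
  }
  T* get() const { return p_; }

 private:
  T* p_;
};

class Endpoint : public Orphanable {
 public:
  void Orphan() override {
    std::puts("endpoint shutting down, then deleting itself");
    delete this;
  }

 private:
  ~Endpoint() override = default;
};

int main() {
  OrphanablePtrLike<Endpoint> ep(new Endpoint);
  ep.reset();  // what t->ep.reset() maps to in the diff
}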

@ -717,7 +717,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
gpr_log(GPR_INFO,
"[t:%p fd:%d peer:%s] Accepting new stream; "
"num_incoming_streams_before_settings_ack=%u",
t, grpc_endpoint_get_fd(t->ep),
t, grpc_endpoint_get_fd(t->ep.get()),
std::string(t->peer_string.as_string_view()).c_str(),
t->num_incoming_streams_before_settings_ack);
}

@ -676,7 +676,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
num_stream_bytes = t->outbuf.c_slice_buffer()->length - orig_len;
s->byte_counter += static_cast<size_t>(num_stream_bytes);
++s->write_counter;
if (s->traced && grpc_endpoint_can_track_err(t->ep)) {
if (s->traced && grpc_endpoint_can_track_err(t->ep.get())) {
grpc_core::CopyContextFn copy_context_fn =
grpc_core::GrpcHttp2GetCopyContextFn();
if (copy_context_fn != nullptr &&

@ -17,7 +17,9 @@
#include "src/core/handshaker/endpoint_info/endpoint_info_handshaker.h"
#include <memory>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include <grpc/support/port_platform.h>
@ -38,17 +40,17 @@ namespace {
class EndpointInfoHandshaker : public Handshaker {
public:
const char* name() const override { return "endpoint_info"; }
absl::string_view name() const override { return "endpoint_info"; }
void DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) override {
void DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) override {
args->args = args->args
.Set(GRPC_ARG_ENDPOINT_LOCAL_ADDRESS,
grpc_endpoint_get_local_address(args->endpoint))
grpc_endpoint_get_local_address(args->endpoint.get()))
.Set(GRPC_ARG_ENDPOINT_PEER_ADDRESS,
grpc_endpoint_get_peer(args->endpoint));
ExecCtx::Run(DEBUG_LOCATION, on_handshake_done, absl::OkStatus());
grpc_endpoint_get_peer(args->endpoint.get()));
InvokeOnHandshakeDone(args, std::move(on_handshake_done), absl::OkStatus());
}
void Shutdown(grpc_error_handle /*why*/) override {}

@ -23,8 +23,10 @@
#include <string>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include <grpc/byte_buffer.h>
@ -38,23 +40,37 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/event_engine_shims/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
using ::grpc_event_engine::experimental::EventEngine;
namespace grpc_core {
namespace {
void Handshaker::InvokeOnHandshakeDone(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done,
absl::Status status) {
args->event_engine->Run([on_handshake_done = std::move(on_handshake_done),
status = std::move(status)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
on_handshake_done(std::move(status));
// Destroy callback while ExecCtx is still in scope.
on_handshake_done = nullptr;
});
}
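InvokeOnHandshakeDone() hops the callback onto the EventEngine and explicitly nulls it before the lambda returns, so whatever the callback owns is released while the ExecCtx is still in scope. A stand-alone sketch of that pattern (the queue below stands in for EventEngine::Run; assumes Abseil is available):

#include <iostream>
#include <utility>
#include <vector>

#include "absl/functional/any_invocable.h"

// Stand-in for EventEngine::Run(): callbacks queued, executed later.
std::vector<absl::AnyInvocable<void()>> queue;

void Post(absl::AnyInvocable<void(int)> on_done, int status) {
  queue.push_back([on_done = std::move(on_done), status]() mutable {
    // In gRPC an ApplicationCallbackExecCtx/ExecCtx pair is entered here.
    on_done(status);
    on_done = nullptr;  // destroy captured state while the scope is alive
  });
}

int main() {
  Post([](int s) { std::cout << "handshake status " << s << "\n"; }, 0);
  for (auto& f : queue) f();
}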
using ::grpc_event_engine::experimental::EventEngine;
namespace {
std::string HandshakerArgsString(HandshakerArgs* args) {
size_t read_buffer_length =
args->read_buffer != nullptr ? args->read_buffer->length : 0;
return absl::StrFormat(
"{endpoint=%p, args=%s, read_buffer=%p (length=%" PRIuPTR
"), exit_early=%d}",
args->endpoint, args->args.ToString(), args->read_buffer,
read_buffer_length, args->exit_early);
return absl::StrFormat("{endpoint=%p, args=%s, read_buffer.Length()=%" PRIuPTR
", exit_early=%d}",
args->endpoint.get(), args->args.ToString(),
args->read_buffer.Length(), args->exit_early);
}
} // namespace
@ -69,155 +85,129 @@ void HandshakeManager::Add(RefCountedPtr<Handshaker> handshaker) {
gpr_log(
GPR_INFO,
"handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR,
this, handshaker->name(), handshaker.get(), handshakers_.size());
this, std::string(handshaker->name()).c_str(), handshaker.get(),
handshakers_.size());
}
handshakers_.push_back(std::move(handshaker));
}
HandshakeManager::~HandshakeManager() { handshakers_.clear(); }
void HandshakeManager::DoHandshake(
OrphanablePtr<grpc_endpoint> endpoint, const ChannelArgs& channel_args,
Timestamp deadline, grpc_tcp_server_acceptor* acceptor,
absl::AnyInvocable<void(absl::StatusOr<HandshakerArgs*>)>
on_handshake_done) {
MutexLock lock(&mu_);
CHECK_EQ(index_, 0u);
on_handshake_done_ = std::move(on_handshake_done);
// Construct handshaker args. These will be passed through all
// handshakers and eventually be freed by the on_handshake_done callback.
args_.endpoint = std::move(endpoint);
args_.deadline = deadline;
args_.args = channel_args;
args_.event_engine = args_.args.GetObject<EventEngine>();
args_.acceptor = acceptor;
if (acceptor != nullptr && acceptor->external_connection &&
acceptor->pending_data != nullptr) {
grpc_slice_buffer_swap(args_.read_buffer.c_slice_buffer(),
&(acceptor->pending_data->data.raw.slice_buffer));
// TODO(vigneshbabu): For connections accepted through event engine
// listeners, the ownership of the byte buffer received is transferred to
// this callback and it is thus this callback's duty to delete it.
// Make this hack default once event engine is rolled out.
if (grpc_event_engine::experimental::grpc_is_event_engine_endpoint(
args_.endpoint.get())) {
grpc_byte_buffer_destroy(acceptor->pending_data);
}
}
// Start deadline timer, which owns a ref.
const Duration time_to_deadline = deadline - Timestamp::Now();
deadline_timer_handle_ =
args_.event_engine->RunAfter(time_to_deadline, [self = Ref()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->Shutdown(GRPC_ERROR_CREATE("Handshake timed out"));
// HandshakeManager deletion might require an active ExecCtx.
self.reset();
});
// Start first handshaker.
CallNextHandshakerLocked(absl::OkStatus());
}
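The deadline timer above owns its own ref and drops it with self.reset() inside the lambda, because destroying the manager may itself require an active ExecCtx. A minimal stand-alone illustration, with std::shared_ptr standing in for the ref-counted types:

#include <functional>
#include <iostream>
#include <memory>

struct ScopedExecCtx {  // stand-in for ApplicationCallbackExecCtx/ExecCtx
  ScopedExecCtx() { std::cout << "ctx enter\n"; }
  ~ScopedExecCtx() { std::cout << "ctx exit\n"; }
};

struct Manager : std::enable_shared_from_this<Manager> {
  ~Manager() { std::cout << "manager destroyed (needs active ctx)\n"; }
  std::function<void()> ArmDeadline() {
    return [self = shared_from_this()]() mutable {
      ScopedExecCtx ctx;
      std::cout << "handshake timed out\n";
      self.reset();  // drop the ref while ctx is still in scope
    };
  }
};

int main() {
  auto timer_cb = std::make_shared<Manager>()->ArmDeadline();
  timer_cb();  // prints: ctx enter / timed out / destroyed / ctx exit
}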
void HandshakeManager::Shutdown(grpc_error_handle why) {
{
MutexLock lock(&mu_);
void HandshakeManager::Shutdown(absl::Status error) {
MutexLock lock(&mu_);
if (!is_shutdown_) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(GPR_INFO, "handshake_manager %p: Shutdown() called: %s", this,
error.ToString().c_str());
}
is_shutdown_ = true;
// Shutdown the handshaker that's currently in progress, if any.
if (!is_shutdown_ && index_ > 0) {
is_shutdown_ = true;
handshakers_[index_ - 1]->Shutdown(why);
if (index_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(
GPR_INFO,
"handshake_manager %p: shutting down handshaker at index %" PRIuPTR,
this, index_ - 1);
}
handshakers_[index_ - 1]->Shutdown(std::move(error));
}
}
}
// Helper function to call either the next handshaker or the
// on_handshake_done callback.
// Returns true if we've scheduled the on_handshake_done callback.
bool HandshakeManager::CallNextHandshakerLocked(grpc_error_handle error) {
void HandshakeManager::CallNextHandshakerLocked(absl::Status error) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(GPR_INFO,
"handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR
", args=%s",
this, StatusToString(error).c_str(), is_shutdown_, index_,
this, error.ToString().c_str(), is_shutdown_, index_,
HandshakerArgsString(&args_).c_str());
}
CHECK(index_ <= handshakers_.size());
// If we got an error or we've been shut down or we're exiting early or
// we've finished the last handshaker, invoke the on_handshake_done
// callback. Otherwise, call the next handshaker.
// callback.
if (!error.ok() || is_shutdown_ || args_.exit_early ||
index_ == handshakers_.size()) {
if (error.ok() && is_shutdown_) {
error = GRPC_ERROR_CREATE("handshaker shutdown");
// It is possible that the endpoint has already been destroyed by
// a shutdown call while this callback was sitting on the ExecCtx
// with no error.
if (args_.endpoint != nullptr) {
grpc_endpoint_destroy(args_.endpoint);
args_.endpoint = nullptr;
}
if (args_.read_buffer != nullptr) {
grpc_slice_buffer_destroy(args_.read_buffer);
gpr_free(args_.read_buffer);
args_.read_buffer = nullptr;
}
args_.args = ChannelArgs();
args_.endpoint.reset();
}
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(GPR_INFO,
"handshake_manager %p: handshaking complete -- scheduling "
"on_handshake_done with error=%s",
this, StatusToString(error).c_str());
this, error.ToString().c_str());
}
// Cancel deadline timer, since we're invoking the on_handshake_done
// callback now.
event_engine_->Cancel(deadline_timer_handle_);
ExecCtx::Run(DEBUG_LOCATION, &on_handshake_done_, error);
args_.event_engine->Cancel(deadline_timer_handle_);
is_shutdown_ = true;
} else {
auto handshaker = handshakers_[index_];
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(
GPR_INFO,
"handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
this, handshaker->name(), handshaker.get(), index_);
}
handshaker->DoHandshake(acceptor_, &call_next_handshaker_, &args_);
}
++index_;
return is_shutdown_;
}
void HandshakeManager::CallNextHandshakerFn(void* arg,
grpc_error_handle error) {
auto* mgr = static_cast<HandshakeManager*>(arg);
bool done;
{
MutexLock lock(&mgr->mu_);
done = mgr->CallNextHandshakerLocked(error);
}
// If we've invoked the final callback, we won't be coming back
// to this function, so we can release our reference to the
// handshake manager.
if (done) {
mgr->Unref();
}
}
void HandshakeManager::DoHandshake(grpc_endpoint* endpoint,
const ChannelArgs& channel_args,
Timestamp deadline,
grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done,
void* user_data) {
bool done;
{
MutexLock lock(&mu_);
CHECK_EQ(index_, 0u);
// Construct handshaker args. These will be passed through all
// handshakers and eventually be freed by the on_handshake_done callback.
args_.endpoint = endpoint;
args_.deadline = deadline;
args_.args = channel_args;
args_.user_data = user_data;
args_.read_buffer =
static_cast<grpc_slice_buffer*>(gpr_malloc(sizeof(*args_.read_buffer)));
grpc_slice_buffer_init(args_.read_buffer);
if (acceptor != nullptr && acceptor->external_connection &&
acceptor->pending_data != nullptr) {
grpc_slice_buffer_swap(args_.read_buffer,
&(acceptor->pending_data->data.raw.slice_buffer));
// TODO(vigneshbabu): For connections accepted through event engine
// listeners, the ownership of the byte buffer received is transferred to
// this callback and it is thus this callback's duty to delete it.
// Make this hack default once event engine is rolled out.
if (grpc_event_engine::experimental::grpc_is_event_engine_endpoint(
endpoint)) {
grpc_byte_buffer_destroy(acceptor->pending_data);
}
}
// Initialize state needed for calling handshakers.
acceptor_ = acceptor;
GRPC_CLOSURE_INIT(&call_next_handshaker_,
&HandshakeManager::CallNextHandshakerFn, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_handshake_done_, on_handshake_done, &args_,
grpc_schedule_on_exec_ctx);
// Start deadline timer, which owns a ref.
const Duration time_to_deadline = deadline - Timestamp::Now();
event_engine_ = args_.args.GetObjectRef<EventEngine>();
deadline_timer_handle_ =
event_engine_->RunAfter(time_to_deadline, [self = Ref()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->Shutdown(GRPC_ERROR_CREATE("Handshake timed out"));
// HandshakeManager deletion might require an active ExecCtx.
self.reset();
});
// Start first handshaker, which also owns a ref.
Ref().release();
done = CallNextHandshakerLocked(absl::OkStatus());
absl::StatusOr<HandshakerArgs*> result(&args_);
if (!error.ok()) result = std::move(error);
args_.event_engine->Run([on_handshake_done = std::move(on_handshake_done_),
result = std::move(result)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
on_handshake_done(std::move(result));
// Destroy callback while ExecCtx is still in scope.
on_handshake_done = nullptr;
});
return;
}
if (done) {
Unref();
// Call the next handshaker.
auto handshaker = handshakers_[index_];
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(
GPR_INFO,
"handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
this, std::string(handshaker->name()).c_str(), handshaker.get(),
index_);
}
++index_;
handshaker->DoHandshake(&args_, [self = Ref()](absl::Status error) mutable {
MutexLock lock(&self->mu_);
self->CallNextHandshakerLocked(std::move(error));
});
}
} // namespace grpc_core
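A compact sketch of the chaining logic in CallNextHandshakerLocked(), with invented stand-in types: run handshakers in order, short-circuit on error or exit_early, and fire the final callback exactly once:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Args { bool exit_early = false; };

using Done = std::function<void(std::string /*error*/)>;
using Handshaker = std::function<void(Args*, Done)>;

void RunChain(std::vector<Handshaker> handshakers, Args* args, Done on_done,
              size_t index = 0, std::string error = "") {
  // Stop on error, early exit, or when the list is exhausted.
  if (!error.empty() || args->exit_early || index == handshakers.size()) {
    on_done(error);
    return;
  }
  // Each handshaker's completion callback advances the chain.
  handshakers[index](args, [=](std::string err) {
    RunChain(handshakers, args, on_done, index + 1, std::move(err));
  });
}

int main() {
  Args args;
  RunChain({[](Args*, Done d) { std::cout << "tcp\n"; d(""); },
            [](Args*, Done d) { std::cout << "tls\n"; d(""); }},
           &args,
           [](std::string e) { std::cout << "done, error=\"" << e << "\"\n"; });
}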

@ -31,6 +31,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
@ -39,6 +40,7 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/slice/slice_buffer.h"
namespace grpc_core {
@ -49,34 +51,35 @@ namespace grpc_core {
///
/// In general, handshakers should be used via a handshake manager.
/// Arguments passed through handshakers and to the on_handshake_done callback.
/// Arguments passed through handshakers and back to the caller.
///
/// For handshakers, all members are input/output parameters; for
/// example, a handshaker may read from or write to \a endpoint and
/// then later replace it with a wrapped endpoint. Similarly, a
/// handshaker may modify \a args.
///
/// A handshaker takes ownership of the members while a handshake is in
/// progress. Upon failure or shutdown of an in-progress handshaker,
/// the handshaker is responsible for destroying the members and setting
/// them to NULL before invoking the on_handshake_done callback.
///
/// For the on_handshake_done callback, all members are input arguments,
/// which the callback takes ownership of.
/// A handshaker takes ownership of the members when this struct is
/// passed to DoHandshake(). It passes ownership back to the caller
/// when it invokes on_handshake_done.
struct HandshakerArgs {
grpc_endpoint* endpoint = nullptr;
OrphanablePtr<grpc_endpoint> endpoint;
ChannelArgs args;
grpc_slice_buffer* read_buffer = nullptr;
// Any bytes read from the endpoint that are not consumed by the
// handshaker must be passed back via this buffer.
SliceBuffer read_buffer;
// A handshaker may set this to true before invoking on_handshake_done
// to indicate that subsequent handshakers should be skipped.
bool exit_early = false;
// User data passed through the handshake manager. Not used by
// individual handshakers.
void* user_data = nullptr;
// EventEngine to use for async work.
// (This is just a convenience to avoid digging it out of args.)
grpc_event_engine::experimental::EventEngine* event_engine = nullptr;
// Deadline associated with the handshake.
// TODO(anramach): Move this out of handshake args after EventEngine
// is the default.
Timestamp deadline;
// TODO(roth): Make this go away somehow as part of the EventEngine
// migration?
grpc_tcp_server_acceptor* acceptor = nullptr;
};
///
@ -86,11 +89,23 @@ struct HandshakerArgs {
class Handshaker : public RefCounted<Handshaker> {
public:
~Handshaker() override = default;
virtual void Shutdown(grpc_error_handle why) = 0;
virtual void DoHandshake(grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done,
HandshakerArgs* args) = 0;
virtual const char* name() const = 0;
virtual absl::string_view name() const = 0;
virtual void DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) = 0;
virtual void Shutdown(absl::Status error) = 0;
protected:
// Helper function to safely invoke on_handshake_done asynchronously.
//
// Note that on_handshake_done may complete in another thread as soon
// as this method returns, so the handshaker object may be destroyed
// by the callback unless the caller of this method is holding its own
// ref to the handshaker.
static void InvokeOnHandshakeDone(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done,
absl::Status status);
};
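A hypothetical implementation of the new interface above (the class name is invented and this compiles only inside the gRPC tree), mirroring the endpoint_info handshaker earlier in this diff: do whatever work is needed on args, then finish through the InvokeOnHandshakeDone() helper.

class NoopHandshaker : public grpc_core::Handshaker {
 public:
  absl::string_view name() const override { return "noop"; }
  void DoHandshake(grpc_core::HandshakerArgs* args,
                   absl::AnyInvocable<void(absl::Status)> on_handshake_done)
      override {
    // Nothing to negotiate: leave args->endpoint and args->read_buffer
    // untouched and hand ownership straight back to the manager.
    InvokeOnHandshakeDone(args, std::move(on_handshake_done),
                          absl::OkStatus());
  }
  void Shutdown(absl::Status /*error*/) override {}
};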
//
@ -100,16 +115,11 @@ class Handshaker : public RefCounted<Handshaker> {
class HandshakeManager : public RefCounted<HandshakeManager> {
public:
HandshakeManager();
~HandshakeManager() override;
/// Adds a handshaker to the handshake manager.
/// Takes ownership of \a handshaker.
void Add(RefCountedPtr<Handshaker> handshaker) ABSL_LOCKS_EXCLUDED(mu_);
/// Shuts down the handshake manager (e.g., to clean up when the operation is
/// aborted in the middle).
void Shutdown(grpc_error_handle why) ABSL_LOCKS_EXCLUDED(mu_);
/// Invokes handshakers in the order they were added.
/// Takes ownership of \a endpoint, and then passes that ownership to
/// the \a on_handshake_done callback.
@ -122,41 +132,39 @@ class HandshakeManager : public RefCounted<HandshakeManager> {
/// absl::OkStatus(), then handshaking failed and the handshaker has done
/// the necessary clean-up. Otherwise, the callback takes ownership of
/// the arguments.
void DoHandshake(grpc_endpoint* endpoint, const ChannelArgs& channel_args,
Timestamp deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data)
ABSL_LOCKS_EXCLUDED(mu_);
void DoHandshake(OrphanablePtr<grpc_endpoint> endpoint,
const ChannelArgs& channel_args, Timestamp deadline,
grpc_tcp_server_acceptor* acceptor,
absl::AnyInvocable<void(absl::StatusOr<HandshakerArgs*>)>
on_handshake_done) ABSL_LOCKS_EXCLUDED(mu_);
private:
bool CallNextHandshakerLocked(grpc_error_handle error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
/// Shuts down the handshake manager (e.g., to clean up when the operation is
/// aborted in the middle).
void Shutdown(absl::Status error) ABSL_LOCKS_EXCLUDED(mu_);
private:
// A function used as the handshaker-done callback when chaining
// handshakers together.
static void CallNextHandshakerFn(void* arg, grpc_error_handle error)
ABSL_LOCKS_EXCLUDED(mu_);
void CallNextHandshakerLocked(absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
static const size_t HANDSHAKERS_INIT_SIZE = 2;
static const size_t kHandshakerListInlineSize = 2;
Mutex mu_;
bool is_shutdown_ ABSL_GUARDED_BY(mu_) = false;
// An array of handshakers added via grpc_handshake_manager_add().
absl::InlinedVector<RefCountedPtr<Handshaker>, HANDSHAKERS_INIT_SIZE>
handshakers_ ABSL_GUARDED_BY(mu_);
// The index of the handshaker to invoke next.
size_t index_ ABSL_GUARDED_BY(mu_) = 0;
grpc_closure call_next_handshaker_ ABSL_GUARDED_BY(mu_);
// The acceptor to call the handshakers with.
grpc_tcp_server_acceptor* acceptor_ ABSL_GUARDED_BY(mu_);
// The final callback and user_data to invoke after the last handshaker.
grpc_closure on_handshake_done_ ABSL_GUARDED_BY(mu_);
// An array of handshakers added via Add().
absl::InlinedVector<RefCountedPtr<Handshaker>, kHandshakerListInlineSize>
handshakers_ ABSL_GUARDED_BY(mu_);
// Handshaker args.
HandshakerArgs args_ ABSL_GUARDED_BY(mu_);
// The final callback to invoke after the last handshaker.
absl::AnyInvocable<void(absl::StatusOr<HandshakerArgs*>)> on_handshake_done_
ABSL_GUARDED_BY(mu_);
// Deadline timer across all handshakers.
grpc_event_engine::experimental::EventEngine::TaskHandle
deadline_timer_handle_ ABSL_GUARDED_BY(mu_);
std::shared_ptr<grpc_event_engine::experimental::EventEngine> event_engine_
ABSL_GUARDED_BY(mu_);
};
} // namespace grpc_core
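// Illustrative sketch only (not part of this diff): driving the new
// callback-based HandshakeManager::DoHandshake(). Assumes `endpoint` and
// `channel_args` come from the caller and reuses the hypothetical
// NoOpHandshaker sketched above (assumed to live in grpc_core).
void RunHandshakeExample(grpc_core::OrphanablePtr<grpc_endpoint> endpoint,
                         const grpc_core::ChannelArgs& channel_args) {
  auto mgr = grpc_core::MakeRefCounted<grpc_core::HandshakeManager>();
  mgr->Add(grpc_core::MakeRefCounted<grpc_core::NoOpHandshaker>());
  mgr->DoHandshake(
      std::move(endpoint), channel_args,
      grpc_core::Timestamp::Now() + grpc_core::Duration::Seconds(20),
      /*acceptor=*/nullptr,
      [](absl::StatusOr<grpc_core::HandshakerArgs*> result) {
        if (!result.ok()) return;  // Failed; members were already cleaned up.
        // On success the callback now owns (*result)->endpoint and any
        // unconsumed bytes left in (*result)->read_buffer.
      });
}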

@@ -23,6 +23,7 @@
#include <memory>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
@@ -50,6 +51,8 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/util/http_client/format_request.h"
#include "src/core/util/http_client/parser.h"
#include "src/core/util/string.h"
@@ -61,165 +64,148 @@ namespace {
class HttpConnectHandshaker : public Handshaker {
public:
HttpConnectHandshaker();
void Shutdown(grpc_error_handle why) override;
void DoHandshake(grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done,
HandshakerArgs* args) override;
const char* name() const override { return "http_connect"; }
absl::string_view name() const override { return "http_connect"; }
void DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) override;
void Shutdown(absl::Status error) override;
private:
~HttpConnectHandshaker() override;
void CleanupArgsForFailureLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void HandshakeFailedLocked(grpc_error_handle error)
void HandshakeFailedLocked(absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
static void OnWriteDone(void* arg, grpc_error_handle error);
static void OnReadDone(void* arg, grpc_error_handle error);
void FinishLocked(absl::Status error) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void OnWriteDone(absl::Status error);
void OnReadDone(absl::Status error);
bool OnReadDoneLocked(absl::Status error) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
static void OnWriteDoneScheduler(void* arg, grpc_error_handle error);
static void OnReadDoneScheduler(void* arg, grpc_error_handle error);
Mutex mu_;
bool is_shutdown_ ABSL_GUARDED_BY(mu_) = false;
// Read buffer to destroy after a shutdown.
grpc_slice_buffer* read_buffer_to_destroy_ ABSL_GUARDED_BY(mu_) = nullptr;
// State saved while performing the handshake.
HandshakerArgs* args_ = nullptr;
grpc_closure* on_handshake_done_ = nullptr;
absl::AnyInvocable<void(absl::Status)> on_handshake_done_
ABSL_GUARDED_BY(mu_);
// Objects for processing the HTTP CONNECT request and response.
grpc_slice_buffer write_buffer_ ABSL_GUARDED_BY(mu_);
grpc_closure request_done_closure_ ABSL_GUARDED_BY(mu_);
grpc_closure response_read_closure_ ABSL_GUARDED_BY(mu_);
SliceBuffer write_buffer_ ABSL_GUARDED_BY(mu_);
grpc_closure on_write_done_scheduler_ ABSL_GUARDED_BY(mu_);
grpc_closure on_read_done_scheduler_ ABSL_GUARDED_BY(mu_);
grpc_http_parser http_parser_ ABSL_GUARDED_BY(mu_);
grpc_http_response http_response_ ABSL_GUARDED_BY(mu_);
};
HttpConnectHandshaker::~HttpConnectHandshaker() {
if (read_buffer_to_destroy_ != nullptr) {
grpc_slice_buffer_destroy(read_buffer_to_destroy_);
gpr_free(read_buffer_to_destroy_);
}
grpc_slice_buffer_destroy(&write_buffer_);
grpc_http_parser_destroy(&http_parser_);
grpc_http_response_destroy(&http_response_);
}
// Set args fields to nullptr, saving the endpoint and read buffer for
// later destruction.
void HttpConnectHandshaker::CleanupArgsForFailureLocked() {
read_buffer_to_destroy_ = args_->read_buffer;
args_->read_buffer = nullptr;
args_->args = ChannelArgs();
}
// If the handshake failed or we're shutting down, clean up and invoke the
// callback with the error.
void HttpConnectHandshaker::HandshakeFailedLocked(grpc_error_handle error) {
void HttpConnectHandshaker::HandshakeFailedLocked(absl::Status error) {
if (error.ok()) {
// If we were shut down after an endpoint operation succeeded but
// before the endpoint callback was invoked, we need to generate our
// own error.
error = GRPC_ERROR_CREATE("Handshaker shutdown");
}
if (!is_shutdown_) {
// Not shutting down, so the handshake failed. Clean up before
// invoking the callback.
grpc_endpoint_destroy(args_->endpoint);
args_->endpoint = nullptr;
CleanupArgsForFailureLocked();
// Set shutdown to true so that subsequent calls to
// http_connect_handshaker_shutdown() do nothing.
is_shutdown_ = true;
}
// Invoke callback.
ExecCtx::Run(DEBUG_LOCATION, on_handshake_done_, error);
FinishLocked(std::move(error));
}
void HttpConnectHandshaker::FinishLocked(absl::Status error) {
InvokeOnHandshakeDone(args_, std::move(on_handshake_done_), std::move(error));
}
// This callback can be invoked inline while already holding onto the mutex,
// so run OnWriteDone asynchronously on the EventEngine to avoid a deadlock.
// TODO(roth): This hop will no longer be needed when we migrate to the
// EventEngine endpoint API.
void HttpConnectHandshaker::OnWriteDoneScheduler(void* arg,
grpc_error_handle error) {
auto* handshaker = static_cast<HttpConnectHandshaker*>(arg);
ExecCtx::Run(DEBUG_LOCATION,
GRPC_CLOSURE_INIT(&handshaker->request_done_closure_,
&HttpConnectHandshaker::OnWriteDone,
handshaker, grpc_schedule_on_exec_ctx),
error);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnWriteDone(std::move(error));
});
}
// Callback invoked when finished writing HTTP CONNECT request.
void HttpConnectHandshaker::OnWriteDone(void* arg, grpc_error_handle error) {
auto* handshaker = static_cast<HttpConnectHandshaker*>(arg);
ReleasableMutexLock lock(&handshaker->mu_);
if (!error.ok() || handshaker->is_shutdown_) {
void HttpConnectHandshaker::OnWriteDone(absl::Status error) {
ReleasableMutexLock lock(&mu_);
if (!error.ok() || args_->endpoint == nullptr) {
// If the write failed or we're shutting down, clean up and invoke the
// callback with the error.
handshaker->HandshakeFailedLocked(error);
HandshakeFailedLocked(error);
lock.Release();
handshaker->Unref();
Unref();
} else {
// Otherwise, read the response.
// The read callback inherits our ref to the handshaker.
grpc_endpoint_read(
handshaker->args_->endpoint, handshaker->args_->read_buffer,
GRPC_CLOSURE_INIT(&handshaker->response_read_closure_,
&HttpConnectHandshaker::OnReadDoneScheduler,
handshaker, grpc_schedule_on_exec_ctx),
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(&on_read_done_scheduler_,
&HttpConnectHandshaker::OnReadDoneScheduler, this,
grpc_schedule_on_exec_ctx),
/*urgent=*/true, /*min_progress_size=*/1);
}
}
// This callback can be invoked inline while already holding onto the mutex,
// so run OnReadDone asynchronously on the EventEngine to avoid a deadlock.
// TODO(roth): This hop will no longer be needed when we migrate to the
// EventEngine endpoint API.
void HttpConnectHandshaker::OnReadDoneScheduler(void* arg,
grpc_error_handle error) {
auto* handshaker = static_cast<HttpConnectHandshaker*>(arg);
ExecCtx::Run(DEBUG_LOCATION,
GRPC_CLOSURE_INIT(&handshaker->response_read_closure_,
&HttpConnectHandshaker::OnReadDone, handshaker,
grpc_schedule_on_exec_ctx),
error);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnReadDone(std::move(error));
});
}
// Callback invoked for reading HTTP CONNECT response.
void HttpConnectHandshaker::OnReadDone(void* arg, grpc_error_handle error) {
auto* handshaker = static_cast<HttpConnectHandshaker*>(arg);
ReleasableMutexLock lock(&handshaker->mu_);
if (!error.ok() || handshaker->is_shutdown_) {
void HttpConnectHandshaker::OnReadDone(absl::Status error) {
bool done;
{
MutexLock lock(&mu_);
done = OnReadDoneLocked(std::move(error));
}
if (done) Unref();
}
bool HttpConnectHandshaker::OnReadDoneLocked(absl::Status error) {
if (!error.ok() || args_->endpoint == nullptr) {
// If the read failed or we're shutting down, clean up and invoke the
// callback with the error.
handshaker->HandshakeFailedLocked(error);
goto done;
HandshakeFailedLocked(std::move(error));
return true;
}
// Add buffer to parser.
for (size_t i = 0; i < handshaker->args_->read_buffer->count; ++i) {
if (GRPC_SLICE_LENGTH(handshaker->args_->read_buffer->slices[i]) > 0) {
while (args_->read_buffer.Count() > 0) {
Slice slice = args_->read_buffer.TakeFirst();
if (slice.length() > 0) {
size_t body_start_offset = 0;
error = grpc_http_parser_parse(&handshaker->http_parser_,
handshaker->args_->read_buffer->slices[i],
error = grpc_http_parser_parse(&http_parser_, slice.c_slice(),
&body_start_offset);
if (!error.ok()) {
handshaker->HandshakeFailedLocked(error);
goto done;
HandshakeFailedLocked(std::move(error));
return true;
}
if (handshaker->http_parser_.state == GRPC_HTTP_BODY) {
if (http_parser_.state == GRPC_HTTP_BODY) {
// Remove the data we've already read from the read buffer,
// leaving only the leftover bytes (if any).
grpc_slice_buffer tmp_buffer;
grpc_slice_buffer_init(&tmp_buffer);
if (body_start_offset <
GRPC_SLICE_LENGTH(handshaker->args_->read_buffer->slices[i])) {
grpc_slice_buffer_add(
&tmp_buffer,
grpc_slice_split_tail(&handshaker->args_->read_buffer->slices[i],
body_start_offset));
SliceBuffer tmp_buffer;
if (body_start_offset < slice.length()) {
tmp_buffer.Append(slice.Split(body_start_offset));
}
grpc_slice_buffer_addn(&tmp_buffer,
&handshaker->args_->read_buffer->slices[i + 1],
handshaker->args_->read_buffer->count - i - 1);
grpc_slice_buffer_swap(handshaker->args_->read_buffer, &tmp_buffer);
grpc_slice_buffer_destroy(&tmp_buffer);
tmp_buffer.TakeAndAppend(args_->read_buffer);
tmp_buffer.Swap(&args_->read_buffer);
break;
}
}
@@ -235,65 +221,46 @@ void HttpConnectHandshaker::OnReadDone(void* arg, grpc_error_handle error) {
// need to fix the HTTP parser to understand when the body is
// complete (e.g., handling chunked transfer encoding or looking
// at the Content-Length: header).
if (handshaker->http_parser_.state != GRPC_HTTP_BODY) {
grpc_slice_buffer_reset_and_unref(handshaker->args_->read_buffer);
if (http_parser_.state != GRPC_HTTP_BODY) {
args_->read_buffer.Clear();
grpc_endpoint_read(
handshaker->args_->endpoint, handshaker->args_->read_buffer,
GRPC_CLOSURE_INIT(&handshaker->response_read_closure_,
&HttpConnectHandshaker::OnReadDoneScheduler,
handshaker, grpc_schedule_on_exec_ctx),
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(&on_read_done_scheduler_,
&HttpConnectHandshaker::OnReadDoneScheduler, this,
grpc_schedule_on_exec_ctx),
/*urgent=*/true, /*min_progress_size=*/1);
return;
return false;
}
// Make sure we got a 2xx response.
if (handshaker->http_response_.status < 200 ||
handshaker->http_response_.status >= 300) {
if (http_response_.status < 200 || http_response_.status >= 300) {
error = GRPC_ERROR_CREATE(absl::StrCat("HTTP proxy returned response code ",
handshaker->http_response_.status));
handshaker->HandshakeFailedLocked(error);
goto done;
http_response_.status));
HandshakeFailedLocked(std::move(error));
return true;
}
// Success. Invoke handshake-done callback.
ExecCtx::Run(DEBUG_LOCATION, handshaker->on_handshake_done_, error);
done:
// Set shutdown to true so that subsequent calls to
// http_connect_handshaker_shutdown() do nothing.
handshaker->is_shutdown_ = true;
lock.Release();
handshaker->Unref();
FinishLocked(absl::OkStatus());
return true;
}
//
// Public handshaker methods
//
void HttpConnectHandshaker::Shutdown(grpc_error_handle /*why*/) {
{
MutexLock lock(&mu_);
if (!is_shutdown_) {
is_shutdown_ = true;
grpc_endpoint_destroy(args_->endpoint);
args_->endpoint = nullptr;
CleanupArgsForFailureLocked();
}
}
void HttpConnectHandshaker::Shutdown(absl::Status /*error*/) {
MutexLock lock(&mu_);
if (on_handshake_done_ != nullptr) args_->endpoint.reset();
}
void HttpConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) {
void HttpConnectHandshaker::DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) {
// Check for HTTP CONNECT channel arg.
// If not found, invoke on_handshake_done without doing anything.
absl::optional<absl::string_view> server_name =
args->args.GetString(GRPC_ARG_HTTP_CONNECT_SERVER);
if (!server_name.has_value()) {
// Set shutdown to true so that subsequent calls to
// http_connect_handshaker_shutdown() do nothing.
{
MutexLock lock(&mu_);
is_shutdown_ = true;
}
ExecCtx::Run(DEBUG_LOCATION, on_handshake_done, absl::OkStatus());
InvokeOnHandshakeDone(args, std::move(on_handshake_done), absl::OkStatus());
return;
}
// Get headers from channel args.
@@ -311,7 +278,6 @@ void HttpConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
gpr_malloc(sizeof(grpc_http_header) * num_header_strings));
for (size_t i = 0; i < num_header_strings; ++i) {
char* sep = strchr(header_strings[i], ':');
if (sep == nullptr) {
gpr_log(GPR_ERROR, "skipping unparseable HTTP CONNECT header: %s",
header_strings[i]);
@@ -326,9 +292,9 @@ void HttpConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
// Save state in the handshaker object.
MutexLock lock(&mu_);
args_ = args;
on_handshake_done_ = on_handshake_done;
on_handshake_done_ = std::move(on_handshake_done);
// Log connection via proxy.
std::string proxy_name(grpc_endpoint_get_peer(args->endpoint));
std::string proxy_name(grpc_endpoint_get_peer(args->endpoint.get()));
std::string server_name_string(*server_name);
gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s",
server_name_string.c_str(), proxy_name.c_str());
@@ -342,7 +308,7 @@ void HttpConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
request.body = nullptr;
grpc_slice request_slice = grpc_httpcli_format_connect_request(
&request, server_name_string.c_str(), server_name_string.c_str());
grpc_slice_buffer_add(&write_buffer_, request_slice);
write_buffer_.Append(Slice(request_slice));
// Clean up.
gpr_free(headers);
for (size_t i = 0; i < num_header_strings; ++i) {
@@ -352,15 +318,14 @@ void HttpConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
// Take a new ref to be held by the write callback.
Ref().release();
grpc_endpoint_write(
args->endpoint, &write_buffer_,
GRPC_CLOSURE_INIT(&request_done_closure_,
args->endpoint.get(), write_buffer_.c_slice_buffer(),
GRPC_CLOSURE_INIT(&on_write_done_scheduler_,
&HttpConnectHandshaker::OnWriteDoneScheduler, this,
grpc_schedule_on_exec_ctx),
nullptr, /*max_frame_size=*/INT_MAX);
}
HttpConnectHandshaker::HttpConnectHandshaker() {
grpc_slice_buffer_init(&write_buffer_);
grpc_http_parser_init(&http_parser_, GRPC_HTTP_RESPONSE, &http_response_);
}
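// Illustrative sketch only (not part of this diff): the handshaker above is a
// no-op unless the channel args carry GRPC_ARG_HTTP_CONNECT_SERVER; a caller
// opts in roughly like this (the header value is a placeholder):
grpc_core::ChannelArgs WithHttpConnect(const grpc_core::ChannelArgs& args) {
  return args
      .Set(GRPC_ARG_HTTP_CONNECT_SERVER, "backend.example.com:443")
      // Optional extra request headers, newline-separated "name:value" pairs.
      .Set(GRPC_ARG_HTTP_CONNECT_HEADERS, "Proxy-Authorization:Basic <token>");
}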

@@ -23,6 +23,7 @@
#include <algorithm>
#include <atomic>
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/log/check.h"
@@ -43,9 +44,11 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
@@ -64,17 +67,18 @@ static void on_read(void* user_data, grpc_error_handle error);
static void on_write(void* user_data, grpc_error_handle error);
namespace {
struct secure_endpoint {
secure_endpoint(const grpc_endpoint_vtable* vtable,
struct secure_endpoint : public grpc_endpoint {
secure_endpoint(const grpc_endpoint_vtable* vtbl,
tsi_frame_protector* protector,
tsi_zero_copy_grpc_protector* zero_copy_protector,
grpc_endpoint* transport, grpc_slice* leftover_slices,
grpc_core::OrphanablePtr<grpc_endpoint> endpoint,
grpc_slice* leftover_slices,
const grpc_channel_args* channel_args,
size_t leftover_nslices)
: wrapped_ep(transport),
: wrapped_ep(std::move(endpoint)),
protector(protector),
zero_copy_protector(zero_copy_protector) {
base.vtable = vtable;
this->vtable = vtbl;
gpr_mu_init(&protector_mu);
GRPC_CLOSURE_INIT(&on_read, ::on_read, this, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_write, ::on_write, this, grpc_schedule_on_exec_ctx);
@@ -117,8 +121,7 @@ struct secure_endpoint {
gpr_mu_destroy(&protector_mu);
}
grpc_endpoint base;
grpc_endpoint* wrapped_ep;
grpc_core::OrphanablePtr<grpc_endpoint> wrapped_ep;
struct tsi_frame_protector* protector;
struct tsi_zero_copy_grpc_protector* zero_copy_protector;
gpr_mu protector_mu;
@@ -365,8 +368,8 @@ static void endpoint_read(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
return;
}
grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read, urgent,
/*min_progress_size=*/ep->min_progress_size);
grpc_endpoint_read(ep->wrapped_ep.get(), &ep->source_buffer, &ep->on_read,
urgent, /*min_progress_size=*/ep->min_progress_size);
}
static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
@@ -500,52 +503,52 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
// output_buffer at any time until the write completes.
SECURE_ENDPOINT_REF(ep, "write");
ep->write_cb = cb;
grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, &ep->on_write, arg,
max_frame_size);
grpc_endpoint_write(ep->wrapped_ep.get(), &ep->output_buffer, &ep->on_write,
arg, max_frame_size);
}
static void endpoint_destroy(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
grpc_endpoint_destroy(ep->wrapped_ep);
ep->wrapped_ep.reset();
SECURE_ENDPOINT_UNREF(ep, "destroy");
}
static void endpoint_add_to_pollset(grpc_endpoint* secure_ep,
grpc_pollset* pollset) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset);
grpc_endpoint_add_to_pollset(ep->wrapped_ep.get(), pollset);
}
static void endpoint_add_to_pollset_set(grpc_endpoint* secure_ep,
grpc_pollset_set* pollset_set) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
grpc_endpoint_add_to_pollset_set(ep->wrapped_ep, pollset_set);
grpc_endpoint_add_to_pollset_set(ep->wrapped_ep.get(), pollset_set);
}
static void endpoint_delete_from_pollset_set(grpc_endpoint* secure_ep,
grpc_pollset_set* pollset_set) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
grpc_endpoint_delete_from_pollset_set(ep->wrapped_ep, pollset_set);
grpc_endpoint_delete_from_pollset_set(ep->wrapped_ep.get(), pollset_set);
}
static absl::string_view endpoint_get_peer(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
return grpc_endpoint_get_peer(ep->wrapped_ep);
return grpc_endpoint_get_peer(ep->wrapped_ep.get());
}
static absl::string_view endpoint_get_local_address(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
return grpc_endpoint_get_local_address(ep->wrapped_ep);
return grpc_endpoint_get_local_address(ep->wrapped_ep.get());
}
static int endpoint_get_fd(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
return grpc_endpoint_get_fd(ep->wrapped_ep);
return grpc_endpoint_get_fd(ep->wrapped_ep.get());
}
static bool endpoint_can_track_err(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
return grpc_endpoint_can_track_err(ep->wrapped_ep);
return grpc_endpoint_can_track_err(ep->wrapped_ep.get());
}
static const grpc_endpoint_vtable vtable = {endpoint_read,
@@ -559,13 +562,13 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_get_fd,
endpoint_can_track_err};
grpc_endpoint* grpc_secure_endpoint_create(
grpc_core::OrphanablePtr<grpc_endpoint> grpc_secure_endpoint_create(
struct tsi_frame_protector* protector,
struct tsi_zero_copy_grpc_protector* zero_copy_protector,
grpc_endpoint* to_wrap, grpc_slice* leftover_slices,
const grpc_channel_args* channel_args, size_t leftover_nslices) {
secure_endpoint* ep =
new secure_endpoint(&vtable, protector, zero_copy_protector, to_wrap,
leftover_slices, channel_args, leftover_nslices);
return &ep->base;
grpc_core::OrphanablePtr<grpc_endpoint> to_wrap,
grpc_slice* leftover_slices, const grpc_channel_args* channel_args,
size_t leftover_nslices) {
return grpc_core::MakeOrphanable<secure_endpoint>(
&vtable, protector, zero_copy_protector, std::move(to_wrap),
leftover_slices, channel_args, leftover_nslices);
}

@@ -26,15 +26,17 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/endpoint.h"
// Takes ownership of protector, zero_copy_protector, and to_wrap, and refs
// leftover_slices. If zero_copy_protector is not NULL, protector will never be
// used.
grpc_endpoint* grpc_secure_endpoint_create(
grpc_core::OrphanablePtr<grpc_endpoint> grpc_secure_endpoint_create(
struct tsi_frame_protector* protector,
struct tsi_zero_copy_grpc_protector* zero_copy_protector,
grpc_endpoint* to_wrap, grpc_slice* leftover_slices,
const grpc_channel_args* channel_args, size_t leftover_nslices);
grpc_core::OrphanablePtr<grpc_endpoint> to_wrap,
grpc_slice* leftover_slices, const grpc_channel_args* channel_args,
size_t leftover_nslices);
#endif // GRPC_SRC_CORE_HANDSHAKER_SECURITY_SECURE_ENDPOINT_H
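// Illustrative sketch only (not part of this diff): with the new signature the
// wrapped transport is moved in and the secure endpoint comes back as an
// OrphanablePtr, so callers never touch grpc_endpoint_destroy() directly.
grpc_core::OrphanablePtr<grpc_endpoint> WrapExample(
    tsi_frame_protector* protector,
    grpc_core::OrphanablePtr<grpc_endpoint> tcp_endpoint,
    const grpc_channel_args* channel_args) {
  // No zero-copy protector here, so `protector` is used for framing.
  return grpc_secure_endpoint_create(
      protector, /*zero_copy_protector=*/nullptr, std::move(tcp_endpoint),
      /*leftover_slices=*/nullptr, channel_args, /*leftover_nslices=*/0);
}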

@@ -28,6 +28,7 @@
#include <utility>
#include "absl/base/attributes.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
@@ -79,11 +80,11 @@ class SecurityHandshaker : public Handshaker {
grpc_security_connector* connector,
const ChannelArgs& args);
~SecurityHandshaker() override;
void Shutdown(grpc_error_handle why) override;
void DoHandshake(grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done,
HandshakerArgs* args) override;
const char* name() const override { return "security"; }
absl::string_view name() const override { return "security"; }
void DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) override;
void Shutdown(absl::Status error) override;
private:
grpc_error_handle DoHandshakerNextLocked(const unsigned char* bytes_received,
@@ -92,12 +93,11 @@ class SecurityHandshaker : public Handshaker {
grpc_error_handle OnHandshakeNextDoneLocked(
tsi_result result, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
void HandshakeFailedLocked(grpc_error_handle error);
void CleanupArgsForFailureLocked();
void HandshakeFailedLocked(absl::Status error);
void Finish(absl::Status status);
static void OnHandshakeDataReceivedFromPeerFn(void* arg,
grpc_error_handle error);
static void OnHandshakeDataSentToPeerFn(void* arg, grpc_error_handle error);
void OnHandshakeDataReceivedFromPeerFn(absl::Status error);
void OnHandshakeDataSentToPeerFn(absl::Status error);
static void OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error);
static void OnHandshakeDataSentToPeerFnScheduler(void* arg,
@@ -117,16 +117,14 @@ class SecurityHandshaker : public Handshaker {
Mutex mu_;
bool is_shutdown_ = false;
// Read buffer to destroy after a shutdown.
grpc_slice_buffer* read_buffer_to_destroy_ = nullptr;
// State saved while performing the handshake.
HandshakerArgs* args_ = nullptr;
grpc_closure* on_handshake_done_ = nullptr;
absl::AnyInvocable<void(absl::Status)> on_handshake_done_;
size_t handshake_buffer_size_;
unsigned char* handshake_buffer_;
grpc_slice_buffer outgoing_;
SliceBuffer outgoing_;
grpc_closure on_handshake_data_sent_to_peer_;
grpc_closure on_handshake_data_received_from_peer_;
grpc_closure on_peer_checked_;
@@ -146,7 +144,6 @@ SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
static_cast<uint8_t*>(gpr_malloc(handshake_buffer_size_))),
max_frame_size_(
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {
grpc_slice_buffer_init(&outgoing_);
GRPC_CLOSURE_INIT(&on_peer_checked_, &SecurityHandshaker::OnPeerCheckedFn,
this, grpc_schedule_on_exec_ctx);
}
@@ -154,45 +151,30 @@ SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
SecurityHandshaker::~SecurityHandshaker() {
tsi_handshaker_destroy(handshaker_);
tsi_handshaker_result_destroy(handshaker_result_);
if (read_buffer_to_destroy_ != nullptr) {
grpc_slice_buffer_destroy(read_buffer_to_destroy_);
gpr_free(read_buffer_to_destroy_);
}
gpr_free(handshake_buffer_);
grpc_slice_buffer_destroy(&outgoing_);
auth_context_.reset(DEBUG_LOCATION, "handshake");
connector_.reset(DEBUG_LOCATION, "handshake");
}
size_t SecurityHandshaker::MoveReadBufferIntoHandshakeBuffer() {
size_t bytes_in_read_buffer = args_->read_buffer->length;
size_t bytes_in_read_buffer = args_->read_buffer.Length();
if (handshake_buffer_size_ < bytes_in_read_buffer) {
handshake_buffer_ = static_cast<uint8_t*>(
gpr_realloc(handshake_buffer_, bytes_in_read_buffer));
handshake_buffer_size_ = bytes_in_read_buffer;
}
size_t offset = 0;
while (args_->read_buffer->count > 0) {
grpc_slice* next_slice = grpc_slice_buffer_peek_first(args_->read_buffer);
memcpy(handshake_buffer_ + offset, GRPC_SLICE_START_PTR(*next_slice),
GRPC_SLICE_LENGTH(*next_slice));
offset += GRPC_SLICE_LENGTH(*next_slice);
grpc_slice_buffer_remove_first(args_->read_buffer);
while (args_->read_buffer.Count() > 0) {
Slice slice = args_->read_buffer.TakeFirst();
memcpy(handshake_buffer_ + offset, slice.data(), slice.size());
offset += slice.size();
}
return bytes_in_read_buffer;
}
// Set args_ fields to NULL, saving the endpoint and read buffer for
// later destruction.
void SecurityHandshaker::CleanupArgsForFailureLocked() {
read_buffer_to_destroy_ = args_->read_buffer;
args_->read_buffer = nullptr;
args_->args = ChannelArgs();
}
// If the handshake failed or we're shutting down, clean up and invoke the
// callback with the error.
void SecurityHandshaker::HandshakeFailedLocked(grpc_error_handle error) {
void SecurityHandshaker::HandshakeFailedLocked(absl::Status error) {
if (error.ok()) {
// If we were shut down after the handshake succeeded but before an
// endpoint callback was invoked, we need to generate our own error.
@@ -200,17 +182,17 @@ void SecurityHandshaker::HandshakeFailedLocked(grpc_error_handle error) {
}
if (!is_shutdown_) {
tsi_handshaker_shutdown(handshaker_);
grpc_endpoint_destroy(args_->endpoint);
args_->endpoint = nullptr;
// Not shutting down, so the write failed. Clean up before
// invoking the callback.
CleanupArgsForFailureLocked();
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
is_shutdown_ = true;
}
// Invoke callback.
ExecCtx::Run(DEBUG_LOCATION, on_handshake_done_, error);
Finish(std::move(error));
}
void SecurityHandshaker::Finish(absl::Status status) {
InvokeOnHandshakeDone(args_, std::move(on_handshake_done_),
std::move(status));
}
namespace {
@@ -306,19 +288,18 @@ void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
grpc_slice slice = grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(unused_bytes), unused_bytes_size);
args_->endpoint = grpc_secure_endpoint_create(
protector, zero_copy_protector, args_->endpoint, &slice,
protector, zero_copy_protector, std::move(args_->endpoint), &slice,
args_->args.ToC().get(), 1);
CSliceUnref(slice);
} else {
args_->endpoint = grpc_secure_endpoint_create(
protector, zero_copy_protector, args_->endpoint, nullptr,
protector, zero_copy_protector, std::move(args_->endpoint), nullptr,
args_->args.ToC().get(), 0);
}
} else if (unused_bytes_size > 0) {
// Not wrapping the endpoint, so just pass along unused bytes.
grpc_slice slice = grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(unused_bytes), unused_bytes_size);
grpc_slice_buffer_add(args_->read_buffer, slice);
args_->read_buffer.Append(Slice::FromCopiedBuffer(
reinterpret_cast<const char*>(unused_bytes), unused_bytes_size));
}
// Done with handshaker result.
tsi_handshaker_result_destroy(handshaker_result_);
@@ -329,11 +310,11 @@ void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
args_->args = args_->args.SetObject(
MakeChannelzSecurityFromAuthContext(auth_context_.get()));
}
// Invoke callback.
ExecCtx::Run(DEBUG_LOCATION, on_handshake_done_, absl::OkStatus());
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
is_shutdown_ = true;
// Invoke callback.
Finish(absl::OkStatus());
}
void SecurityHandshaker::OnPeerCheckedFn(void* arg, grpc_error_handle error) {
@@ -349,8 +330,8 @@ grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
return GRPC_ERROR_CREATE(absl::StrCat("Peer extraction failed (",
tsi_result_to_string(result), ")"));
}
connector_->check_peer(peer, args_->endpoint, args_->args, &auth_context_,
&on_peer_checked_);
connector_->check_peer(peer, args_->endpoint.get(), args_->args,
&auth_context_, &on_peer_checked_);
grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
auth_context_.get(), GRPC_TRANSPORT_SECURITY_LEVEL_PROPERTY_NAME);
const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it);
@@ -374,7 +355,7 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
if (result == TSI_INCOMPLETE_DATA) {
CHECK_EQ(bytes_to_send_size, 0u);
grpc_endpoint_read(
args_->endpoint, args_->read_buffer,
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
@@ -388,6 +369,8 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
if (security_connector != nullptr) {
connector_type = security_connector->type().name();
}
// TODO(roth): Get a better signal from the TSI layer as to what
// status code we should use here.
return GRPC_ERROR_CREATE(absl::StrCat(
connector_type, " handshake failed (", tsi_result_to_string(result),
")", (tsi_handshake_error_.empty() ? "" : ": "), tsi_handshake_error_));
@@ -399,12 +382,11 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
}
if (bytes_to_send_size > 0) {
// Send data to peer, if needed.
grpc_slice to_send = grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(bytes_to_send), bytes_to_send_size);
grpc_slice_buffer_reset_and_unref(&outgoing_);
grpc_slice_buffer_add(&outgoing_, to_send);
outgoing_.Clear();
outgoing_.Append(Slice::FromCopiedBuffer(
reinterpret_cast<const char*>(bytes_to_send), bytes_to_send_size));
grpc_endpoint_write(
args_->endpoint, &outgoing_,
args_->endpoint.get(), outgoing_.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_sent_to_peer_,
&SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler, this,
@@ -413,7 +395,7 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
} else if (handshaker_result == nullptr) {
// There is nothing to send, but need to read from peer.
grpc_endpoint_read(
args_->endpoint, args_->read_buffer,
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
@@ -435,7 +417,7 @@ void SecurityHandshaker::OnHandshakeNextDoneGrpcWrapper(
grpc_error_handle error = h->OnHandshakeNextDoneLocked(
result, bytes_to_send, bytes_to_send_size, handshaker_result);
if (!error.ok()) {
h->HandshakeFailedLocked(error);
h->HandshakeFailedLocked(std::move(error));
} else {
h.release(); // Avoid unref
}
@@ -463,102 +445,102 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
}
// This callback might be run inline while we are still holding on to the mutex,
// so schedule OnHandshakeDataReceivedFromPeerFn on ExecCtx to avoid a deadlock.
// so run OnHandshakeDataReceivedFromPeerFn asynchronously to avoid a deadlock.
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* h = static_cast<SecurityHandshaker*>(arg);
ExecCtx::Run(
DEBUG_LOCATION,
GRPC_CLOSURE_INIT(&h->on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn,
h, grpc_schedule_on_exec_ctx),
error);
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataReceivedFromPeerFn(std::move(error));
});
}
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(
void* arg, grpc_error_handle error) {
RefCountedPtr<SecurityHandshaker> h(static_cast<SecurityHandshaker*>(arg));
MutexLock lock(&h->mu_);
if (!error.ok() || h->is_shutdown_) {
h->HandshakeFailedLocked(
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
GRPC_ERROR_CREATE_REFERENCING("Handshake read failed", &error, 1));
return;
}
// Copy all slices received.
size_t bytes_received_size = h->MoveReadBufferIntoHandshakeBuffer();
size_t bytes_received_size = MoveReadBufferIntoHandshakeBuffer();
// Call TSI handshaker.
error = h->DoHandshakerNextLocked(h->handshake_buffer_, bytes_received_size);
error = DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) {
h->HandshakeFailedLocked(error);
HandshakeFailedLocked(std::move(error));
} else {
h.release(); // Avoid unref
handshaker.release(); // Avoid unref
}
}
// This callback might be run inline while we are still holding on to the mutex,
// so schedule OnHandshakeDataSentToPeerFn on ExecCtx to avoid a deadlock.
// so run OnHandshakeDataSentToPeerFn asynchronously to avoid a deadlock.
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* h = static_cast<SecurityHandshaker*>(arg);
ExecCtx::Run(
DEBUG_LOCATION,
GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer_,
&SecurityHandshaker::OnHandshakeDataSentToPeerFn, h,
grpc_schedule_on_exec_ctx),
error);
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataSentToPeerFn(std::move(error));
});
}
void SecurityHandshaker::OnHandshakeDataSentToPeerFn(void* arg,
grpc_error_handle error) {
RefCountedPtr<SecurityHandshaker> h(static_cast<SecurityHandshaker*>(arg));
MutexLock lock(&h->mu_);
if (!error.ok() || h->is_shutdown_) {
h->HandshakeFailedLocked(
void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
GRPC_ERROR_CREATE_REFERENCING("Handshake write failed", &error, 1));
return;
}
// We may be done.
if (h->handshaker_result_ == nullptr) {
if (handshaker_result_ == nullptr) {
grpc_endpoint_read(
h->args_->endpoint, h->args_->read_buffer,
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&h->on_handshake_data_received_from_peer_,
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
h.get(), grpc_schedule_on_exec_ctx),
this, grpc_schedule_on_exec_ctx),
/*urgent=*/true, /*min_progress_size=*/1);
} else {
error = h->CheckPeerLocked();
error = CheckPeerLocked();
if (!error.ok()) {
h->HandshakeFailedLocked(error);
HandshakeFailedLocked(error);
return;
}
}
h.release(); // Avoid unref
handshaker.release(); // Avoid unref
}
//
// public handshaker API
//
void SecurityHandshaker::Shutdown(grpc_error_handle why) {
void SecurityHandshaker::Shutdown(absl::Status error) {
MutexLock lock(&mu_);
if (!is_shutdown_) {
is_shutdown_ = true;
connector_->cancel_check_peer(&on_peer_checked_, why);
connector_->cancel_check_peer(&on_peer_checked_, std::move(error));
tsi_handshaker_shutdown(handshaker_);
grpc_endpoint_destroy(args_->endpoint);
args_->endpoint = nullptr;
CleanupArgsForFailureLocked();
args_->endpoint.reset();
}
}
void SecurityHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) {
void SecurityHandshaker::DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) {
auto ref = Ref();
MutexLock lock(&mu_);
args_ = args;
on_handshake_done_ = on_handshake_done;
on_handshake_done_ = std::move(on_handshake_done);
size_t bytes_received_size = MoveReadBufferIntoHandshakeBuffer();
grpc_error_handle error =
DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
@@ -576,19 +558,13 @@ void SecurityHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
class FailHandshaker : public Handshaker {
public:
explicit FailHandshaker(absl::Status status) : status_(std::move(status)) {}
const char* name() const override { return "security_fail"; }
void Shutdown(grpc_error_handle /*why*/) override {}
void DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) override {
grpc_endpoint_destroy(args->endpoint);
args->endpoint = nullptr;
args->args = ChannelArgs();
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
args->read_buffer = nullptr;
ExecCtx::Run(DEBUG_LOCATION, on_handshake_done, status_);
absl::string_view name() const override { return "security_fail"; }
void DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) override {
InvokeOnHandshakeDone(args, std::move(on_handshake_done), status_);
}
void Shutdown(absl::Status /*error*/) override {}
private:
~FailHandshaker() override = default;

@@ -19,8 +19,10 @@
#include "src/core/handshaker/tcp_connect/tcp_connect_handshaker.h"
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
@@ -61,24 +63,23 @@ namespace {
class TCPConnectHandshaker : public Handshaker {
public:
explicit TCPConnectHandshaker(grpc_pollset_set* pollset_set);
void Shutdown(grpc_error_handle why) override;
void DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) override;
const char* name() const override { return "tcp_connect"; }
absl::string_view name() const override { return "tcp_connect"; }
void DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) override;
void Shutdown(absl::Status error) override;
private:
~TCPConnectHandshaker() override;
void CleanupArgsForFailureLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void FinishLocked(grpc_error_handle error) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void FinishLocked(absl::Status error) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
static void Connected(void* arg, grpc_error_handle error);
Mutex mu_;
bool shutdown_ ABSL_GUARDED_BY(mu_) = false;
// Endpoint and read buffer to destroy after a shutdown.
// Endpoint to destroy after a shutdown.
grpc_endpoint* endpoint_to_destroy_ ABSL_GUARDED_BY(mu_) = nullptr;
grpc_slice_buffer* read_buffer_to_destroy_ ABSL_GUARDED_BY(mu_) = nullptr;
grpc_closure* on_handshake_done_ ABSL_GUARDED_BY(mu_) = nullptr;
absl::AnyInvocable<void(absl::Status)> on_handshake_done_
ABSL_GUARDED_BY(mu_);
grpc_pollset_set* interested_parties_ = nullptr;
grpc_polling_entity pollent_;
HandshakerArgs* args_ = nullptr;
@@ -99,33 +100,32 @@ TCPConnectHandshaker::TCPConnectHandshaker(grpc_pollset_set* pollset_set)
GRPC_CLOSURE_INIT(&connected_, Connected, this, grpc_schedule_on_exec_ctx);
}
void TCPConnectHandshaker::Shutdown(grpc_error_handle /*why*/) {
void TCPConnectHandshaker::Shutdown(absl::Status /*error*/) {
// TODO(anramach): After migration to EventEngine, cancel the in-progress
// TCP connection attempt.
{
MutexLock lock(&mu_);
if (!shutdown_) {
shutdown_ = true;
// If we are shutting down while connecting, respond back with
// handshake done.
// The callback from grpc_tcp_client_connect will perform
// the necessary clean up.
if (on_handshake_done_ != nullptr) {
CleanupArgsForFailureLocked();
FinishLocked(GRPC_ERROR_CREATE("tcp handshaker shutdown"));
}
MutexLock lock(&mu_);
if (!shutdown_) {
shutdown_ = true;
// If we are shutting down while connecting, invoke the handshake-done
// callback now.
// The callback from grpc_tcp_client_connect will perform
// the necessary clean up.
if (on_handshake_done_ != nullptr) {
// TODO(roth): When we remove the legacy grpc_error APIs, propagate the
// status passed to shutdown as part of the message here.
FinishLocked(GRPC_ERROR_CREATE("tcp handshaker shutdown"));
}
}
}
void TCPConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) {
void TCPConnectHandshaker::DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) {
{
MutexLock lock(&mu_);
on_handshake_done_ = on_handshake_done;
on_handshake_done_ = std::move(on_handshake_done);
}
CHECK_EQ(args->endpoint, nullptr);
CHECK_EQ(args->endpoint.get(), nullptr);
args_ = args;
absl::StatusOr<URI> uri = URI::Parse(
args->args.GetString(GRPC_ARG_TCP_HANDSHAKER_RESOLVED_ADDRESS).value());
@@ -149,7 +149,7 @@ void TCPConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
Ref().release(); // Ref held by callback.
// Because we fake the TCP client connection failure when shutdown is
// called, we don't want to pass args->endpoint directly.
// Instead pass endpoint_ and swap this endpoint to
// Instead pass endpoint_to_destroy_ and swap this endpoint to
// args endpoint on success.
grpc_tcp_client_connect(
&connected_, &endpoint_to_destroy_, interested_parties_,
@@ -171,21 +171,19 @@ void TCPConnectHandshaker::Connected(void* arg, grpc_error_handle error) {
self->endpoint_to_destroy_ = nullptr;
}
if (!self->shutdown_) {
self->CleanupArgsForFailureLocked();
self->shutdown_ = true;
self->FinishLocked(error);
self->FinishLocked(std::move(error));
} else {
// The on_handshake_done_ is already as part of shutdown when
// connecting So nothing to be done here other than unrefing the
// error.
// The on_handshake_done_ callback was already invoked as part of
// shutdown when connecting, so nothing to be done here.
}
return;
}
CHECK_NE(self->endpoint_to_destroy_, nullptr);
self->args_->endpoint = self->endpoint_to_destroy_;
self->args_->endpoint.reset(self->endpoint_to_destroy_);
self->endpoint_to_destroy_ = nullptr;
if (self->bind_endpoint_to_pollset_) {
grpc_endpoint_add_to_pollset_set(self->args_->endpoint,
grpc_endpoint_add_to_pollset_set(self->args_->endpoint.get(),
self->interested_parties_);
}
self->FinishLocked(absl::OkStatus());
@@ -196,25 +194,14 @@ TCPConnectHandshaker::~TCPConnectHandshaker() {
if (endpoint_to_destroy_ != nullptr) {
grpc_endpoint_destroy(endpoint_to_destroy_);
}
if (read_buffer_to_destroy_ != nullptr) {
grpc_slice_buffer_destroy(read_buffer_to_destroy_);
gpr_free(read_buffer_to_destroy_);
}
grpc_pollset_set_destroy(interested_parties_);
}
void TCPConnectHandshaker::CleanupArgsForFailureLocked() {
read_buffer_to_destroy_ = args_->read_buffer;
args_->read_buffer = nullptr;
args_->args = ChannelArgs();
}
void TCPConnectHandshaker::FinishLocked(grpc_error_handle error) {
void TCPConnectHandshaker::FinishLocked(absl::Status error) {
if (interested_parties_ != nullptr) {
grpc_polling_entity_del_from_pollset_set(&pollent_, interested_parties_);
}
ExecCtx::Run(DEBUG_LOCATION, on_handshake_done_, error);
on_handshake_done_ = nullptr;
InvokeOnHandshakeDone(args_, std::move(on_handshake_done_), std::move(error));
}
//

@@ -26,6 +26,7 @@ namespace grpc_core {
DebugOnlyTraceFlag auth_context_refcount_trace(false, "auth_context_refcount");
DebugOnlyTraceFlag call_combiner_trace(false, "call_combiner");
DebugOnlyTraceFlag call_refcount_trace(false, "call_refcount");
DebugOnlyTraceFlag call_state_trace(false, "call_state");
DebugOnlyTraceFlag closure_trace(false, "closure");
DebugOnlyTraceFlag combiner_trace(false, "combiner");
DebugOnlyTraceFlag cq_refcount_trace(false, "cq_refcount");
@@ -229,6 +230,7 @@ const absl::flat_hash_map<std::string, TraceFlag*>& GetAllTraceFlags() {
{"auth_context_refcount", &auth_context_refcount_trace},
{"call_combiner", &call_combiner_trace},
{"call_refcount", &call_refcount_trace},
{"call_state", &call_state_trace},
{"closure", &closure_trace},
{"combiner", &combiner_trace},
{"cq_refcount", &cq_refcount_trace},

@@ -26,6 +26,7 @@ namespace grpc_core {
extern DebugOnlyTraceFlag auth_context_refcount_trace;
extern DebugOnlyTraceFlag call_combiner_trace;
extern DebugOnlyTraceFlag call_refcount_trace;
extern DebugOnlyTraceFlag call_state_trace;
extern DebugOnlyTraceFlag closure_trace;
extern DebugOnlyTraceFlag combiner_trace;
extern DebugOnlyTraceFlag cq_refcount_trace;

@@ -54,6 +54,10 @@ call_refcount:
debug_only: true
default: false
description: Refcount on call.
call_state:
debug_only: true
default: false
description: Traces transitions through the call spine state machine.
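# Usage note (illustrative, not part of this diff): call_state is debug_only,
# so it only has effect in debug builds; enable it at runtime with
#   GRPC_TRACE=call_state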
cares_address_sorting:
default: false
description: Operations of the c-ares based DNS resolver's address sorter.

@@ -101,6 +101,8 @@ bool grpc_endpoint_can_track_err(grpc_endpoint* ep);
struct grpc_endpoint {
const grpc_endpoint_vtable* vtable;
void Orphan() { grpc_endpoint_destroy(this); }
};
#endif // GRPC_SRC_CORE_LIB_IOMGR_ENDPOINT_H
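// Illustrative note (not part of this diff): Orphan() is what lets a raw
// iomgr endpoint satisfy the Orphanable contract, so the handshaker changes
// above can hold it as an OrphanablePtr:
//   grpc_core::OrphanablePtr<grpc_endpoint> ep(some_endpoint);
//   ep.reset();  // invokes ep->Orphan(), i.e. grpc_endpoint_destroy(ep)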

@@ -254,699 +254,4 @@ RefCountedPtr<CallFilters::Stack> CallFilters::StackBuilder::Build() {
return RefCountedPtr<Stack>(new Stack(std::move(data_)));
}
///////////////////////////////////////////////////////////////////////////////
// CallState
namespace filters_detail {
CallState::CallState()
: client_to_server_pull_state_(ClientToServerPullState::kBegin),
client_to_server_push_state_(ClientToServerPushState::kIdle),
server_to_client_pull_state_(ServerToClientPullState::kUnstarted),
server_to_client_push_state_(ServerToClientPushState::kStart),
server_trailing_metadata_state_(ServerTrailingMetadataState::kNotPushed) {
}
void CallState::Start() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] Start: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
server_to_client_pull_state_ = ServerToClientPullState::kStarted;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kUnstartedReading:
server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
LOG(FATAL) << "Start called twice";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
case ServerToClientPullState::kTerminated:
break;
}
}
void CallState::BeginPushClientToServerMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] BeginPushClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kPushedMessage;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
LOG(FATAL) << "PushClientToServerMessage called twice concurrently";
break;
case ClientToServerPushState::kPushedHalfClose:
LOG(FATAL) << "PushClientToServerMessage called after half-close";
break;
case ClientToServerPushState::kFinished:
break;
}
}
Poll<StatusFlag> CallState::PollPushClientToServerMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPushClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
case ClientToServerPushState::kPushedHalfClose:
return Success{};
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
return client_to_server_push_waiter_.pending();
case ClientToServerPushState::kFinished:
return Failure{};
}
Crash("Unreachable");
}
void CallState::ClientToServerHalfClose() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] ClientToServerHalfClose: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
client_to_server_push_state_ =
ClientToServerPushState::kPushedMessageAndHalfClosed;
break;
case ClientToServerPushState::kPushedHalfClose:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
LOG(FATAL) << "ClientToServerHalfClose called twice";
break;
case ClientToServerPushState::kFinished:
break;
}
}
void CallState::BeginPullClientInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] BeginPullClientInitialMetadata: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
client_to_server_pull_state_ =
ClientToServerPullState::kProcessingClientInitialMetadata;
break;
case ClientToServerPullState::kProcessingClientInitialMetadata:
case ClientToServerPullState::kIdle:
case ClientToServerPullState::kReading:
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "BeginPullClientInitialMetadata called twice";
break;
case ClientToServerPullState::kTerminated:
break;
}
}
void CallState::FinishPullClientInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullClientInitialMetadata: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
LOG(FATAL) << "FinishPullClientInitialMetadata called before Begin";
break;
case ClientToServerPullState::kProcessingClientInitialMetadata:
client_to_server_pull_state_ = ClientToServerPullState::kIdle;
client_to_server_pull_waiter_.Wake();
break;
case ClientToServerPullState::kIdle:
case ClientToServerPullState::kReading:
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "Out of order FinishPullClientInitialMetadata";
break;
case ClientToServerPullState::kTerminated:
break;
}
}
Poll<ValueOrFailure<bool>> CallState::PollPullClientToServerMessageAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPullClientToServerMessageAvailable: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
client_to_server_push_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
case ClientToServerPullState::kProcessingClientInitialMetadata:
return client_to_server_pull_waiter_.pending();
case ClientToServerPullState::kIdle:
client_to_server_pull_state_ = ClientToServerPullState::kReading;
ABSL_FALLTHROUGH_INTENDED;
case ClientToServerPullState::kReading:
break;
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "PollPullClientToServerMessageAvailable called while "
"processing a message";
break;
case ClientToServerPullState::kTerminated:
return Failure{};
}
DCHECK_EQ(client_to_server_pull_state_, ClientToServerPullState::kReading);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
return client_to_server_push_waiter_.pending();
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_pull_state_ =
ClientToServerPullState::kProcessingClientToServerMessage;
return true;
case ClientToServerPushState::kPushedHalfClose:
return false;
case ClientToServerPushState::kFinished:
client_to_server_pull_state_ = ClientToServerPullState::kTerminated;
return Failure{};
}
Crash("Unreachable");
}
void CallState::FinishPullClientToServerMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
client_to_server_push_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
case ClientToServerPullState::kProcessingClientInitialMetadata:
LOG(FATAL) << "FinishPullClientToServerMessage called before Begin";
break;
case ClientToServerPullState::kIdle:
LOG(FATAL) << "FinishPullClientToServerMessage called twice";
break;
case ClientToServerPullState::kReading:
LOG(FATAL) << "FinishPullClientToServerMessage called before "
"PollPullClientToServerMessageAvailable";
break;
case ClientToServerPullState::kProcessingClientToServerMessage:
client_to_server_pull_state_ = ClientToServerPullState::kIdle;
client_to_server_pull_waiter_.Wake();
break;
case ClientToServerPullState::kTerminated:
break;
}
switch (client_to_server_push_state_) {
case ClientToServerPushState::kPushedMessage:
client_to_server_push_state_ = ClientToServerPushState::kIdle;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kIdle:
case ClientToServerPushState::kPushedHalfClose:
LOG(FATAL) << "FinishPullClientToServerMessage called without a message";
break;
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kFinished:
break;
}
}
StatusFlag CallState::PushServerInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PushServerInitialMetadata: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_,
server_trailing_metadata_state_);
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return Failure{};
}
CHECK_EQ(server_to_client_push_state_, ServerToClientPushState::kStart);
server_to_client_push_state_ =
ServerToClientPushState::kPushedServerInitialMetadata;
server_to_client_push_waiter_.Wake();
return Success{};
}
void CallState::BeginPushServerToClientMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] BeginPushServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
LOG(FATAL) << "BeginPushServerToClientMessage called before "
"PushServerInitialMetadata";
break;
case ServerToClientPushState::kPushedServerInitialMetadata:
server_to_client_push_state_ =
ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage;
break;
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
LOG(FATAL) << "BeginPushServerToClientMessage called twice concurrently";
break;
case ServerToClientPushState::kTrailersOnly:
// Will fail in poll.
break;
case ServerToClientPushState::kIdle:
server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kFinished:
break;
}
}
Poll<StatusFlag> CallState::PollPushServerToClientMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPushServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
case ServerToClientPushState::kPushedServerInitialMetadata:
LOG(FATAL) << "PollPushServerToClientMessage called before "
<< "PushServerInitialMetadata";
case ServerToClientPushState::kTrailersOnly:
return false;
case ServerToClientPushState::kPushedMessage:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kIdle:
return Success{};
case ServerToClientPushState::kFinished:
return Failure{};
}
Crash("Unreachable");
}
bool CallState::PushServerTrailingMetadata(bool cancel) {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PushServerTrailingMetadata: "
<< GRPC_DUMP_ARGS(this, cancel, server_trailing_metadata_state_,
server_to_client_push_state_,
client_to_server_push_state_,
server_trailing_metadata_waiter_.DebugString());
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return false;
}
server_trailing_metadata_state_ =
cancel ? ServerTrailingMetadataState::kPushedCancel
: ServerTrailingMetadataState::kPushed;
server_trailing_metadata_waiter_.Wake();
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
server_to_client_push_state_ = ServerToClientPushState::kTrailersOnly;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
if (cancel) {
server_to_client_push_state_ = ServerToClientPushState::kFinished;
server_to_client_push_waiter_.Wake();
}
break;
case ServerToClientPushState::kIdle:
if (cancel) {
server_to_client_push_state_ = ServerToClientPushState::kFinished;
server_to_client_push_waiter_.Wake();
}
break;
case ServerToClientPushState::kFinished:
case ServerToClientPushState::kTrailersOnly:
break;
}
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kFinished;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_push_state_ = ClientToServerPushState::kFinished;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedHalfClose:
case ClientToServerPushState::kFinished:
break;
}
return true;
}
Poll<bool> CallState::PollPullServerInitialMetadataAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPullServerInitialMetadataAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_);
bool reading;
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
if (server_to_client_push_state_ ==
ServerToClientPushState::kTrailersOnly) {
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
return false;
}
server_to_client_push_waiter_.pending();
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStartedReading:
reading = true;
break;
case ServerToClientPullState::kStarted:
reading = false;
break;
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollPullServerInitialMetadataAvailable called twice";
case ServerToClientPullState::kTerminated:
return false;
}
DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kStarted ||
server_to_client_pull_state_ ==
ServerToClientPullState::kStartedReading)
<< server_to_client_pull_state_;
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
server_to_client_pull_state_ =
reading
? ServerToClientPullState::kProcessingServerInitialMetadataReading
: ServerToClientPullState::kProcessingServerInitialMetadata;
server_to_client_pull_waiter_.Wake();
return true;
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kPushedMessage:
LOG(FATAL)
<< "PollPullServerInitialMetadataAvailable after metadata processed";
case ServerToClientPushState::kFinished:
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
server_to_client_pull_waiter_.Wake();
return false;
case ServerToClientPushState::kTrailersOnly:
return false;
}
Crash("Unreachable");
}
void CallState::FinishPullServerInitialMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullServerInitialMetadata: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
LOG(FATAL) << "FinishPullServerInitialMetadata called before Start";
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
CHECK_EQ(server_to_client_push_state_,
ServerToClientPushState::kTrailersOnly);
return;
case ServerToClientPullState::kProcessingServerInitialMetadata:
server_to_client_pull_state_ = ServerToClientPullState::kIdle;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
server_to_client_pull_state_ = ServerToClientPullState::kReading;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "Out of order FinishPullServerInitialMetadata";
case ServerToClientPullState::kTerminated:
return;
}
DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kIdle ||
server_to_client_pull_state_ == ServerToClientPullState::kReading)
<< server_to_client_pull_state_;
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
LOG(FATAL) << "FinishPullServerInitialMetadata called before initial "
"metadata consumed";
case ServerToClientPushState::kPushedServerInitialMetadata:
server_to_client_push_state_ = ServerToClientPushState::kIdle;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kPushedMessage:
case ServerToClientPushState::kTrailersOnly:
case ServerToClientPushState::kFinished:
LOG(FATAL) << "FinishPullServerInitialMetadata called twice";
}
}
Poll<ValueOrFailure<bool>> CallState::PollPullServerToClientMessageAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollPullServerToClientMessageAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_,
server_trailing_metadata_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
server_to_client_pull_state_ = ServerToClientPullState::kUnstartedReading;
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kProcessingServerInitialMetadata:
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerInitialMetadataReading;
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kUnstartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStarted:
server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPullState::kStartedReading:
if (server_to_client_push_state_ ==
ServerToClientPushState::kTrailersOnly) {
return false;
}
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kIdle:
server_to_client_pull_state_ = ServerToClientPullState::kReading;
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPullState::kReading:
break;
case ServerToClientPullState::kProcessingServerToClientMessage:
LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
"processing a message";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
"processing trailing metadata";
case ServerToClientPullState::kTerminated:
return Failure{};
}
DCHECK_EQ(server_to_client_pull_state_, ServerToClientPullState::kReading);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kIdle:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return false;
}
server_trailing_metadata_waiter_.pending();
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kTrailersOnly:
DCHECK_NE(server_trailing_metadata_state_,
ServerTrailingMetadataState::kNotPushed);
return false;
case ServerToClientPushState::kPushedMessage:
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerToClientMessage;
server_to_client_pull_waiter_.Wake();
return true;
case ServerToClientPushState::kFinished:
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
server_to_client_pull_waiter_.Wake();
return Failure{};
}
Crash("Unreachable");
}
void CallState::FinishPullServerToClientMessage() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
LOG(FATAL)
<< "FinishPullServerToClientMessage called before metadata available";
case ServerToClientPullState::kIdle:
LOG(FATAL) << "FinishPullServerToClientMessage called twice";
case ServerToClientPullState::kReading:
LOG(FATAL) << "FinishPullServerToClientMessage called before "
<< "PollPullServerToClientMessageAvailable";
case ServerToClientPullState::kProcessingServerToClientMessage:
server_to_client_pull_state_ = ServerToClientPullState::kIdle;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "FinishPullServerToClientMessage called while processing "
"trailing metadata";
case ServerToClientPullState::kTerminated:
break;
}
switch (server_to_client_push_state_) {
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kStart:
LOG(FATAL) << "FinishPullServerToClientMessage called before initial "
"metadata consumed";
case ServerToClientPushState::kTrailersOnly:
LOG(FATAL) << "FinishPullServerToClientMessage called after "
"PushServerTrailingMetadata";
case ServerToClientPushState::kPushedMessage:
server_to_client_push_state_ = ServerToClientPushState::kIdle;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kIdle:
LOG(FATAL) << "FinishPullServerToClientMessage called without a message";
case ServerToClientPushState::kFinished:
break;
}
}
Poll<Empty> CallState::PollServerTrailingMetadataAvailable() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollServerTrailingMetadataAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_,
server_trailing_metadata_state_,
server_trailing_metadata_waiter_.DebugString());
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kUnstartedReading:
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kReading:
switch (server_to_client_push_state_) {
case ServerToClientPushState::kTrailersOnly:
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kStart:
case ServerToClientPushState::kFinished:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerTrailingMetadata;
server_to_client_pull_waiter_.Wake();
return Empty{};
}
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::
kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
server_to_client_push_waiter_.pending();
return server_to_client_pull_waiter_.pending();
}
break;
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kIdle:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerTrailingMetadata;
server_to_client_pull_waiter_.Wake();
return Empty{};
}
return server_trailing_metadata_waiter_.pending();
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollServerTrailingMetadataAvailable called twice";
case ServerToClientPullState::kTerminated:
return Empty{};
}
Crash("Unreachable");
}
void CallState::FinishPullServerTrailingMetadata() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] FinishPullServerTrailingMetadata: "
<< GRPC_DUMP_ARGS(this, server_trailing_metadata_state_,
server_trailing_metadata_waiter_.DebugString());
switch (server_trailing_metadata_state_) {
case ServerTrailingMetadataState::kNotPushed:
LOG(FATAL) << "FinishPullServerTrailingMetadata called before "
"PollServerTrailingMetadataAvailable";
case ServerTrailingMetadataState::kPushed:
server_trailing_metadata_state_ = ServerTrailingMetadataState::kPulled;
server_trailing_metadata_waiter_.Wake();
break;
case ServerTrailingMetadataState::kPushedCancel:
server_trailing_metadata_state_ =
ServerTrailingMetadataState::kPulledCancel;
server_trailing_metadata_waiter_.Wake();
break;
case ServerTrailingMetadataState::kPulled:
case ServerTrailingMetadataState::kPulledCancel:
LOG(FATAL) << "FinishPullServerTrailingMetadata called twice";
}
}
Poll<bool> CallState::PollWasCancelled() {
GRPC_TRACE_LOG(call, INFO)
<< "[call_state] PollWasCancelled: "
<< GRPC_DUMP_ARGS(this, server_trailing_metadata_state_);
switch (server_trailing_metadata_state_) {
case ServerTrailingMetadataState::kNotPushed:
case ServerTrailingMetadataState::kPushed:
case ServerTrailingMetadataState::kPushedCancel: {
return server_trailing_metadata_waiter_.pending();
}
case ServerTrailingMetadataState::kPulled:
return false;
case ServerTrailingMetadataState::kPulledCancel:
return true;
}
Crash("Unreachable");
}
std::string CallState::DebugString() const {
return absl::StrCat(
"client_to_server_pull_state:", client_to_server_pull_state_,
" client_to_server_push_state:", client_to_server_push_state_,
" server_to_client_pull_state:", server_to_client_pull_state_,
" server_to_client_message_push_state:", server_to_client_push_state_,
" server_trailing_metadata_state:", server_trailing_metadata_state_,
      " client_to_server_push_waiter:",
      client_to_server_push_waiter_.DebugString(),
" server_to_client_push_waiter:",
server_to_client_push_waiter_.DebugString(),
" client_to_server_pull_waiter:",
client_to_server_pull_waiter_.DebugString(),
" server_to_client_pull_waiter:",
server_to_client_pull_waiter_.DebugString(),
" server_trailing_metadata_waiter:",
server_trailing_metadata_waiter_.DebugString());
}
static_assert(sizeof(CallState) <= 16, "CallState too large");
} // namespace filters_detail
} // namespace grpc_core
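// ---------------------------------------------------------------------------
// Editor's sketch (illustrative; not part of this diff). The Poll*/Finish*
// pairs above are designed to be driven from inside a promise activity: a
// Pending{} result parks the activity until the matching IntraActivityWaiter
// is woken. Assuming Seq from "src/core/lib/promise/seq.h" and a hypothetical
// caller-supplied `consume` callback, pulling one client->server message
// could look like this:
namespace grpc_core {
namespace filters_detail {
template <typename ConsumeFn>
auto PullOneClientToServerMessage(CallState& call_state, ConsumeFn consume) {
  return Seq(
      // Resolves to true (message ready), false (half-closed), or Failure{}.
      [&call_state]() {
        return call_state.PollPullClientToServerMessageAvailable();
      },
      [&call_state, consume](ValueOrFailure<bool> available) mutable
          -> StatusFlag {
        if (!available.ok()) return Failure{};     // call was terminated
        if (!available.value()) return Success{};  // half-closed: no message
        consume();  // take the message the peer pushed
        call_state.FinishPullClientToServerMessage();
        return Success{};
      });
}
}  // namespace filters_detail
}  // namespace grpc_core
// ---------------------------------------------------------------------------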

@@ -36,6 +36,7 @@
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/transport/call_final_info.h"
#include "src/core/lib/transport/call_state.h"
#include "src/core/lib/transport/message.h"
#include "src/core/lib/transport/metadata.h"
@@ -369,7 +370,7 @@ struct AddOpImpl<FilterType, T, const NoInterceptor*, which> {
// void $INTERCEPTOR_NAME($VALUE_TYPE&)
template <typename FilterType, typename T,
void (FilterType::Call::*impl)(typename T::element_type&)>
void (FilterType::Call::* impl)(typename T::element_type&)>
struct AddOpImpl<FilterType, T,
void (FilterType::Call::*)(typename T::element_type&), impl> {
static void Add(FilterType* channel_data, size_t call_offset, Layout<T>& to) {
@@ -390,8 +391,8 @@ struct AddOpImpl<FilterType, T,
// void $INTERCEPTOR_NAME($VALUE_TYPE&, FilterType*)
template <typename FilterType, typename T,
void (FilterType::Call::*impl)(typename T::element_type&,
FilterType*)>
void (FilterType::Call::* impl)(typename T::element_type&,
FilterType*)>
struct AddOpImpl<
FilterType, T,
void (FilterType::Call::*)(typename T::element_type&, FilterType*), impl> {
@@ -414,7 +415,7 @@ struct AddOpImpl<
// $VALUE_HANDLE $INTERCEPTOR_NAME($VALUE_HANDLE, FilterType*)
template <typename FilterType, typename T,
T (FilterType::Call::*impl)(T, FilterType*)>
T (FilterType::Call::* impl)(T, FilterType*)>
struct AddOpImpl<FilterType, T, T (FilterType::Call::*)(T, FilterType*), impl> {
static void Add(FilterType* channel_data, size_t call_offset, Layout<T>& to) {
to.Add(
@@ -437,7 +438,7 @@ struct AddOpImpl<FilterType, T, T (FilterType::Call::*)(T, FilterType*), impl> {
// absl::Status $INTERCEPTOR_NAME($VALUE_TYPE&)
template <typename FilterType, typename T,
absl::Status (FilterType::Call::*impl)(typename T::element_type&)>
absl::Status (FilterType::Call::* impl)(typename T::element_type&)>
struct AddOpImpl<FilterType, T,
absl::Status (FilterType::Call::*)(typename T::element_type&),
impl> {
@@ -463,7 +464,7 @@ struct AddOpImpl<FilterType, T,
// absl::Status $INTERCEPTOR_NAME(const $VALUE_TYPE&)
template <typename FilterType, typename T,
absl::Status (FilterType::Call::*impl)(
absl::Status (FilterType::Call::* impl)(
const typename T::element_type&)>
struct AddOpImpl<
FilterType, T,
@@ -490,8 +491,8 @@ struct AddOpImpl<
// absl::Status $INTERCEPTOR_NAME($VALUE_TYPE&, FilterType*)
template <typename FilterType, typename T,
absl::Status (FilterType::Call::*impl)(typename T::element_type&,
FilterType*)>
absl::Status (FilterType::Call::* impl)(typename T::element_type&,
FilterType*)>
struct AddOpImpl<FilterType, T,
absl::Status (FilterType::Call::*)(typename T::element_type&,
FilterType*),
@@ -519,7 +520,7 @@ struct AddOpImpl<FilterType, T,
// absl::Status $INTERCEPTOR_NAME(const $VALUE_TYPE&, FilterType*)
template <typename FilterType, typename T,
absl::Status (FilterType::Call::*impl)(
absl::Status (FilterType::Call::* impl)(
const typename T::element_type&, FilterType*)>
struct AddOpImpl<FilterType, T,
absl::Status (FilterType::Call::*)(
@@ -548,7 +549,7 @@ struct AddOpImpl<FilterType, T,
// absl::StatusOr<$VALUE_HANDLE> $INTERCEPTOR_NAME($VALUE_HANDLE, FilterType*)
template <typename FilterType, typename T,
absl::StatusOr<T> (FilterType::Call::*impl)(T, FilterType*)>
absl::StatusOr<T> (FilterType::Call::* impl)(T, FilterType*)>
struct AddOpImpl<FilterType, T,
absl::StatusOr<T> (FilterType::Call::*)(T, FilterType*),
impl> {
@@ -575,7 +576,7 @@ struct AddOpImpl<FilterType, T,
// ServerMetadataHandle $INTERCEPTOR_NAME($VALUE_TYPE&)
template <typename FilterType, typename T,
ServerMetadataHandle (FilterType::Call::*impl)(
ServerMetadataHandle (FilterType::Call::* impl)(
typename T::element_type&)>
struct AddOpImpl<FilterType, T,
ServerMetadataHandle (FilterType::Call::*)(
@@ -603,7 +604,7 @@ struct AddOpImpl<FilterType, T,
// ServerMetadataHandle $INTERCEPTOR_NAME(const $VALUE_TYPE&)
template <typename FilterType, typename T,
ServerMetadataHandle (FilterType::Call::*impl)(
ServerMetadataHandle (FilterType::Call::* impl)(
const typename T::element_type&)>
struct AddOpImpl<FilterType, T,
ServerMetadataHandle (FilterType::Call::*)(
@@ -631,7 +632,7 @@ struct AddOpImpl<FilterType, T,
// ServerMetadataHandle $INTERCEPTOR_NAME($VALUE_TYPE&, FilterType*)
template <typename FilterType, typename T,
ServerMetadataHandle (FilterType::Call::*impl)(
ServerMetadataHandle (FilterType::Call::* impl)(
typename T::element_type&, FilterType*)>
struct AddOpImpl<FilterType, T,
ServerMetadataHandle (FilterType::Call::*)(
@@ -660,7 +661,7 @@ struct AddOpImpl<FilterType, T,
// ServerMetadataHandle $INTERCEPTOR_NAME(const $VALUE_TYPE&, FilterType*)
template <typename FilterType, typename T,
ServerMetadataHandle (FilterType::Call::*impl)(
ServerMetadataHandle (FilterType::Call::* impl)(
const typename T::element_type&, FilterType*)>
struct AddOpImpl<FilterType, T,
ServerMetadataHandle (FilterType::Call::*)(
@@ -689,7 +690,7 @@ struct AddOpImpl<FilterType, T,
// PROMISE_RETURNING(absl::Status) $INTERCEPTOR_NAME($VALUE_TYPE&)
template <typename FilterType, typename T, typename R,
R (FilterType::Call::*impl)(typename T::element_type&)>
R (FilterType::Call::* impl)(typename T::element_type&)>
struct AddOpImpl<
FilterType, T, R (FilterType::Call::*)(typename T::element_type&), impl,
absl::enable_if_t<std::is_same<absl::Status, PromiseResult<R>>::value>> {
@@ -739,7 +740,7 @@ struct AddOpImpl<
// PROMISE_RETURNING(absl::Status) $INTERCEPTOR_NAME($VALUE_TYPE&, FilterType*)
template <typename FilterType, typename T, typename R,
R (FilterType::Call::*impl)(typename T::element_type&, FilterType*)>
R (FilterType::Call::* impl)(typename T::element_type&, FilterType*)>
struct AddOpImpl<
FilterType, T,
R (FilterType::Call::*)(typename T::element_type&, FilterType*), impl,
@@ -794,7 +795,7 @@ struct AddOpImpl<
// PROMISE_RETURNING(absl::StatusOr<$VALUE_HANDLE>)
// $INTERCEPTOR_NAME($VALUE_HANDLE, FilterType*)
template <typename FilterType, typename T, typename R,
R (FilterType::Call::*impl)(T, FilterType*)>
R (FilterType::Call::* impl)(T, FilterType*)>
struct AddOpImpl<FilterType, T, R (FilterType::Call::*)(T, FilterType*), impl,
absl::enable_if_t<std::is_same<absl::StatusOr<T>,
PromiseResult<R>>::value>> {
@@ -1029,7 +1030,7 @@ struct StackData {
template <typename FilterType>
void AddFinalizer(FilterType* channel_data, size_t call_offset,
void (FilterType::Call::*p)(const grpc_call_final_info*)) {
void (FilterType::Call::* p)(const grpc_call_final_info*)) {
DCHECK(p == &FilterType::Call::OnFinalize);
finalizers.push_back(Finalizer{
channel_data,
@@ -1043,8 +1044,8 @@ struct StackData {
template <typename FilterType>
void AddFinalizer(FilterType* channel_data, size_t call_offset,
void (FilterType::Call::*p)(const grpc_call_final_info*,
FilterType*)) {
void (FilterType::Call::* p)(const grpc_call_final_info*,
FilterType*)) {
DCHECK(p == &FilterType::Call::OnFinalize);
finalizers.push_back(Finalizer{
channel_data,
@@ -1115,244 +1116,6 @@ class OperationExecutor {
const Operator<T>* end_ops_;
};
class CallState {
public:
CallState();
// Start the call: allows pulls to proceed
void Start();
// PUSH: client -> server
void BeginPushClientToServerMessage();
Poll<StatusFlag> PollPushClientToServerMessage();
void ClientToServerHalfClose();
// PULL: client -> server
void BeginPullClientInitialMetadata();
void FinishPullClientInitialMetadata();
Poll<ValueOrFailure<bool>> PollPullClientToServerMessageAvailable();
void FinishPullClientToServerMessage();
// PUSH: server -> client
StatusFlag PushServerInitialMetadata();
void BeginPushServerToClientMessage();
Poll<StatusFlag> PollPushServerToClientMessage();
bool PushServerTrailingMetadata(bool cancel);
// PULL: server -> client
Poll<bool> PollPullServerInitialMetadataAvailable();
void FinishPullServerInitialMetadata();
Poll<ValueOrFailure<bool>> PollPullServerToClientMessageAvailable();
void FinishPullServerToClientMessage();
Poll<Empty> PollServerTrailingMetadataAvailable();
void FinishPullServerTrailingMetadata();
Poll<bool> PollWasCancelled();
// Debug
std::string DebugString() const;
friend std::ostream& operator<<(std::ostream& out,
const CallState& call_state) {
return out << call_state.DebugString();
}
private:
enum class ClientToServerPullState : uint16_t {
// Ready to read: client initial metadata is there, but not yet processed
kBegin,
// Processing client initial metadata
kProcessingClientInitialMetadata,
// Main call loop: not reading
kIdle,
// Main call loop: reading but no message available
kReading,
// Main call loop: processing one message
kProcessingClientToServerMessage,
// Processing complete
kTerminated,
};
static const char* ClientToServerPullStateString(
ClientToServerPullState state) {
switch (state) {
case ClientToServerPullState::kBegin:
return "Begin";
case ClientToServerPullState::kProcessingClientInitialMetadata:
return "ProcessingClientInitialMetadata";
case ClientToServerPullState::kIdle:
return "Idle";
case ClientToServerPullState::kReading:
return "Reading";
case ClientToServerPullState::kProcessingClientToServerMessage:
return "ProcessingClientToServerMessage";
case ClientToServerPullState::kTerminated:
return "Terminated";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ClientToServerPullState state) {
out.Append(ClientToServerPullStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ClientToServerPullState state) {
return out << ClientToServerPullStateString(state);
}
enum class ClientToServerPushState : uint16_t {
kIdle,
kPushedMessage,
kPushedHalfClose,
kPushedMessageAndHalfClosed,
kFinished,
};
static const char* ClientToServerPushStateString(
ClientToServerPushState state) {
switch (state) {
case ClientToServerPushState::kIdle:
return "Idle";
case ClientToServerPushState::kPushedMessage:
return "PushedMessage";
case ClientToServerPushState::kPushedHalfClose:
return "PushedHalfClose";
case ClientToServerPushState::kPushedMessageAndHalfClosed:
return "PushedMessageAndHalfClosed";
case ClientToServerPushState::kFinished:
return "Finished";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ClientToServerPushState state) {
out.Append(ClientToServerPushStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ClientToServerPushState state) {
return out << ClientToServerPushStateString(state);
}
enum class ServerToClientPullState : uint16_t {
// Not yet started: cannot read
kUnstarted,
kUnstartedReading,
kStarted,
kStartedReading,
// Processing server initial metadata
kProcessingServerInitialMetadata,
kProcessingServerInitialMetadataReading,
// Main call loop: not reading
kIdle,
// Main call loop: reading but no message available
kReading,
// Main call loop: processing one message
kProcessingServerToClientMessage,
// Processing server trailing metadata
kProcessingServerTrailingMetadata,
kTerminated,
};
static const char* ServerToClientPullStateString(
ServerToClientPullState state) {
switch (state) {
case ServerToClientPullState::kUnstarted:
return "Unstarted";
case ServerToClientPullState::kUnstartedReading:
return "UnstartedReading";
case ServerToClientPullState::kStarted:
return "Started";
case ServerToClientPullState::kStartedReading:
return "StartedReading";
case ServerToClientPullState::kProcessingServerInitialMetadata:
return "ProcessingServerInitialMetadata";
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
return "ProcessingServerInitialMetadataReading";
case ServerToClientPullState::kIdle:
return "Idle";
case ServerToClientPullState::kReading:
return "Reading";
case ServerToClientPullState::kProcessingServerToClientMessage:
return "ProcessingServerToClientMessage";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
return "ProcessingServerTrailingMetadata";
case ServerToClientPullState::kTerminated:
return "Terminated";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerToClientPullState state) {
out.Append(ServerToClientPullStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerToClientPullState state) {
return out << ServerToClientPullStateString(state);
}
enum class ServerToClientPushState : uint16_t {
kStart,
kPushedServerInitialMetadata,
kPushedServerInitialMetadataAndPushedMessage,
kTrailersOnly,
kIdle,
kPushedMessage,
kFinished,
};
static const char* ServerToClientPushStateString(
ServerToClientPushState state) {
switch (state) {
case ServerToClientPushState::kStart:
return "Start";
case ServerToClientPushState::kPushedServerInitialMetadata:
return "PushedServerInitialMetadata";
case ServerToClientPushState::
kPushedServerInitialMetadataAndPushedMessage:
return "PushedServerInitialMetadataAndPushedMessage";
case ServerToClientPushState::kTrailersOnly:
return "TrailersOnly";
case ServerToClientPushState::kIdle:
return "Idle";
case ServerToClientPushState::kPushedMessage:
return "PushedMessage";
case ServerToClientPushState::kFinished:
return "Finished";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerToClientPushState state) {
out.Append(ServerToClientPushStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerToClientPushState state) {
return out << ServerToClientPushStateString(state);
}
enum class ServerTrailingMetadataState : uint16_t {
kNotPushed,
kPushed,
kPushedCancel,
kPulled,
kPulledCancel,
};
static const char* ServerTrailingMetadataStateString(
ServerTrailingMetadataState state) {
switch (state) {
case ServerTrailingMetadataState::kNotPushed:
return "NotPushed";
case ServerTrailingMetadataState::kPushed:
return "Pushed";
case ServerTrailingMetadataState::kPushedCancel:
return "PushedCancel";
case ServerTrailingMetadataState::kPulled:
return "Pulled";
case ServerTrailingMetadataState::kPulledCancel:
return "PulledCancel";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerTrailingMetadataState state) {
out.Append(ServerTrailingMetadataStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerTrailingMetadataState state) {
return out << ServerTrailingMetadataStateString(state);
}
ClientToServerPullState client_to_server_pull_state_ : 3;
ClientToServerPushState client_to_server_push_state_ : 3;
ServerToClientPullState server_to_client_pull_state_ : 4;
ServerToClientPushState server_to_client_push_state_ : 3;
ServerTrailingMetadataState server_trailing_metadata_state_ : 3;
IntraActivityWaiter client_to_server_pull_waiter_;
IntraActivityWaiter server_to_client_pull_waiter_;
IntraActivityWaiter client_to_server_push_waiter_;
IntraActivityWaiter server_to_client_push_waiter_;
IntraActivityWaiter server_trailing_metadata_waiter_;
};
template <typename Fn>
class ServerTrailingMetadataInterceptor {
public:
@@ -1533,10 +1296,10 @@ class CallFilters {
}
private:
template <
typename Output, typename Input, Input(CallFilters::*input_location),
filters_detail::Layout<Input>(filters_detail::StackData::*layout),
void (filters_detail::CallState::*on_done)(), typename StackIterator>
template <typename Output, typename Input,
Input(CallFilters::* input_location),
filters_detail::Layout<Input>(filters_detail::StackData::* layout),
void (CallState::* on_done)(), typename StackIterator>
class Executor {
public:
Executor(CallFilters* filters, StackIterator stack_begin,
@@ -1596,7 +1359,7 @@ class CallFilters {
return Executor<ClientMetadataHandle, ClientMetadataHandle,
&CallFilters::push_client_initial_metadata_,
&filters_detail::StackData::client_initial_metadata,
&filters_detail::CallState::FinishPullClientInitialMetadata,
&CallState::FinishPullClientInitialMetadata,
StacksVector::const_iterator>(this, stacks_.cbegin(),
stacks_.cend());
}
@@ -1623,8 +1386,7 @@ class CallFilters {
ServerMetadataHandle,
&CallFilters::push_server_initial_metadata_,
&filters_detail::StackData::server_initial_metadata,
&filters_detail::CallState::
FinishPullServerInitialMetadata,
&CallState::FinishPullServerInitialMetadata,
StacksVector::const_reverse_iterator>(
this, stacks_.crbegin(), stacks_.crend()),
[](ValueOrFailure<absl::optional<ServerMetadataHandle>> r) {
@@ -1663,7 +1425,7 @@ class CallFilters {
absl::optional<MessageHandle>, MessageHandle,
&CallFilters::push_client_to_server_message_,
&filters_detail::StackData::client_to_server_messages,
&filters_detail::CallState::FinishPullClientToServerMessage,
&CallState::FinishPullClientToServerMessage,
StacksVector::const_iterator>(this, stacks_.cbegin(),
stacks_.cend());
},
@@ -1694,7 +1456,7 @@ class CallFilters {
absl::optional<MessageHandle>, MessageHandle,
&CallFilters::push_server_to_client_message_,
&filters_detail::StackData::server_to_client_messages,
&filters_detail::CallState::FinishPullServerToClientMessage,
&CallState::FinishPullServerToClientMessage,
StacksVector::const_reverse_iterator>(
this, stacks_.crbegin(), stacks_.crend());
},
@@ -1755,7 +1517,7 @@ class CallFilters {
StacksVector stacks_;
filters_detail::CallState call_state_;
CallState call_state_;
void* call_data_;
ClientMetadataHandle push_client_initial_metadata_;

@@ -0,0 +1,39 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/transport/call_state.h"
namespace grpc_core {
std::string CallState::DebugString() const {
return absl::StrCat(
"client_to_server_pull_state:", client_to_server_pull_state_,
" client_to_server_push_state:", client_to_server_push_state_,
" server_to_client_pull_state:", server_to_client_pull_state_,
" server_to_client_message_push_state:", server_to_client_push_state_,
" server_trailing_metadata_state:", server_trailing_metadata_state_,
      " client_to_server_push_waiter:",
      client_to_server_push_waiter_.DebugString(),
" server_to_client_push_waiter:",
server_to_client_push_waiter_.DebugString(),
" client_to_server_pull_waiter:",
client_to_server_pull_waiter_.DebugString(),
" server_to_client_pull_waiter:",
server_to_client_pull_waiter_.DebugString(),
" server_trailing_metadata_waiter:",
server_trailing_metadata_waiter_.DebugString());
}
static_assert(sizeof(CallState) <= 16, "CallState too large");
} // namespace grpc_core
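// ---------------------------------------------------------------------------
// Editor's sketch (illustrative; not part of this commit). The push side is a
// Begin/Poll pair so the pusher can park until the puller drains the message.
// A hypothetical server-side helper, assuming it is polled from within the
// call's promise activity:
namespace grpc_core {
inline auto PushOneServerToClientMessage(CallState& call_state) {
  // PushServerInitialMetadata fails if trailing metadata was already pushed
  // (e.g. after cancellation); a real caller would handle that, not CHECK.
  CHECK(call_state.PushServerInitialMetadata().ok());
  call_state.BeginPushServerToClientMessage();
  // Resolves to Success{} once the peer has pulled the message, Failure{} if
  // the call finished first.
  return [&call_state]() { return call_state.PollPushServerToClientMessage(); };
}
}  // namespace grpc_core
// ---------------------------------------------------------------------------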

@@ -0,0 +1,957 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_CALL_STATE_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_CALL_STATE_H
#include "absl/types/optional.h"
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/status_flag.h"
namespace grpc_core {
class CallState {
public:
CallState();
// Start the call: allows pulls to proceed
void Start();
// PUSH: client -> server
void BeginPushClientToServerMessage();
Poll<StatusFlag> PollPushClientToServerMessage();
void ClientToServerHalfClose();
// PULL: client -> server
void BeginPullClientInitialMetadata();
void FinishPullClientInitialMetadata();
Poll<ValueOrFailure<bool>> PollPullClientToServerMessageAvailable();
void FinishPullClientToServerMessage();
// PUSH: server -> client
StatusFlag PushServerInitialMetadata();
void BeginPushServerToClientMessage();
Poll<StatusFlag> PollPushServerToClientMessage();
bool PushServerTrailingMetadata(bool cancel);
// PULL: server -> client
Poll<bool> PollPullServerInitialMetadataAvailable();
void FinishPullServerInitialMetadata();
Poll<ValueOrFailure<bool>> PollPullServerToClientMessageAvailable();
void FinishPullServerToClientMessage();
Poll<Empty> PollServerTrailingMetadataAvailable();
void FinishPullServerTrailingMetadata();
Poll<bool> PollWasCancelled();
// Debug
std::string DebugString() const;
friend std::ostream& operator<<(std::ostream& out,
const CallState& call_state) {
return out << call_state.DebugString();
}
private:
enum class ClientToServerPullState : uint16_t {
// Ready to read: client initial metadata is there, but not yet processed
kBegin,
// Processing client initial metadata
kProcessingClientInitialMetadata,
// Main call loop: not reading
kIdle,
// Main call loop: reading but no message available
kReading,
// Main call loop: processing one message
kProcessingClientToServerMessage,
// Processing complete
kTerminated,
};
static const char* ClientToServerPullStateString(
ClientToServerPullState state) {
switch (state) {
case ClientToServerPullState::kBegin:
return "Begin";
case ClientToServerPullState::kProcessingClientInitialMetadata:
return "ProcessingClientInitialMetadata";
case ClientToServerPullState::kIdle:
return "Idle";
case ClientToServerPullState::kReading:
return "Reading";
case ClientToServerPullState::kProcessingClientToServerMessage:
return "ProcessingClientToServerMessage";
case ClientToServerPullState::kTerminated:
return "Terminated";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ClientToServerPullState state) {
out.Append(ClientToServerPullStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ClientToServerPullState state) {
return out << ClientToServerPullStateString(state);
}
enum class ClientToServerPushState : uint16_t {
kIdle,
kPushedMessage,
kPushedHalfClose,
kPushedMessageAndHalfClosed,
kFinished,
};
static const char* ClientToServerPushStateString(
ClientToServerPushState state) {
switch (state) {
case ClientToServerPushState::kIdle:
return "Idle";
case ClientToServerPushState::kPushedMessage:
return "PushedMessage";
case ClientToServerPushState::kPushedHalfClose:
return "PushedHalfClose";
case ClientToServerPushState::kPushedMessageAndHalfClosed:
return "PushedMessageAndHalfClosed";
case ClientToServerPushState::kFinished:
return "Finished";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ClientToServerPushState state) {
out.Append(ClientToServerPushStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ClientToServerPushState state) {
return out << ClientToServerPushStateString(state);
}
enum class ServerToClientPullState : uint16_t {
// Not yet started: cannot read
kUnstarted,
kUnstartedReading,
kStarted,
kStartedReading,
// Processing server initial metadata
kProcessingServerInitialMetadata,
kProcessingServerInitialMetadataReading,
// Main call loop: not reading
kIdle,
// Main call loop: reading but no message available
kReading,
// Main call loop: processing one message
kProcessingServerToClientMessage,
// Processing server trailing metadata
kProcessingServerTrailingMetadata,
kTerminated,
};
static const char* ServerToClientPullStateString(
ServerToClientPullState state) {
switch (state) {
case ServerToClientPullState::kUnstarted:
return "Unstarted";
case ServerToClientPullState::kUnstartedReading:
return "UnstartedReading";
case ServerToClientPullState::kStarted:
return "Started";
case ServerToClientPullState::kStartedReading:
return "StartedReading";
case ServerToClientPullState::kProcessingServerInitialMetadata:
return "ProcessingServerInitialMetadata";
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
return "ProcessingServerInitialMetadataReading";
case ServerToClientPullState::kIdle:
return "Idle";
case ServerToClientPullState::kReading:
return "Reading";
case ServerToClientPullState::kProcessingServerToClientMessage:
return "ProcessingServerToClientMessage";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
return "ProcessingServerTrailingMetadata";
case ServerToClientPullState::kTerminated:
return "Terminated";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerToClientPullState state) {
out.Append(ServerToClientPullStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerToClientPullState state) {
return out << ServerToClientPullStateString(state);
}
enum class ServerToClientPushState : uint16_t {
kStart,
kPushedServerInitialMetadata,
kPushedServerInitialMetadataAndPushedMessage,
kTrailersOnly,
kIdle,
kPushedMessage,
kFinished,
};
static const char* ServerToClientPushStateString(
ServerToClientPushState state) {
switch (state) {
case ServerToClientPushState::kStart:
return "Start";
case ServerToClientPushState::kPushedServerInitialMetadata:
return "PushedServerInitialMetadata";
case ServerToClientPushState::
kPushedServerInitialMetadataAndPushedMessage:
return "PushedServerInitialMetadataAndPushedMessage";
case ServerToClientPushState::kTrailersOnly:
return "TrailersOnly";
case ServerToClientPushState::kIdle:
return "Idle";
case ServerToClientPushState::kPushedMessage:
return "PushedMessage";
case ServerToClientPushState::kFinished:
return "Finished";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerToClientPushState state) {
out.Append(ServerToClientPushStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerToClientPushState state) {
return out << ServerToClientPushStateString(state);
}
enum class ServerTrailingMetadataState : uint16_t {
kNotPushed,
kPushed,
kPushedCancel,
kPulled,
kPulledCancel,
};
static const char* ServerTrailingMetadataStateString(
ServerTrailingMetadataState state) {
switch (state) {
case ServerTrailingMetadataState::kNotPushed:
return "NotPushed";
case ServerTrailingMetadataState::kPushed:
return "Pushed";
case ServerTrailingMetadataState::kPushedCancel:
return "PushedCancel";
case ServerTrailingMetadataState::kPulled:
return "Pulled";
case ServerTrailingMetadataState::kPulledCancel:
return "PulledCancel";
}
}
template <typename Sink>
friend void AbslStringify(Sink& out, ServerTrailingMetadataState state) {
out.Append(ServerTrailingMetadataStateString(state));
}
friend std::ostream& operator<<(std::ostream& out,
ServerTrailingMetadataState state) {
return out << ServerTrailingMetadataStateString(state);
}
ClientToServerPullState client_to_server_pull_state_ : 3;
ClientToServerPushState client_to_server_push_state_ : 3;
ServerToClientPullState server_to_client_pull_state_ : 4;
ServerToClientPushState server_to_client_push_state_ : 3;
ServerTrailingMetadataState server_trailing_metadata_state_ : 3;
IntraActivityWaiter client_to_server_pull_waiter_;
IntraActivityWaiter server_to_client_pull_waiter_;
IntraActivityWaiter client_to_server_push_waiter_;
IntraActivityWaiter server_to_client_push_waiter_;
IntraActivityWaiter server_trailing_metadata_waiter_;
};
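// Editor's note (illustrative sketch; not part of this change): a cancelled
// call round-trips through the trailing-metadata states as follows, each step
// handled by the inline definitions below:
//
//   call_state.PushServerTrailingMetadata(/*cancel=*/true);  // -> kPushedCancel
//   call_state.PollServerTrailingMetadataAvailable();        // ready: Empty{}
//   call_state.FinishPullServerTrailingMetadata();           // -> kPulledCancel
//   call_state.PollWasCancelled();                           // ready: true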
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline CallState::CallState()
: client_to_server_pull_state_(ClientToServerPullState::kBegin),
client_to_server_push_state_(ClientToServerPushState::kIdle),
server_to_client_pull_state_(ServerToClientPullState::kUnstarted),
server_to_client_push_state_(ServerToClientPushState::kStart),
server_trailing_metadata_state_(ServerTrailingMetadataState::kNotPushed) {
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void CallState::Start() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] Start: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
server_to_client_pull_state_ = ServerToClientPullState::kStarted;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kUnstartedReading:
server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
LOG(FATAL) << "Start called twice";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
case ServerToClientPullState::kTerminated:
break;
}
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::BeginPushClientToServerMessage() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] BeginPushClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kPushedMessage;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
LOG(FATAL) << "PushClientToServerMessage called twice concurrently";
break;
case ClientToServerPushState::kPushedHalfClose:
LOG(FATAL) << "PushClientToServerMessage called after half-close";
break;
case ClientToServerPushState::kFinished:
break;
}
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<StatusFlag>
CallState::PollPushClientToServerMessage() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PollPushClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
case ClientToServerPushState::kPushedHalfClose:
return Success{};
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
return client_to_server_push_waiter_.pending();
case ClientToServerPushState::kFinished:
return Failure{};
}
Crash("Unreachable");
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::ClientToServerHalfClose() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] ClientToServerHalfClose: "
<< GRPC_DUMP_ARGS(this, client_to_server_push_state_);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
client_to_server_push_state_ =
ClientToServerPushState::kPushedMessageAndHalfClosed;
break;
case ClientToServerPushState::kPushedHalfClose:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
LOG(FATAL) << "ClientToServerHalfClose called twice";
break;
case ClientToServerPushState::kFinished:
break;
}
}
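// Editor's note (illustrative sketch; not part of this change): half-closing
// while a message is still pending folds both facts into one state, so the
// pull side observes the message first and the half-close afterwards:
//
//   call_state.BeginPushClientToServerMessage();  // -> kPushedMessage
//   call_state.ClientToServerHalfClose();  // -> kPushedMessageAndHalfClosed
//   // puller: PollPullClientToServerMessageAvailable() -> true (message)
//   //         FinishPullClientToServerMessage()        -> kPushedHalfClose
//   //         PollPullClientToServerMessageAvailable() -> false (half-closed)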
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::BeginPullClientInitialMetadata() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] BeginPullClientInitialMetadata: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
client_to_server_pull_state_ =
ClientToServerPullState::kProcessingClientInitialMetadata;
break;
case ClientToServerPullState::kProcessingClientInitialMetadata:
case ClientToServerPullState::kIdle:
case ClientToServerPullState::kReading:
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "BeginPullClientInitialMetadata called twice";
break;
case ClientToServerPullState::kTerminated:
break;
}
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullClientInitialMetadata() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] FinishPullClientInitialMetadata: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
LOG(FATAL) << "FinishPullClientInitialMetadata called before Begin";
break;
case ClientToServerPullState::kProcessingClientInitialMetadata:
client_to_server_pull_state_ = ClientToServerPullState::kIdle;
client_to_server_pull_waiter_.Wake();
break;
case ClientToServerPullState::kIdle:
case ClientToServerPullState::kReading:
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "Out of order FinishPullClientInitialMetadata";
break;
case ClientToServerPullState::kTerminated:
break;
}
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<ValueOrFailure<bool>>
CallState::PollPullClientToServerMessageAvailable() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PollPullClientToServerMessageAvailable: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
client_to_server_push_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
case ClientToServerPullState::kProcessingClientInitialMetadata:
return client_to_server_pull_waiter_.pending();
case ClientToServerPullState::kIdle:
client_to_server_pull_state_ = ClientToServerPullState::kReading;
ABSL_FALLTHROUGH_INTENDED;
case ClientToServerPullState::kReading:
break;
case ClientToServerPullState::kProcessingClientToServerMessage:
LOG(FATAL) << "PollPullClientToServerMessageAvailable called while "
"processing a message";
break;
case ClientToServerPullState::kTerminated:
return Failure{};
}
DCHECK_EQ(client_to_server_pull_state_, ClientToServerPullState::kReading);
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
return client_to_server_push_waiter_.pending();
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_pull_state_ =
ClientToServerPullState::kProcessingClientToServerMessage;
return true;
case ClientToServerPushState::kPushedHalfClose:
return false;
case ClientToServerPushState::kFinished:
client_to_server_pull_state_ = ClientToServerPullState::kTerminated;
return Failure{};
}
Crash("Unreachable");
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullClientToServerMessage() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] FinishPullClientToServerMessage: "
<< GRPC_DUMP_ARGS(this, client_to_server_pull_state_,
client_to_server_push_state_);
switch (client_to_server_pull_state_) {
case ClientToServerPullState::kBegin:
case ClientToServerPullState::kProcessingClientInitialMetadata:
LOG(FATAL) << "FinishPullClientToServerMessage called before Begin";
break;
case ClientToServerPullState::kIdle:
LOG(FATAL) << "FinishPullClientToServerMessage called twice";
break;
case ClientToServerPullState::kReading:
LOG(FATAL) << "FinishPullClientToServerMessage called before "
"PollPullClientToServerMessageAvailable";
break;
case ClientToServerPullState::kProcessingClientToServerMessage:
client_to_server_pull_state_ = ClientToServerPullState::kIdle;
client_to_server_pull_waiter_.Wake();
break;
case ClientToServerPullState::kTerminated:
break;
}
switch (client_to_server_push_state_) {
case ClientToServerPushState::kPushedMessage:
client_to_server_push_state_ = ClientToServerPushState::kIdle;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kIdle:
case ClientToServerPushState::kPushedHalfClose:
LOG(FATAL) << "FinishPullClientToServerMessage called without a message";
break;
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_push_state_ = ClientToServerPushState::kPushedHalfClose;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kFinished:
break;
}
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline StatusFlag
CallState::PushServerInitialMetadata() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PushServerInitialMetadata: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_,
server_trailing_metadata_state_);
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return Failure{};
}
CHECK_EQ(server_to_client_push_state_, ServerToClientPushState::kStart);
server_to_client_push_state_ =
ServerToClientPushState::kPushedServerInitialMetadata;
server_to_client_push_waiter_.Wake();
return Success{};
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::BeginPushServerToClientMessage() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] BeginPushServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
LOG(FATAL) << "BeginPushServerToClientMessage called before "
"PushServerInitialMetadata";
break;
case ServerToClientPushState::kPushedServerInitialMetadata:
server_to_client_push_state_ =
ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage;
break;
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
LOG(FATAL) << "BeginPushServerToClientMessage called twice concurrently";
break;
case ServerToClientPushState::kTrailersOnly:
// Will fail in poll.
break;
case ServerToClientPushState::kIdle:
server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kFinished:
break;
}
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<StatusFlag>
CallState::PollPushServerToClientMessage() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PollPushServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_push_state_);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
case ServerToClientPushState::kPushedServerInitialMetadata:
LOG(FATAL) << "PollPushServerToClientMessage called before "
<< "PushServerInitialMetadata";
case ServerToClientPushState::kTrailersOnly:
return false;
case ServerToClientPushState::kPushedMessage:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kIdle:
return Success{};
case ServerToClientPushState::kFinished:
return Failure{};
}
Crash("Unreachable");
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline bool
CallState::PushServerTrailingMetadata(bool cancel) {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PushServerTrailingMetadata: "
<< GRPC_DUMP_ARGS(this, cancel, server_trailing_metadata_state_,
server_to_client_push_state_,
client_to_server_push_state_,
server_trailing_metadata_waiter_.DebugString());
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return false;
}
server_trailing_metadata_state_ =
cancel ? ServerTrailingMetadataState::kPushedCancel
: ServerTrailingMetadataState::kPushed;
server_trailing_metadata_waiter_.Wake();
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
server_to_client_push_state_ = ServerToClientPushState::kTrailersOnly;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
if (cancel) {
server_to_client_push_state_ = ServerToClientPushState::kFinished;
server_to_client_push_waiter_.Wake();
}
break;
case ServerToClientPushState::kIdle:
if (cancel) {
server_to_client_push_state_ = ServerToClientPushState::kFinished;
server_to_client_push_waiter_.Wake();
}
break;
case ServerToClientPushState::kFinished:
case ServerToClientPushState::kTrailersOnly:
break;
}
switch (client_to_server_push_state_) {
case ClientToServerPushState::kIdle:
client_to_server_push_state_ = ClientToServerPushState::kFinished;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedMessage:
case ClientToServerPushState::kPushedMessageAndHalfClosed:
client_to_server_push_state_ = ClientToServerPushState::kFinished;
client_to_server_push_waiter_.Wake();
break;
case ClientToServerPushState::kPushedHalfClose:
case ClientToServerPushState::kFinished:
break;
}
return true;
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<bool>
CallState::PollPullServerInitialMetadataAvailable() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PollPullServerInitialMetadataAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_);
bool reading;
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
if (server_to_client_push_state_ ==
ServerToClientPushState::kTrailersOnly) {
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
return false;
}
server_to_client_push_waiter_.pending();
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStartedReading:
reading = true;
break;
case ServerToClientPullState::kStarted:
reading = false;
break;
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollPullServerInitialMetadataAvailable called twice";
case ServerToClientPullState::kTerminated:
return false;
}
DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kStarted ||
server_to_client_pull_state_ ==
ServerToClientPullState::kStartedReading)
<< server_to_client_pull_state_;
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
server_to_client_pull_state_ =
reading
? ServerToClientPullState::kProcessingServerInitialMetadataReading
: ServerToClientPullState::kProcessingServerInitialMetadata;
server_to_client_pull_waiter_.Wake();
return true;
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kPushedMessage:
LOG(FATAL)
<< "PollPullServerInitialMetadataAvailable after metadata processed";
case ServerToClientPushState::kFinished:
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
server_to_client_pull_waiter_.Wake();
return false;
case ServerToClientPushState::kTrailersOnly:
return false;
}
Crash("Unreachable");
}
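// Marks server initial metadata as consumed, unblocking any message push
// that was queued behind it.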
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullServerInitialMetadata() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] FinishPullServerInitialMetadata: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
LOG(FATAL) << "FinishPullServerInitialMetadata called before Start";
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
CHECK_EQ(server_to_client_push_state_,
ServerToClientPushState::kTrailersOnly);
return;
case ServerToClientPullState::kProcessingServerInitialMetadata:
server_to_client_pull_state_ = ServerToClientPullState::kIdle;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
server_to_client_pull_state_ = ServerToClientPullState::kReading;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kIdle:
case ServerToClientPullState::kReading:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "Out of order FinishPullServerInitialMetadata";
case ServerToClientPullState::kTerminated:
return;
}
DCHECK(server_to_client_pull_state_ == ServerToClientPullState::kIdle ||
server_to_client_pull_state_ == ServerToClientPullState::kReading)
<< server_to_client_pull_state_;
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
LOG(FATAL) << "FinishPullServerInitialMetadata called before initial "
"metadata consumed";
case ServerToClientPushState::kPushedServerInitialMetadata:
server_to_client_push_state_ = ServerToClientPushState::kIdle;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
server_to_client_push_state_ = ServerToClientPushState::kPushedMessage;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kPushedMessage:
case ServerToClientPushState::kTrailersOnly:
case ServerToClientPushState::kFinished:
LOG(FATAL) << "FinishPullServerInitialMetadata called twice";
}
}
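// Client-side poll for the next server->client message: true means a
// message is ready to pull, false means the stream ended cleanly (trailing
// metadata or trailers-only), and Failure means the call terminated.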
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<ValueOrFailure<bool>>
CallState::PollPullServerToClientMessageAvailable() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PollPullServerToClientMessageAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_,
server_trailing_metadata_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
server_to_client_pull_state_ = ServerToClientPullState::kUnstartedReading;
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kProcessingServerInitialMetadata:
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerInitialMetadataReading;
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kUnstartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStarted:
server_to_client_pull_state_ = ServerToClientPullState::kStartedReading;
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPullState::kStartedReading:
if (server_to_client_push_state_ ==
ServerToClientPushState::kTrailersOnly) {
return false;
}
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kIdle:
server_to_client_pull_state_ = ServerToClientPullState::kReading;
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPullState::kReading:
break;
case ServerToClientPullState::kProcessingServerToClientMessage:
LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
"processing a message";
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollPullServerToClientMessageAvailable called while "
"processing trailing metadata";
case ServerToClientPullState::kTerminated:
return Failure{};
}
DCHECK_EQ(server_to_client_pull_state_, ServerToClientPullState::kReading);
switch (server_to_client_push_state_) {
case ServerToClientPushState::kStart:
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kIdle:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
return false;
}
server_trailing_metadata_waiter_.pending();
return server_to_client_push_waiter_.pending();
case ServerToClientPushState::kTrailersOnly:
DCHECK_NE(server_trailing_metadata_state_,
ServerTrailingMetadataState::kNotPushed);
return false;
case ServerToClientPushState::kPushedMessage:
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerToClientMessage;
server_to_client_pull_waiter_.Wake();
return true;
case ServerToClientPushState::kFinished:
server_to_client_pull_state_ = ServerToClientPullState::kTerminated;
server_to_client_pull_waiter_.Wake();
return Failure{};
}
Crash("Unreachable");
}
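// Marks the current server->client message as consumed, returning the push
// side to idle so the server may push the next message.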
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullServerToClientMessage() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] FinishPullServerToClientMessage: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_);
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kUnstartedReading:
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
LOG(FATAL)
<< "FinishPullServerToClientMessage called before metadata available";
case ServerToClientPullState::kIdle:
LOG(FATAL) << "FinishPullServerToClientMessage called twice";
case ServerToClientPullState::kReading:
LOG(FATAL) << "FinishPullServerToClientMessage called before "
<< "PollPullServerToClientMessageAvailable";
case ServerToClientPullState::kProcessingServerToClientMessage:
server_to_client_pull_state_ = ServerToClientPullState::kIdle;
server_to_client_pull_waiter_.Wake();
break;
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "FinishPullServerToClientMessage called while processing "
"trailing metadata";
case ServerToClientPullState::kTerminated:
break;
}
switch (server_to_client_push_state_) {
case ServerToClientPushState::kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::kStart:
LOG(FATAL) << "FinishPullServerToClientMessage called before initial "
"metadata consumed";
case ServerToClientPushState::kTrailersOnly:
LOG(FATAL) << "FinishPullServerToClientMessage called after "
"PushServerTrailingMetadata";
case ServerToClientPushState::kPushedMessage:
server_to_client_push_state_ = ServerToClientPushState::kIdle;
server_to_client_push_waiter_.Wake();
break;
case ServerToClientPushState::kIdle:
LOG(FATAL) << "FinishPullServerToClientMessage called without a message";
case ServerToClientPushState::kFinished:
break;
}
}
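// Resolves once pushed trailing metadata can be processed by the pull side;
// waits for any in-flight read or metadata processing to settle first.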
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<Empty>
CallState::PollServerTrailingMetadataAvailable() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PollServerTrailingMetadataAvailable: "
<< GRPC_DUMP_ARGS(this, server_to_client_pull_state_,
server_to_client_push_state_,
server_trailing_metadata_state_,
server_trailing_metadata_waiter_.DebugString());
switch (server_to_client_pull_state_) {
case ServerToClientPullState::kProcessingServerInitialMetadata:
case ServerToClientPullState::kProcessingServerToClientMessage:
case ServerToClientPullState::kProcessingServerInitialMetadataReading:
case ServerToClientPullState::kUnstartedReading:
return server_to_client_pull_waiter_.pending();
case ServerToClientPullState::kStartedReading:
case ServerToClientPullState::kReading:
switch (server_to_client_push_state_) {
case ServerToClientPushState::kTrailersOnly:
case ServerToClientPushState::kIdle:
case ServerToClientPushState::kStart:
case ServerToClientPushState::kFinished:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerTrailingMetadata;
server_to_client_pull_waiter_.Wake();
return Empty{};
}
ABSL_FALLTHROUGH_INTENDED;
case ServerToClientPushState::kPushedServerInitialMetadata:
case ServerToClientPushState::
kPushedServerInitialMetadataAndPushedMessage:
case ServerToClientPushState::kPushedMessage:
server_to_client_push_waiter_.pending();
return server_to_client_pull_waiter_.pending();
}
break;
case ServerToClientPullState::kStarted:
case ServerToClientPullState::kUnstarted:
case ServerToClientPullState::kIdle:
if (server_trailing_metadata_state_ !=
ServerTrailingMetadataState::kNotPushed) {
server_to_client_pull_state_ =
ServerToClientPullState::kProcessingServerTrailingMetadata;
server_to_client_pull_waiter_.Wake();
return Empty{};
}
return server_trailing_metadata_waiter_.pending();
case ServerToClientPullState::kProcessingServerTrailingMetadata:
LOG(FATAL) << "PollServerTrailingMetadataAvailable called twice";
case ServerToClientPullState::kTerminated:
return Empty{};
}
Crash("Unreachable");
}
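// Marks trailing metadata as pulled, recording whether it represented a
// cancellation and waking PollWasCancelled.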
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline void
CallState::FinishPullServerTrailingMetadata() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] FinishPullServerTrailingMetadata: "
<< GRPC_DUMP_ARGS(this, server_trailing_metadata_state_,
server_trailing_metadata_waiter_.DebugString());
switch (server_trailing_metadata_state_) {
case ServerTrailingMetadataState::kNotPushed:
LOG(FATAL) << "FinishPullServerTrailingMetadata called before "
"PollServerTrailingMetadataAvailable";
case ServerTrailingMetadataState::kPushed:
server_trailing_metadata_state_ = ServerTrailingMetadataState::kPulled;
server_trailing_metadata_waiter_.Wake();
break;
case ServerTrailingMetadataState::kPushedCancel:
server_trailing_metadata_state_ =
ServerTrailingMetadataState::kPulledCancel;
server_trailing_metadata_waiter_.Wake();
break;
case ServerTrailingMetadataState::kPulled:
case ServerTrailingMetadataState::kPulledCancel:
LOG(FATAL) << "FinishPullServerTrailingMetadata called twice";
}
}
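// Resolves once trailing metadata has been pulled: true iff the call ended
// in cancellation.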
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline Poll<bool>
CallState::PollWasCancelled() {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] PollWasCancelled: "
<< GRPC_DUMP_ARGS(this, server_trailing_metadata_state_);
switch (server_trailing_metadata_state_) {
case ServerTrailingMetadataState::kNotPushed:
case ServerTrailingMetadataState::kPushed:
case ServerTrailingMetadataState::kPushedCancel: {
return server_trailing_metadata_waiter_.pending();
}
case ServerTrailingMetadataState::kPulled:
return false;
case ServerTrailingMetadataState::kPulledCancel:
return true;
}
Crash("Unreachable");
}
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_TRANSPORT_CALL_STATE_H
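
A note on the contract above: each Poll* method either advances the state machine and returns a value, or parks itself on a waiter and returns pending; the peer side wakes that waiter after every transition, and the parked activity re-polls. Below is a minimal, self-contained sketch of that poll/park/wake loop; Waiter, Push, and the lambda are illustrative stand-ins, not the grpc_core types.

#include <functional>
#include <iostream>
#include <utility>

// Toy waiter: holds at most one parked callback; Wake() runs and clears it.
class Waiter {
 public:
  void pending(std::function<void()> cb) { cb_ = std::move(cb); }
  void Wake() {
    if (cb_) std::exchange(cb_, nullptr)();
  }

 private:
  std::function<void()> cb_;
};

enum class Push { kIdle, kPushedMessage };

int main() {
  Waiter push_waiter;
  Push state = Push::kIdle;
  // Pull side: consume a message if one is pushed, otherwise park.
  std::function<void()> poll = [&] {
    if (state == Push::kPushedMessage) {
      state = Push::kIdle;  // analogous to FinishPullServerToClientMessage
      std::cout << "message pulled\n";
    } else {
      push_waiter.pending(poll);  // park until the push side wakes us
    }
  };
  poll();                        // nothing pushed yet: parks
  state = Push::kPushedMessage;  // push side: message becomes available
  push_waiter.Wake();            // re-runs poll, which now consumes it
}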

@ -66,13 +66,13 @@ class HijackedCall final {
// A delegating UnstartedCallDestination for use as a hijacking filter.
//
// This class proveds the final StartCall method, and delegates to the
// This class provides the final StartCall method, and delegates to the
// InterceptCall() method for the actual interception. It has the same semantics
// as StartCall, but affords the implementation the ability to prepare the
// UnstartedCallHandler appropriately.
//
// Implementations may look at the unprocessed initial metadata
// and decide to do one of two things:
// and decide to do one of three things:
//
// 1. It can hijack the call. Returns a HijackedCall object that can
// be used to start new calls with the same metadata.
@ -81,6 +81,12 @@ class HijackedCall final {
//
// 3. It can pass the call through to the next interceptor by calling
// `PassThrough`.
//
// When StartCall is invoked, the UnstartedCallHandler comes from the last
// *Interceptor* in the call chain, without having been processed by any
// intervening filters -- this is commonly not useful (it offers too few
// guarantees), so it's usually better to Hijack and examine the metadata.
class Interceptor : public UnstartedCallDestination {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) final {
@ -99,7 +105,7 @@ class Interceptor : public UnstartedCallDestination {
return Map(call_handler.PullClientInitialMetadata(),
[call_handler, destination = wrapped_destination_](
ValueOrFailure<ClientMetadataHandle> metadata) mutable
-> ValueOrFailure<HijackedCall> {
-> ValueOrFailure<HijackedCall> {
if (!metadata.ok()) return Failure{};
return HijackedCall(std::move(metadata.value()),
std::move(destination),
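
The shape this hunk documents is a template method: the base class seals StartCall and routes every call through the virtual InterceptCall hook, which may hijack the call or pass it through (the comment's second option falls outside this hunk). A standalone sketch of that delegation follows, using simplified stand-in types rather than the real grpc_core classes.

#include <iostream>
#include <memory>
#include <string>

struct CallHandler { std::string initial_metadata; };

class Destination {
 public:
  virtual ~Destination() = default;
  virtual void StartCall(CallHandler handler) = 0;
};

// Base interceptor: StartCall is final; InterceptCall is the hook.
class InterceptorBase : public Destination {
 public:
  explicit InterceptorBase(std::shared_ptr<Destination> next)
      : next_(std::move(next)) {}
  void StartCall(CallHandler handler) final {
    InterceptCall(std::move(handler));
  }

 protected:
  virtual void InterceptCall(CallHandler handler) = 0;
  // Option 3 from the comment: forward the call unchanged.
  void PassThrough(CallHandler handler) {
    next_->StartCall(std::move(handler));
  }

 private:
  std::shared_ptr<Destination> next_;
};

class LoggingInterceptor final : public InterceptorBase {
  using InterceptorBase::InterceptorBase;
  void InterceptCall(CallHandler handler) override {
    std::cout << "intercepted: " << handler.initial_metadata << "\n";
    PassThrough(std::move(handler));
  }
};

class Terminal final : public Destination {
  void StartCall(CallHandler handler) override {
    std::cout << "started: " << handler.initial_metadata << "\n";
  }
};

int main() {
  LoggingInterceptor chain(std::make_shared<Terminal>());
  chain.StartCall({"path: /Echo"});
}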

@ -93,7 +93,6 @@
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/channelz/channelz.h"
#include "src/core/client_channel/client_channel_filter.h"
@ -497,10 +496,9 @@ class GrpcLb final : public LoadBalancingPolicy {
new_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// In TRANSIENT_FAILURE. Cancel the fallback timer and go into
// fallback mode immediately.
gpr_log(GPR_INFO,
"[grpclb %p] balancer channel in state:TRANSIENT_FAILURE (%s); "
"entering fallback mode",
parent_.get(), status.ToString().c_str());
LOG(INFO) << "[grpclb " << parent_.get()
<< "] balancer channel in state:TRANSIENT_FAILURE ("
<< status.ToString() << "); entering fallback mode";
parent_->fallback_at_startup_checks_pending_ = false;
parent_->channel_control_helper()->GetEventEngine()->Cancel(
*parent_->lb_fallback_timer_handle_);
@ -852,12 +850,11 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
client_stats = parent()->lb_calld_->client_stats()->Ref();
}
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO,
"[grpclb %p helper %p] state=%s (%s) wrapping child "
"picker %p (serverlist=%p, client_stats=%p)",
parent(), this, ConnectivityStateName(state),
status.ToString().c_str(), picker.get(), serverlist.get(),
client_stats.get());
LOG(INFO) << "[grpclb " << parent() << " helper " << this
<< "] state=" << ConnectivityStateName(state) << " ("
<< status.ToString() << ") wrapping child picker " << picker.get()
<< " (serverlist=" << serverlist.get()
<< ", client_stats=" << client_stats.get() << ")";
}
parent()->channel_control_helper()->UpdateState(
state, status,
@ -949,8 +946,8 @@ void GrpcLb::BalancerCallState::Orphan() {
void GrpcLb::BalancerCallState::StartQuery() {
CHECK_NE(lb_call_, nullptr);
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO, "[grpclb %p] lb_calld=%p: Starting LB call %p",
grpclb_policy_.get(), this, lb_call_);
LOG(INFO) << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
<< ": Starting LB call " << lb_call_;
}
// Create the ops.
grpc_call_error call_error;
@ -1176,18 +1173,16 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
client_stats_report_interval_ = std::max(
Duration::Seconds(1), response.client_stats_report_interval);
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting interval = %" PRId64
" milliseconds",
grpclb_policy(), this,
client_stats_report_interval_.millis());
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Received initial LB response message; client load "
"reporting interval = "
<< client_stats_report_interval_.millis()
<< " milliseconds";
}
} else if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting NOT enabled",
grpclb_policy(), this);
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Received initial LB response message; client load "
"reporting NOT enabled";
}
seen_initial_response_ = true;
break;
@ -1197,12 +1192,11 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
auto serverlist_wrapper =
MakeRefCounted<Serverlist>(std::move(response.serverlist));
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Serverlist with %" PRIuPTR
" servers received:\n%s",
grpclb_policy(), this,
serverlist_wrapper->serverlist().size(),
serverlist_wrapper->AsText().c_str());
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Serverlist with "
<< serverlist_wrapper->serverlist().size()
<< " servers received:\n"
<< serverlist_wrapper->AsText();
}
seen_serverlist_ = true;
// Start sending client load report only after we start using the
@ -1218,10 +1212,9 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
if (grpclb_policy()->serverlist_ != nullptr &&
*grpclb_policy()->serverlist_ == *serverlist_wrapper) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Incoming server list identical "
"to current, ignoring.",
grpclb_policy(), this);
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Incoming server list identical to current, "
"ignoring.";
}
} else { // New serverlist.
// Dispose of the fallback.
@ -1244,10 +1237,9 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
// it in favor of the xds policy. We will implement this the
// right way in the xds policy instead.
if (grpclb_policy()->fallback_mode_) {
gpr_log(GPR_INFO,
"[grpclb %p] Received response from balancer; exiting "
"fallback mode",
grpclb_policy());
LOG(INFO) << "[grpclb " << grpclb_policy()
<< "] Received response from balancer; exiting fallback "
"mode";
grpclb_policy()->fallback_mode_ = false;
}
if (grpclb_policy()->fallback_at_startup_checks_pending_) {
@ -1266,9 +1258,8 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
}
case response.FALLBACK: {
if (!grpclb_policy()->fallback_mode_) {
gpr_log(GPR_INFO,
"[grpclb %p] Entering fallback mode as requested by balancer",
grpclb_policy());
LOG(INFO) << "[grpclb " << grpclb_policy()
<< "] Entering fallback mode as requested by balancer";
if (grpclb_policy()->fallback_at_startup_checks_pending_) {
grpclb_policy()->fallback_at_startup_checks_pending_ = false;
grpclb_policy()->channel_control_helper()->GetEventEngine()->Cancel(
@ -1317,11 +1308,11 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
CHECK_NE(lb_call_, nullptr);
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
char* status_details = grpc_slice_to_c_string(lb_call_status_details_);
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Status from LB server received. "
"Status = %d, details = '%s', (lb_call: %p), error '%s'",
grpclb_policy(), this, lb_call_status_, status_details, lb_call_,
StatusToString(error).c_str());
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Status from LB server received. Status = "
<< lb_call_status_ << ", details = '" << status_details
<< "', (lb_call: " << lb_call_ << "), error '"
<< StatusToString(error) << "'";
gpr_free(status_details);
}
// If this lb_calld is still in use, this call ended because of a failure so
@ -1334,10 +1325,9 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
grpclb_policy()->lb_calld_.reset();
if (grpclb_policy()->fallback_at_startup_checks_pending_) {
CHECK(!seen_serverlist_);
gpr_log(GPR_INFO,
"[grpclb %p] Balancer call finished without receiving "
"serverlist; entering fallback mode",
grpclb_policy());
LOG(INFO) << "[grpclb " << grpclb_policy()
<< "] Balancer call finished without receiving serverlist; "
"entering fallback mode";
grpclb_policy()->fallback_at_startup_checks_pending_ = false;
grpclb_policy()->channel_control_helper()->GetEventEngine()->Cancel(
*grpclb_policy()->lb_fallback_timer_handle_);
@ -1666,9 +1656,9 @@ void GrpcLb::StartBalancerCallLocked() {
CHECK(lb_calld_ == nullptr);
lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO,
"[grpclb %p] Query for backends (lb_channel: %p, lb_calld: %p)",
this, lb_channel_.get(), lb_calld_.get());
LOG(INFO) << "[grpclb " << this
<< "] Query for backends (lb_channel: " << lb_channel_.get()
<< ", lb_calld: " << lb_calld_.get() << ")";
}
lb_calld_->StartQuery();
}
@ -1678,8 +1668,8 @@ void GrpcLb::StartBalancerCallRetryTimerLocked() {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this << "] Connection to LB server lost...";
if (timeout > Duration::Zero()) {
gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRId64 "ms.",
this, timeout.millis());
LOG(INFO) << "[grpclb " << this << "] ... retry_timer_active in "
<< timeout.millis() << "ms.";
} else {
LOG(INFO) << "[grpclb " << this
<< "] ... retry_timer_active immediately.";
@ -1724,10 +1714,9 @@ void GrpcLb::MaybeEnterFallbackModeAfterStartup() {
if (!fallback_mode_ && !fallback_at_startup_checks_pending_ &&
(lb_calld_ == nullptr || !lb_calld_->seen_serverlist()) &&
!child_policy_ready_) {
gpr_log(GPR_INFO,
"[grpclb %p] lost contact with balancer and backends from "
"most recent serverlist; entering fallback mode",
this);
LOG(INFO) << "[grpclb " << this
<< "] lost contact with balancer and backends from most recent "
"serverlist; entering fallback mode";
fallback_mode_ = true;
CreateOrUpdateChildPolicyLocked();
}
@ -1873,9 +1862,8 @@ void GrpcLb::OnSubchannelCacheTimerLocked() {
auto it = cached_subchannels_.begin();
if (it != cached_subchannels_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
gpr_log(GPR_INFO,
"[grpclb %p] removing %" PRIuPTR " subchannels from cache",
this, it->second.size());
LOG(INFO) << "[grpclb " << this << "] removing " << it->second.size()
<< " subchannels from cache";
}
cached_subchannels_.erase(it);
}
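
These grpclb hunks, like the pick_first, priority, ring_hash, and rls hunks that follow, all apply the same mechanical conversion: printf-style gpr_log(GPR_INFO, ...) calls become Abseil streaming LOG(INFO) statements, eliminating the %p/%s/PRId64 format specifiers and the .c_str() conversions. A hedged standalone illustration of the rewrite (the function name and arguments here are made up, not taken from the diff):

#include <cstdint>

#include "absl/log/log.h"
#include "absl/status/status.h"

void LogRetry(void* policy, const absl::Status& status, int64_t millis) {
  // Before: gpr_log(GPR_INFO,
  //             "[policy %p] status=%s; retrying in %" PRId64 "ms",
  //             policy, status.ToString().c_str(), millis);
  // After: pointers, absl::Status, and integers all stream directly.
  LOG(INFO) << "[policy " << policy << "] status=" << status
            << "; retrying in " << millis << "ms";
}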

@ -39,7 +39,6 @@
#include <grpc/event_engine/event_engine.h>
#include <grpc/impl/channel_arg_names.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/address_utils/sockaddr_utils.h"
@ -633,9 +632,8 @@ void PickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] health watch state update: %s (%s)",
policy_.get(), ConnectivityStateName(new_state),
status.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: "
<< ConnectivityStateName(new_state) << " (" << status << ")";
}
switch (new_state) {
case GRPC_CHANNEL_READY:
@ -675,9 +673,8 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
pick_first_(subchannel_data_->subchannel_list_->policy_),
subchannel_(std::move(subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel state %p (subchannel %p): starting watch",
pick_first_.get(), this, subchannel_.get());
LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get() << "): starting watch";
}
auto watcher = std::make_unique<Watcher>(Ref(DEBUG_LOCATION, "Watcher"));
watcher_ = watcher.get();
@ -686,10 +683,9 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel state %p (subchannel %p): "
"cancelling watch and unreffing subchannel",
pick_first_.get(), this, subchannel_.get());
LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get()
<< "): cancelling watch and unreffing subchannel";
}
subchannel_data_ = nullptr;
subchannel_->CancelConnectivityStateWatch(watcher_);
@ -701,8 +697,8 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", pick_first_.get(),
subchannel_.get());
LOG(INFO) << "Pick First " << pick_first_.get() << " selected subchannel "
<< subchannel_.get();
}
CHECK_NE(subchannel_data_, nullptr);
pick_first_->UnsetSelectedSubchannel(); // Cancel health watch, if any.
@ -713,7 +709,7 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
// If health checking is NOT enabled, report READY.
if (pick_first_->enable_health_watch_) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] starting health watch", pick_first_.get());
LOG(INFO) << "[PF " << pick_first_.get() << "] starting health watch";
}
auto watcher = std::make_unique<HealthWatcher>(
pick_first_.Ref(DEBUG_LOCATION, "HealthWatcher"));
@ -751,13 +747,13 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::
absl::Status status) {
if (watcher_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel state %p (subchannel %p): connectivity "
"changed: new_state=%s, status=%s, watcher=%p, "
"subchannel_data_=%p, pick_first_->selected_=%p",
pick_first_.get(), this, subchannel_.get(),
ConnectivityStateName(new_state), status.ToString().c_str(),
watcher_, subchannel_data_, pick_first_->selected_.get());
LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get()
<< "): connectivity changed: new_state="
<< ConnectivityStateName(new_state) << ", status=" << status
<< ", watcher=" << watcher_
<< ", subchannel_data_=" << subchannel_data_
<< ", pick_first_->selected_=" << pick_first_->selected_.get();
}
// If we're still part of a subchannel list trying to connect, check
// if we're connected.
@ -774,9 +770,9 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::
// We aren't trying to connect, so we must be the selected subchannel.
CHECK(pick_first_->selected_.get() == this);
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p selected subchannel connectivity changed to %s",
pick_first_.get(), ConnectivityStateName(new_state));
LOG(INFO) << "Pick First " << pick_first_.get()
<< " selected subchannel connectivity changed to "
<< ConnectivityStateName(new_state);
}
// Any state change is considered to be a failure of the existing
// connection. Report the failure.
@ -798,10 +794,9 @@ PickFirst::SubchannelList::SubchannelData::SubchannelData(
RefCountedPtr<SubchannelInterface> subchannel)
: subchannel_list_(subchannel_list), index_(index) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel list %p index %" PRIuPTR
": creating subchannel data",
subchannel_list_->policy_.get(), subchannel_list_, index_);
LOG(INFO) << "[PF " << subchannel_list_->policy_.get()
<< "] subchannel list " << subchannel_list_ << " index " << index_
<< ": creating subchannel data";
}
subchannel_state_ =
MakeOrphanable<SubchannelState>(this, std::move(subchannel));
@ -811,20 +806,20 @@ void PickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
PickFirst* p = subchannel_list_->policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(
GPR_INFO,
"[PF %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel_state %p): connectivity changed: old_state=%s, "
"new_state=%s, status=%s, seen_transient_failure=%d, p->selected_=%p, "
"p->subchannel_list_=%p, p->subchannel_list_->shutting_down_=%d",
p, subchannel_list_, index_, subchannel_list_->size(),
subchannel_state_.get(),
(connectivity_state_.has_value()
? ConnectivityStateName(*connectivity_state_)
: "N/A"),
ConnectivityStateName(new_state), status.ToString().c_str(),
seen_transient_failure_, p->selected_.get(), p->subchannel_list_.get(),
p->subchannel_list_->shutting_down_);
LOG(INFO) << "[PF " << p << "] subchannel list " << subchannel_list_
<< " index " << index_ << " of " << subchannel_list_->size()
<< " (subchannel_state " << subchannel_state_.get()
<< "): connectivity changed: old_state="
<< (connectivity_state_.has_value()
? ConnectivityStateName(*connectivity_state_)
: "N/A")
<< ", new_state=" << ConnectivityStateName(new_state)
<< ", status=" << status
<< ", seen_transient_failure=" << seen_transient_failure_
<< ", p->selected_=" << p->selected_.get()
<< ", p->subchannel_list_=" << p->subchannel_list_.get()
<< ", p->subchannel_list_->shutting_down_="
<< p->subchannel_list_->shutting_down_;
}
if (subchannel_list_->shutting_down_) return;
// The notification must be for a subchannel in the current list.
@ -864,10 +859,9 @@ void PickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
// connection and report IDLE.
if (p->selected_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel list %p: new update has no subchannels in "
"state READY; dropping existing connection and going IDLE",
p, subchannel_list_);
LOG(INFO) << "[PF " << p << "] subchannel list " << subchannel_list_
<< ": new update has no subchannels in "
<< "state READY; dropping existing connection and going IDLE";
}
p->GoIdle();
} else {
@ -965,11 +959,10 @@ void PickFirst::SubchannelList::SubchannelData::RequestConnectionWithTimer() {
if (index_ != subchannel_list_->size() - 1) {
PickFirst* p = subchannel_list_->policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p subchannel list %p: starting Connection "
"Attempt Delay timer for %" PRId64 "ms for index %" PRIuPTR,
p, subchannel_list_, p->connection_attempt_delay_.millis(),
index_);
LOG(INFO) << "Pick First " << p << " subchannel list " << subchannel_list_
<< ": starting Connection Attempt Delay timer for "
<< p->connection_attempt_delay_.millis() << "ms for index "
<< index_;
}
subchannel_list_->timer_handle_ =
p->channel_control_helper()->GetEventEngine()->RunAfter(
@ -982,14 +975,13 @@ void PickFirst::SubchannelList::SubchannelData::RequestConnectionWithTimer() {
sl->policy_->work_serializer()->Run(
[subchannel_list = std::move(subchannel_list)]() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p subchannel list %p: Connection "
"Attempt Delay timer fired (shutting_down=%d, "
"selected=%p)",
subchannel_list->policy_.get(),
subchannel_list.get(),
subchannel_list->shutting_down_,
subchannel_list->policy_->selected_.get());
LOG(INFO)
<< "Pick First " << subchannel_list->policy_.get()
<< " subchannel list " << subchannel_list.get()
<< ": Connection Attempt Delay timer fired "
"(shutting_down="
<< subchannel_list->shutting_down_ << ", selected="
<< subchannel_list->policy_->selected_.get() << ")";
}
if (subchannel_list->shutting_down_) return;
if (subchannel_list->policy_->selected_ != nullptr) return;
@ -1015,8 +1007,8 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
.Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] Creating subchannel list %p - channel args: %s",
policy_.get(), this, args_.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list "
<< this << " - channel args: " << args_.ToString();
}
if (addresses == nullptr) return;
// Create a subchannel for each address.
@ -1028,18 +1020,16 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
if (subchannel == nullptr) {
// Subchannel could not be created.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] could not create subchannel for address %s, ignoring",
policy_.get(), address.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get()
<< "] could not create subchannel for address "
<< address.ToString() << ", ignoring";
}
return;
}
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address %s",
policy_.get(), this, subchannels_.size(), subchannel.get(),
address.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get() << "] subchannel list " << this
<< " index " << subchannels_.size() << ": Created subchannel "
<< subchannel.get() << " for address " << address.ToString();
}
subchannels_.emplace_back(std::make_unique<SubchannelData>(
this, subchannels_.size(), std::move(subchannel)));
@ -1048,15 +1038,15 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
PickFirst::SubchannelList::~SubchannelList() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] Destroying subchannel_list %p", policy_.get(),
this);
LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list "
<< this;
}
}
void PickFirst::SubchannelList::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] Shutting down subchannel_list %p", policy_.get(),
this);
LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list "
<< this;
}
CHECK(!shutting_down_);
shutting_down_ = true;
@ -1100,10 +1090,8 @@ void PickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p subchannel list %p failed to connect to "
"all subchannels",
policy_.get(), this);
LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels";
}
// Re-resolve and report TRANSIENT_FAILURE.
policy_->channel_control_helper()->RequestReresolution();
@ -1577,9 +1565,8 @@ void OldPickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] health watch state update: %s (%s)",
policy_.get(), ConnectivityStateName(new_state),
status.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: "
<< ConnectivityStateName(new_state) << " (" << status << ")";
}
switch (new_state) {
case GRPC_CHANNEL_READY:
@ -1619,11 +1606,9 @@ OldPickFirst::SubchannelList::SubchannelData::SubchannelData(
index_(index),
subchannel_(std::move(subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel list %p index %" PRIuPTR
" (subchannel %p): starting watch",
subchannel_list_->policy_.get(), subchannel_list_, index_,
subchannel_.get());
LOG(INFO) << "[PF " << subchannel_list_->policy_.get()
<< "] subchannel list " << subchannel_list_ << " index " << index_
<< " (subchannel " << subchannel_.get() << "): starting watch";
}
auto watcher = std::make_unique<Watcher>(
subchannel_list_->Ref(DEBUG_LOCATION, "Watcher"), index_);
@ -1634,11 +1619,11 @@ OldPickFirst::SubchannelList::SubchannelData::SubchannelData(
void OldPickFirst::SubchannelList::SubchannelData::ShutdownLocked() {
if (subchannel_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): cancelling watch and unreffing subchannel",
subchannel_list_->policy_.get(), subchannel_list_, index_,
subchannel_list_->size(), subchannel_.get());
LOG(INFO) << "[PF " << subchannel_list_->policy_.get()
<< "] subchannel list " << subchannel_list_ << " index "
<< index_ << " of " << subchannel_list_->size()
<< " (subchannel " << subchannel_.get()
<< "): cancelling watch and unreffing subchannel";
}
subchannel_->CancelConnectivityStateWatch(pending_watcher_);
pending_watcher_ = nullptr;
@ -1650,22 +1635,22 @@ void OldPickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
OldPickFirst* p = subchannel_list_->policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(
GPR_INFO,
"[PF %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): connectivity changed: old_state=%s, new_state=%s, "
"status=%s, shutting_down=%d, pending_watcher=%p, "
"seen_transient_failure=%d, p->selected_=%p, "
"p->subchannel_list_=%p, p->latest_pending_subchannel_list_=%p",
p, subchannel_list_, index_, subchannel_list_->size(),
subchannel_.get(),
(connectivity_state_.has_value()
? ConnectivityStateName(*connectivity_state_)
: "N/A"),
ConnectivityStateName(new_state), status.ToString().c_str(),
subchannel_list_->shutting_down_, pending_watcher_,
seen_transient_failure_, p->selected_, p->subchannel_list_.get(),
p->latest_pending_subchannel_list_.get());
LOG(INFO) << "[PF " << p << "] subchannel list " << subchannel_list_
<< " index " << index_ << " of " << subchannel_list_->size()
<< " (subchannel " << subchannel_.get()
<< "): connectivity changed: old_state="
<< (connectivity_state_.has_value()
? ConnectivityStateName(*connectivity_state_)
: "N/A")
<< ", new_state=" << ConnectivityStateName(new_state)
<< ", status=" << status
<< ", shutting_down=" << subchannel_list_->shutting_down_
<< ", pending_watcher=" << pending_watcher_
<< ", seen_transient_failure=" << seen_transient_failure_
<< ", p->selected_=" << p->selected_
<< ", p->subchannel_list_=" << p->subchannel_list_.get()
<< ", p->latest_pending_subchannel_list_="
<< p->latest_pending_subchannel_list_.get();
}
if (subchannel_list_->shutting_down_ || pending_watcher_ == nullptr) return;
auto& stats_plugins = subchannel_list_->policy_->channel_control_helper()
@ -1682,9 +1667,9 @@ void OldPickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
if (p->selected_ == this) {
CHECK(subchannel_list_ == p->subchannel_list_.get());
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p selected subchannel connectivity changed to %s", p,
ConnectivityStateName(new_state));
LOG(INFO) << "Pick First " << p
<< " selected subchannel connectivity changed to "
<< ConnectivityStateName(new_state);
}
// Any state change is considered to be a failure of the existing
// connection.
@ -1701,11 +1686,9 @@ void OldPickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
// If there is a pending update, switch to the pending update.
if (p->latest_pending_subchannel_list_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(),
p->subchannel_list_.get());
LOG(INFO) << "Pick First " << p << " promoting pending subchannel "
<< "list " << p->latest_pending_subchannel_list_.get()
<< " to replace " << p->subchannel_list_.get();
}
p->UnsetSelectedSubchannel();
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
@ -1865,11 +1848,10 @@ void OldPickFirst::SubchannelList::SubchannelData::
if (index_ != subchannel_list_->size() - 1) {
OldPickFirst* p = subchannel_list_->policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p subchannel list %p: starting Connection "
"Attempt Delay timer for %" PRId64 "ms for index %" PRIuPTR,
p, subchannel_list_, p->connection_attempt_delay_.millis(),
index_);
LOG(INFO) << "Pick First " << p << " subchannel list " << subchannel_list_
<< ": starting Connection Attempt Delay timer for "
<< p->connection_attempt_delay_.millis() << "ms for index "
<< index_;
}
subchannel_list_->timer_handle_ =
p->channel_control_helper()->GetEventEngine()->RunAfter(
@ -1882,14 +1864,13 @@ void OldPickFirst::SubchannelList::SubchannelData::
sl->policy_->work_serializer()->Run(
[subchannel_list = std::move(subchannel_list)]() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p subchannel list %p: Connection "
"Attempt Delay timer fired (shutting_down=%d, "
"selected=%p)",
subchannel_list->policy_.get(),
subchannel_list.get(),
subchannel_list->shutting_down_,
subchannel_list->policy_->selected_);
LOG(INFO)
<< "Pick First " << subchannel_list->policy_.get()
<< " subchannel list " << subchannel_list.get()
<< ": Connection Attempt Delay timer fired "
<< "(shutting_down="
<< subchannel_list->shutting_down_ << ", selected="
<< subchannel_list->policy_->selected_ << ")";
}
if (subchannel_list->shutting_down_) return;
if (subchannel_list->policy_->selected_ != nullptr) return;
@ -1923,19 +1904,17 @@ void OldPickFirst::SubchannelList::SubchannelData::
// Case 2. Promote p->latest_pending_subchannel_list_ to p->subchannel_list_.
if (subchannel_list_ == p->latest_pending_subchannel_list_.get()) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(),
p->subchannel_list_.get());
LOG(INFO) << "Pick First " << p << " promoting pending subchannel list "
<< p->latest_pending_subchannel_list_.get() << " to replace "
<< p->subchannel_list_.get();
}
p->UnsetSelectedSubchannel();
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
}
// Cases 1 and 2.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p,
subchannel_.get());
LOG(INFO) << "Pick First " << p << " selected subchannel "
<< subchannel_.get();
}
p->selected_ = this;
// If health checking is enabled, start the health watch, but don't
@ -1944,7 +1923,7 @@ void OldPickFirst::SubchannelList::SubchannelData::
// If health checking is NOT enabled, report READY.
if (p->enable_health_watch_) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] starting health watch", p);
LOG(INFO) << "[PF " << p << "] starting health watch";
}
auto watcher = std::make_unique<HealthWatcher>(
p->RefAsSubclass<OldPickFirst>(DEBUG_LOCATION, "HealthWatcher"));
@ -1979,8 +1958,8 @@ OldPickFirst::SubchannelList::SubchannelList(
.Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] Creating subchannel list %p - channel args: %s",
policy_.get(), this, args_.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list "
<< this << " - channel args: " << args_.ToString();
}
if (addresses == nullptr) return;
// Create a subchannel for each address.
@ -1992,18 +1971,16 @@ OldPickFirst::SubchannelList::SubchannelList(
if (subchannel == nullptr) {
// Subchannel could not be created.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] could not create subchannel for address %s, ignoring",
policy_.get(), address.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get()
<< "] could not create subchannel for address "
<< address.ToString() << ", ignoring";
}
return;
}
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"[PF %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address %s",
policy_.get(), this, subchannels_.size(), subchannel.get(),
address.ToString().c_str());
LOG(INFO) << "[PF " << policy_.get() << "] subchannel list " << this
<< " index " << subchannels_.size() << ": Created subchannel "
<< subchannel.get() << " for address " << address.ToString();
}
subchannels_.emplace_back(this, subchannels_.size(), std::move(subchannel));
});
@ -2011,15 +1988,15 @@ OldPickFirst::SubchannelList::SubchannelList(
OldPickFirst::SubchannelList::~SubchannelList() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] Destroying subchannel_list %p", policy_.get(),
this);
LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list "
<< this;
}
}
void OldPickFirst::SubchannelList::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO, "[PF %p] Shutting down subchannel_list %p", policy_.get(),
this);
LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list "
<< this;
}
CHECK(!shutting_down_);
shutting_down_ = true;
@ -2065,21 +2042,18 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p subchannel list %p failed to connect to "
"all subchannels",
policy_.get(), this);
LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels";
}
// In case 2, swap to the new subchannel list. This means reporting
// TRANSIENT_FAILURE and dropping the existing (working) connection,
// but we can't ignore what the control plane has told us.
if (policy_->latest_pending_subchannel_list_.get() == this) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
gpr_log(GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
policy_.get(), policy_->latest_pending_subchannel_list_.get(),
this);
LOG(INFO) << "Pick First " << policy_.get()
<< " promoting pending subchannel list "
<< policy_->latest_pending_subchannel_list_.get()
<< " to replace " << this;
}
policy_->UnsetSelectedSubchannel();
policy_->subchannel_list_ =
@ -2108,7 +2082,6 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
}
}
}
//
// factory
//

@ -37,7 +37,6 @@
#include <grpc/event_engine/event_engine.h>
#include <grpc/impl/channel_arg_names.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_args.h"
@ -318,9 +317,9 @@ void PriorityLb::ExitIdleLocked() {
if (current_priority_ != UINT32_MAX) {
const std::string& child_name = config_->priorities()[current_priority_];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] exiting IDLE for current priority %d child %s",
this, current_priority_, child_name.c_str());
LOG(INFO) << "[priority_lb " << this
<< "] exiting IDLE for current priority " << current_priority_
<< " child " << child_name;
}
children_[child_name]->ExitIdleLocked();
}
@ -404,8 +403,8 @@ void PriorityLb::ChoosePriorityLocked() {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO, "[priority_lb %p] trying priority %u, child %s", this,
priority, child_name.c_str());
LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name;
}
auto& child = children_[child_name];
// Create child if needed.
@ -445,28 +444,26 @@ void PriorityLb::ChoosePriorityLocked() {
}
// Child has been failing for a while. Move on to the next priority.
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] skipping priority %u, child %s: state=%s, "
"failover timer not pending",
this, priority, child_name.c_str(),
ConnectivityStateName(child->connectivity_state()));
LOG(INFO) << "[priority_lb " << this << "] skipping priority " << priority
<< ", child " << child_name << ": state="
<< ConnectivityStateName(child->connectivity_state())
<< ", failover timer not pending";
}
}
// If we didn't find any priority to try, pick the first one in state
// CONNECTING.
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] no priority reachable, checking for CONNECTING "
"priority to delegate to",
this);
LOG(INFO) << "[priority_lb " << this
<< "] no priority reachable, checking for CONNECTING priority to "
"delegate to";
}
for (uint32_t priority = 0; priority < config_->priorities().size();
++priority) {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO, "[priority_lb %p] trying priority %u, child %s", this,
priority, child_name.c_str());
LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name;
}
auto& child = children_[child_name];
CHECK(child != nullptr);
@ -486,11 +483,10 @@ void PriorityLb::SetCurrentPriorityLocked(int32_t priority,
bool deactivate_lower_priorities,
const char* reason) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] selecting priority %u, child %s (%s, "
"deactivate_lower_priorities=%d)",
this, priority, config_->priorities()[priority].c_str(), reason,
deactivate_lower_priorities);
LOG(INFO) << "[priority_lb " << this << "] selecting priority " << priority
<< ", child " << config_->priorities()[priority] << " (" << reason
<< ", deactivate_lower_priorities=" << deactivate_lower_priorities
<< ")";
}
current_priority_ = priority;
if (deactivate_lower_priorities) {
@ -515,12 +511,10 @@ PriorityLb::ChildPriority::DeactivationTimer::DeactivationTimer(
RefCountedPtr<PriorityLb::ChildPriority> child_priority)
: child_priority_(std::move(child_priority)) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivating -- will remove in "
"%" PRId64 "ms",
child_priority_->priority_policy_.get(),
child_priority_->name_.c_str(), child_priority_.get(),
kChildRetentionInterval.millis());
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get() << "): deactivating -- will remove in "
<< kChildRetentionInterval.millis() << "ms";
}
timer_handle_ =
child_priority_->priority_policy_->channel_control_helper()
@ -539,9 +533,9 @@ PriorityLb::ChildPriority::DeactivationTimer::DeactivationTimer(
void PriorityLb::ChildPriority::DeactivationTimer::Orphan() {
if (timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): reactivating",
child_priority_->priority_policy_.get(),
child_priority_->name_.c_str(), child_priority_.get());
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get() << "): reactivating";
}
child_priority_->priority_policy_->channel_control_helper()
->GetEventEngine()
@ -555,11 +549,10 @@ void PriorityLb::ChildPriority::DeactivationTimer::OnTimerLocked() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivation timer fired, "
"deleting child",
child_priority_->priority_policy_.get(),
child_priority_->name_.c_str(), child_priority_.get());
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get()
<< "): deactivation timer fired, deleting child";
}
child_priority_->priority_policy_->DeleteChild(child_priority_.get());
}
@ -573,13 +566,12 @@ PriorityLb::ChildPriority::FailoverTimer::FailoverTimer(
RefCountedPtr<PriorityLb::ChildPriority> child_priority)
: child_priority_(std::move(child_priority)) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(
GPR_INFO,
"[priority_lb %p] child %s (%p): starting failover timer for %" PRId64
"ms",
child_priority_->priority_policy_.get(), child_priority_->name_.c_str(),
child_priority_.get(),
child_priority_->priority_policy_->child_failover_timeout_.millis());
LOG(INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): starting failover timer for "
<< child_priority_->priority_policy_->child_failover_timeout_.millis()
<< "ms";
}
timer_handle_ =
child_priority_->priority_policy_->channel_control_helper()
@ -599,10 +591,9 @@ PriorityLb::ChildPriority::FailoverTimer::FailoverTimer(
void PriorityLb::ChildPriority::FailoverTimer::Orphan() {
if (timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): cancelling failover timer",
child_priority_->priority_policy_.get(),
child_priority_->name_.c_str(), child_priority_.get());
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get() << "): cancelling failover timer";
}
child_priority_->priority_policy_->channel_control_helper()
->GetEventEngine()
@ -616,11 +607,10 @@ void PriorityLb::ChildPriority::FailoverTimer::OnTimerLocked() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): failover timer fired, "
"reporting TRANSIENT_FAILURE",
child_priority_->priority_policy_.get(),
child_priority_->name_.c_str(), child_priority_.get());
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get()
<< "): failover timer fired, reporting TRANSIENT_FAILURE";
}
child_priority_->OnConnectivityStateUpdateLocked(
GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -637,8 +627,8 @@ PriorityLb::ChildPriority::ChildPriority(
RefCountedPtr<PriorityLb> priority_policy, std::string name)
: priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO, "[priority_lb %p] creating child %s (%p)",
priority_policy_.get(), name_.c_str(), this);
LOG(INFO) << "[priority_lb " << priority_policy_.get()
<< "] creating child " << name_ << " (" << this << ")";
}
// Start the failover timer.
failover_timer_ = MakeOrphanable<FailoverTimer>(Ref());
@ -646,8 +636,8 @@ PriorityLb::ChildPriority::ChildPriority(
void PriorityLb::ChildPriority::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): orphaned",
priority_policy_.get(), name_.c_str(), this);
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): orphaned";
}
failover_timer_.reset();
deactivation_timer_.reset();
@ -676,8 +666,8 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
bool ignore_reresolution_requests) {
if (priority_policy_->shutting_down_) return absl::OkStatus();
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): start update",
priority_policy_.get(), name_.c_str(), this);
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): start update";
}
ignore_reresolution_requests_ = ignore_reresolution_requests;
// Create policy if needed.
@ -702,9 +692,9 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
update_args.args = priority_policy_->args_;
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): updating child policy handler %p",
priority_policy_.get(), name_.c_str(), this, child_policy_.get());
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): updating child policy handler "
<< child_policy_.get();
}
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -720,10 +710,9 @@ PriorityLb::ChildPriority::CreateChildPolicyLocked(const ChannelArgs& args) {
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&priority_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): created new child policy "
"handler %p",
priority_policy_.get(), name_.c_str(), this, lb_policy.get());
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): created new child policy handler "
<< lb_policy.get();
}
// Add the parent's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
@ -745,11 +734,10 @@ void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked(
grpc_connectivity_state state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): state update: %s (%s) picker %p",
priority_policy_.get(), name_.c_str(), this,
ConnectivityStateName(state), status.ToString().c_str(),
picker.get());
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this
<< "): state update: " << ConnectivityStateName(state) << " ("
<< status << ") picker " << picker.get();
}
// Store the state and picker.
connectivity_state_ = state;

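The hunks above all make the same mechanical change: a printf-style gpr_log(GPR_INFO, ...) call becomes an Abseil streaming LOG(INFO), still guarded by the trace flag. A minimal sketch of the pattern; trace_enabled() is a placeholder standing in for GRPC_TRACE_FLAG_ENABLED and is an assumption of this sketch, not a gRPC API.

#include <string>

#include "absl/log/log.h"

namespace {

// Placeholder for GRPC_TRACE_FLAG_ENABLED(priority_lb); an assumption of
// this sketch, not a gRPC API.
bool trace_enabled() { return true; }

void LogChildUpdate(void* policy, const std::string& name, void* child) {
  if (trace_enabled()) {
    // Old style (printf-like format specifiers, manual .c_str()):
    //   gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): start update",
    //           policy, name.c_str(), child);
    // New style streams values directly, so pointer/string arguments need
    // no format specifiers and cannot be mismatched at runtime.
    LOG(INFO) << "[priority_lb " << policy << "] child " << name << " ("
              << child << "): start update";
  }
}

}  // namespace
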
@ -40,7 +40,6 @@
#include <grpc/impl/channel_arg_names.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/support/json.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/client_channel/client_channel_internal.h"
@ -573,14 +572,13 @@ void RingHash::RingHashEndpoint::OnStateUpdate(
grpc_connectivity_state new_state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
gpr_log(
GPR_INFO,
"[RH %p] connectivity changed for endpoint %p (%s, child_policy=%p): "
"prev_state=%s new_state=%s (%s)",
ring_hash_.get(), this,
ring_hash_->endpoints_[index_].ToString().c_str(), child_policy_.get(),
ConnectivityStateName(connectivity_state_),
ConnectivityStateName(new_state), status.ToString().c_str());
LOG(INFO) << "[RH " << ring_hash_.get()
<< "] connectivity changed for endpoint " << this << " ("
<< ring_hash_->endpoints_[index_].ToString()
<< ", child_policy=" << child_policy_.get()
<< "): prev_state=" << ConnectivityStateName(connectivity_state_)
<< " new_state=" << ConnectivityStateName(new_state) << " ("
<< status << ")";
}
if (child_policy_ == nullptr) return; // Already orphaned.
// Update state.

@ -64,7 +64,6 @@
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/json.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/channelz/channelz.h"
@ -807,8 +806,9 @@ RlsLb::ChildPolicyWrapper::ChildPolicyWrapper(RefCountedPtr<RlsLb> lb_policy,
void RlsLb::ChildPolicyWrapper::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] ChildPolicyWrapper=%p [%s]: shutdown",
lb_policy_.get(), this, target_.c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "]: shutdown";
}
is_shutdown_ = true;
lb_policy_->child_policy_map_.erase(target_);
@ -866,11 +866,10 @@ void RlsLb::ChildPolicyWrapper::StartUpdate() {
lb_policy_->config_->child_policy_config(), &errors);
CHECK(child_policy_config.has_value());
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(
GPR_INFO,
"[rlslb %p] ChildPolicyWrapper=%p [%s]: validating update, config: %s",
lb_policy_.get(), this, target_.c_str(),
JsonDump(*child_policy_config).c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "]: validating update, config: "
<< JsonDump(*child_policy_config);
}
auto config =
CoreConfiguration::Get().lb_policy_registry().ParseLoadBalancingConfig(
@ -878,11 +877,9 @@ void RlsLb::ChildPolicyWrapper::StartUpdate() {
// Returned RLS target fails the validation.
if (!config.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] ChildPolicyWrapper=%p [%s]: config failed to parse: "
"%s",
lb_policy_.get(), this, target_.c_str(),
config.status().ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "]: config failed to parse: " << config.status();
}
pending_config_.reset();
picker_ = MakeRefCounted<TransientFailurePicker>(
@ -907,20 +904,19 @@ absl::Status RlsLb::ChildPolicyWrapper::MaybeFinishUpdate() {
child_policy_ = MakeOrphanable<ChildPolicyHandler>(std::move(create_args),
&rls_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] ChildPolicyWrapper=%p [%s], created new child policy "
"handler %p",
lb_policy_.get(), this, target_.c_str(), child_policy_.get());
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "], created new child policy handler "
<< child_policy_.get();
}
grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
lb_policy_->interested_parties());
}
// Send the child the updated config.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] ChildPolicyWrapper=%p [%s], updating child policy "
"handler %p",
lb_policy_.get(), this, target_.c_str(), child_policy_.get());
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "], updating child policy handler " << child_policy_.get();
}
UpdateArgs update_args;
update_args.config = std::move(pending_config_);
@ -937,12 +933,11 @@ void RlsLb::ChildPolicyWrapper::ChildPolicyHelper::UpdateState(
grpc_connectivity_state state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] ChildPolicyWrapper=%p [%s] ChildPolicyHelper=%p: "
"UpdateState(state=%s, status=%s, picker=%p)",
wrapper_->lb_policy_.get(), wrapper_.get(),
wrapper_->target_.c_str(), this, ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
LOG(INFO) << "[rlslb " << wrapper_->lb_policy_.get()
<< "] ChildPolicyWrapper=" << wrapper_.get() << " ["
<< wrapper_->target_ << "] ChildPolicyHelper=" << this
<< ": UpdateState(state=" << ConnectivityStateName(state)
<< ", status=" << status << ", picker=" << picker.get() << ")";
}
{
MutexLock lock(&wrapper_->lb_policy_->mu_);
@ -1045,8 +1040,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
lb_policy_->channel_control_helper()->GetAuthority(),
args.initial_metadata)};
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] picker=%p: request keys: %s",
lb_policy_.get(), this, key.ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": request keys: " << key.ToString();
}
Timestamp now = Timestamp::Now();
MutexLock lock(&lb_policy_->mu_);
@ -1083,8 +1078,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
// If the entry has non-expired data, use it.
if (entry->data_expiration_time() >= now) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] picker=%p: using cache entry %p",
lb_policy_.get(), this, entry);
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": using cache entry " << entry;
}
return entry->Pick(args);
}
@ -1099,8 +1094,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
}
// RLS call pending. Queue the pick.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] picker=%p: RLS request pending; queuing pick",
lb_policy_.get(), this);
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": RLS request pending; queuing pick";
}
return PickResult::Queue();
}
@ -1109,8 +1104,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail(
const char* reason, PickArgs args, absl::Status status) {
if (default_child_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] picker=%p: %s; using default target",
lb_policy_.get(), this, reason);
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
<< reason << "; using default target";
}
auto pick_result = default_child_policy_->Pick(args);
lb_policy_->MaybeExportPickCount(kMetricDefaultTargetPicks,
@ -1118,8 +1113,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail(
return pick_result;
}
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] picker=%p: %s; failing pick",
lb_policy_.get(), this, reason);
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
<< reason << "; failing pick";
}
auto& stats_plugins =
lb_policy_->channel_control_helper()->GetStatsPluginGroup();
@ -1155,10 +1150,11 @@ void RlsLb::Cache::Entry::BackoffTimer::Orphan() {
entry_->lb_policy_->channel_control_helper()->GetEventEngine()->Cancel(
*backoff_timer_task_handle_)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] cache entry=%p %s, backoff timer canceled",
entry_->lb_policy_.get(), entry_.get(),
entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString().c_str());
LOG(INFO) << "[rlslb " << entry_->lb_policy_.get()
<< "] cache entry=" << entry_.get() << " "
<< (entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString())
<< ", backoff timer canceled";
}
}
backoff_timer_task_handle_.reset();
@ -1169,10 +1165,11 @@ void RlsLb::Cache::Entry::BackoffTimer::OnBackoffTimerLocked() {
{
MutexLock lock(&entry_->lb_policy_->mu_);
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] cache entry=%p %s, backoff timer fired",
entry_->lb_policy_.get(), entry_.get(),
entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString().c_str());
LOG(INFO) << "[rlslb " << entry_->lb_policy_.get()
<< "] cache entry=" << entry_.get() << " "
<< (entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString())
<< ", backoff timer fired";
}
// Skip the update if the timer was orphaned in the meantime.
if (!backoff_timer_task_handle_.has_value()) return;
@ -1208,8 +1205,8 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
void RlsLb::Cache::Entry::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] cache entry=%p %s: cache entry evicted",
lb_policy_.get(), this, lru_iterator_->ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
<< " " << lru_iterator_->ToString() << ": cache entry evicted";
}
is_shutdown_ = true;
lb_policy_->cache_.lru_list_.erase(lru_iterator_);
@ -1239,12 +1236,11 @@ LoadBalancingPolicy::PickResult RlsLb::Cache::Entry::Pick(PickArgs args) {
GRPC_CHANNEL_TRANSIENT_FAILURE &&
i < child_policy_wrappers_.size() - 1) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] cache entry=%p %s: target %s (%" PRIuPTR
" of %" PRIuPTR ") in state TRANSIENT_FAILURE; skipping",
lb_policy_.get(), this, lru_iterator_->ToString().c_str(),
child_policy_wrapper->target().c_str(), i,
child_policy_wrappers_.size());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
<< " " << lru_iterator_->ToString() << ": target "
<< child_policy_wrapper->target() << " (" << i << " of "
<< child_policy_wrappers_.size()
<< ") in state TRANSIENT_FAILURE; skipping";
}
continue;
}
@ -1253,13 +1249,13 @@ LoadBalancingPolicy::PickResult RlsLb::Cache::Entry::Pick(PickArgs args) {
// Child policy not in TRANSIENT_FAILURE or is the last target in
// the list, so delegate.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] cache entry=%p %s: target %s (%" PRIuPTR " of %" PRIuPTR
") in state %s; delegating",
lb_policy_.get(), this, lru_iterator_->ToString().c_str(),
child_policy_wrapper->target().c_str(), i,
child_policy_wrappers_.size(),
ConnectivityStateName(child_policy_wrapper->connectivity_state()));
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
<< " " << lru_iterator_->ToString() << ": target "
<< child_policy_wrapper->target() << " (" << i << " of "
<< child_policy_wrappers_.size() << ") in state "
<< ConnectivityStateName(
child_policy_wrapper->connectivity_state())
<< "; delegating";
}
auto pick_result = child_policy_wrapper->Pick(args);
lb_policy_->MaybeExportPickCount(kMetricTargetPicks,
@ -1408,15 +1404,15 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
map_.emplace(key, OrphanablePtr<Entry>(entry));
size_ += entry_size;
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] key=%s: cache entry added, entry=%p",
lb_policy_, key.ToString().c_str(), entry);
LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": cache entry added, entry=" << entry;
}
return entry;
}
// Entry found, so use it.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] key=%s: found cache entry %p", lb_policy_,
key.ToString().c_str(), it->second.get());
LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": found cache entry " << it->second.get();
}
it->second->MarkUsed();
return it->second.get();
@ -1424,8 +1420,8 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
void RlsLb::Cache::Resize(size_t bytes) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] resizing cache to %" PRIuPTR " bytes",
lb_policy_, bytes);
LOG(INFO) << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes
<< " bytes";
}
size_limit_ = bytes;
MaybeShrinkSize(size_limit_);
@ -1445,7 +1441,7 @@ void RlsLb::Cache::Shutdown() {
lb_policy_->channel_control_helper()->GetEventEngine()->Cancel(
*cleanup_timer_handle_)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] cache cleanup timer canceled", lb_policy_);
LOG(INFO) << "[rlslb " << lb_policy_ << "] cache cleanup timer canceled";
}
}
cleanup_timer_handle_.reset();
@ -1483,7 +1479,7 @@ void RlsLb::Cache::StartCleanupTimer() {
void RlsLb::Cache::OnCleanupTimer() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] cache cleanup timer fired", lb_policy_);
LOG(INFO) << "[rlslb " << lb_policy_ << "] cache cleanup timer fired";
}
MutexLock lock(&lb_policy_->mu_);
if (!cleanup_timer_handle_.has_value()) return;
@ -1512,17 +1508,16 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
CHECK(map_it != map_.end());
if (!map_it->second->CanEvict()) break;
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] LRU eviction: removing entry %p %s",
lb_policy_, map_it->second.get(), lru_it->ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
<< map_it->second.get() << " " << lru_it->ToString();
}
size_ -= map_it->second->Size();
map_.erase(map_it);
}
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] LRU pass complete: desired size=%" PRIuPTR
" size=%" PRIuPTR,
lb_policy_, bytes, size_);
LOG(INFO) << "[rlslb " << lb_policy_
<< "] LRU pass complete: desired size=" << bytes
<< " size=" << size_;
}
}
@ -1534,11 +1529,9 @@ void RlsLb::RlsChannel::StateWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, const absl::Status& status) {
auto* lb_policy = rls_channel_->lb_policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] RlsChannel=%p StateWatcher=%p: "
"state changed to %s (%s)",
lb_policy, rls_channel_.get(), this,
ConnectivityStateName(new_state), status.ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy << "] RlsChannel=" << rls_channel_.get()
<< " StateWatcher=" << this << ": state changed to "
<< ConnectivityStateName(new_state) << " (" << status << ")";
}
if (rls_channel_->is_shutdown_) return;
MutexLock lock(&lb_policy->mu_);
@ -1633,9 +1626,9 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
grpc_channel_create(lb_policy_->config_->lookup_service().c_str(),
creds.get(), args.ToC().get())));
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] RlsChannel=%p: created channel %p for %s",
lb_policy_.get(), this, channel_.get(),
lb_policy_->config_->lookup_service().c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
<< ": created channel " << channel_.get() << " for "
<< lb_policy_->config_->lookup_service();
}
if (channel_ != nullptr) {
// Set up channelz linkage.
@ -1656,8 +1649,8 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
void RlsLb::RlsChannel::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] RlsChannel=%p, channel=%p: shutdown",
lb_policy_.get(), this, channel_.get());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
<< ", channel=" << channel_.get() << ": shutdown";
}
is_shutdown_ = true;
if (channel_ != nullptr) {
@ -1723,9 +1716,8 @@ RlsLb::RlsRequest::RlsRequest(
reason_(reason),
stale_header_data_(std::move(stale_header_data)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] rls_request=%p: RLS request created for key %s",
lb_policy_.get(), this, key_.ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< ": RLS request created for key " << key_.ToString();
}
GRPC_CLOSURE_INIT(&call_complete_cb_, OnRlsCallComplete, this, nullptr);
ExecCtx::Run(
@ -1740,8 +1732,8 @@ RlsLb::RlsRequest::~RlsRequest() { CHECK_EQ(call_, nullptr); }
void RlsLb::RlsRequest::Orphan() {
if (call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] rls_request=%p %s: cancelling RLS call",
lb_policy_.get(), this, key_.ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< " " << key_.ToString() << ": cancelling RLS call";
}
grpc_call_cancel_internal(call_);
}
@ -1814,12 +1806,10 @@ void RlsLb::RlsRequest::OnRlsCallComplete(void* arg, grpc_error_handle error) {
void RlsLb::RlsRequest::OnRlsCallCompleteLocked(grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
std::string status_message(StringViewFromSlice(status_details_recv_));
gpr_log(GPR_INFO,
"[rlslb %p] rls_request=%p %s, error=%s, status={%d, %s} RLS call "
"response received",
lb_policy_.get(), this, key_.ToString().c_str(),
StatusToString(error).c_str(), status_recv_,
status_message.c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< " " << key_.ToString() << ", error=" << StatusToString(error)
<< ", status={" << status_recv_ << ", " << status_message << "}"
<< " RLS call response received";
}
// Parse response.
ResponseInfo response;
@ -1846,9 +1836,9 @@ void RlsLb::RlsRequest::OnRlsCallCompleteLocked(grpc_error_handle error) {
call_ = nullptr;
// Return result to cache.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] rls_request=%p %s: response info: %s",
lb_policy_.get(), this, key_.ToString().c_str(),
response.ToString().c_str());
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< " " << key_.ToString()
<< ": response info: " << response.ToString();
}
std::vector<ChildPolicyWrapper*> child_policies_to_finish_update;
{
@ -2042,8 +2032,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
created_default_child = true;
} else {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO,
"[rlslb %p] using existing child for default target", this);
LOG(INFO) << "[rlslb " << this
<< "] using existing child for default target";
}
default_child_policy_ =
it->second->Ref(DEBUG_LOCATION, "DefaultChildPolicy");
@ -2197,9 +2187,8 @@ void RlsLb::UpdatePickerLocked() {
for (auto& p : child_policy_map_) {
grpc_connectivity_state child_state = p.second->connectivity_state();
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
gpr_log(GPR_INFO, "[rlslb %p] target %s in state %s", this,
p.second->target().c_str(),
ConnectivityStateName(child_state));
LOG(INFO) << "[rlslb " << this << "] target " << p.second->target()
<< " in state " << ConnectivityStateName(child_state);
}
if (child_state == GRPC_CHANNEL_READY) {
state = GRPC_CHANNEL_READY;

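The RLS hunks above log around an LRU cache (FindOrInsert, MarkUsed, MaybeShrinkSize, an lru_list_ plus a map_). A generic sketch of the list-plus-map LRU idiom those names suggest; this is illustrative only, not gRPC's implementation, and all names here are hypothetical.

#include <cstddef>
#include <list>
#include <string>
#include <unordered_map>

// Hypothetical value type; gRPC's entries hold pickers, backoff state, etc.
class LruCache {
 public:
  explicit LruCache(size_t entry_limit) : entry_limit_(entry_limit) {}

  // Returns the entry for key, inserting it if absent. Assumes
  // entry_limit_ >= 1 so a freshly inserted entry is never evicted.
  int& FindOrInsert(const std::string& key) {
    auto it = map_.find(key);
    if (it != map_.end()) {
      MarkUsed(it->second.lru_it);  // bump to most-recently-used
      return it->second.value;
    }
    lru_list_.push_front(key);
    Entry& entry = map_[key];
    entry.lru_it = lru_list_.begin();
    MaybeShrinkSize();
    return entry.value;
  }

 private:
  struct Entry {
    int value = 0;
    std::list<std::string>::iterator lru_it;
  };

  // Splice the entry to the front of the LRU list (most recently used).
  void MarkUsed(std::list<std::string>::iterator it) {
    lru_list_.splice(lru_list_.begin(), lru_list_, it);
  }

  // Evict from the back (least recently used) until within the limit.
  void MaybeShrinkSize() {
    while (map_.size() > entry_limit_) {
      map_.erase(lru_list_.back());
      lru_list_.pop_back();
    }
  }

  size_t entry_limit_;
  std::list<std::string> lru_list_;  // front = most recently used
  std::unordered_map<std::string, Entry> map_;
};
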
@ -42,7 +42,6 @@
#include <grpc/event_engine/event_engine.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_args.h"
@ -449,11 +448,11 @@ void WeightedRoundRobin::EndpointWeight::MaybeUpdateWeight(
}
if (weight == 0) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p] subchannel %s: qps=%f, eps=%f, utilization=%f: "
"error_util_penalty=%f, weight=%f (not updating)",
wrr_.get(), key_.ToString().c_str(), qps, eps, utilization,
error_utilization_penalty, weight);
LOG(INFO) << "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": qps=" << qps << ", eps=" << eps
<< ", utilization=" << utilization
<< ": error_util_penalty=" << error_utilization_penalty
<< ", weight=" << weight << " (not updating)";
}
return;
}
@ -461,14 +460,14 @@ void WeightedRoundRobin::EndpointWeight::MaybeUpdateWeight(
// Grab the lock and update the data.
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p] subchannel %s: qps=%f, eps=%f, utilization=%f "
"error_util_penalty=%f : setting weight=%f weight_=%f now=%s "
"last_update_time_=%s non_empty_since_=%s",
wrr_.get(), key_.ToString().c_str(), qps, eps, utilization,
error_utilization_penalty, weight, weight_, now.ToString().c_str(),
last_update_time_.ToString().c_str(),
non_empty_since_.ToString().c_str());
LOG(INFO) << "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": qps=" << qps << ", eps=" << eps
<< ", utilization=" << utilization
<< " error_util_penalty=" << error_utilization_penalty
<< " : setting weight=" << weight << " weight_=" << weight_
<< " now=" << now.ToString()
<< " last_update_time_=" << last_update_time_.ToString()
<< " non_empty_since_=" << non_empty_since_.ToString();
}
if (non_empty_since_ == Timestamp::InfFuture()) non_empty_since_ = now;
weight_ = weight;
@ -480,15 +479,14 @@ float WeightedRoundRobin::EndpointWeight::GetWeight(
uint64_t* num_not_yet_usable, uint64_t* num_stale) {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p] subchannel %s: getting weight: now=%s "
"weight_expiration_period=%s blackout_period=%s "
"last_update_time_=%s non_empty_since_=%s weight_=%f",
wrr_.get(), key_.ToString().c_str(), now.ToString().c_str(),
weight_expiration_period.ToString().c_str(),
blackout_period.ToString().c_str(),
last_update_time_.ToString().c_str(),
non_empty_since_.ToString().c_str(), weight_);
LOG(INFO) << "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": getting weight: now=" << now.ToString()
<< " weight_expiration_period="
<< weight_expiration_period.ToString()
<< " blackout_period=" << blackout_period.ToString()
<< " last_update_time_=" << last_update_time_.ToString()
<< " non_empty_since_=" << non_empty_since_.ToString()
<< " weight_=" << weight_;
}
// If the most recent update was longer ago than the expiration
// period, reset non_empty_since_ so that we apply the blackout period
@ -558,24 +556,25 @@ WeightedRoundRobin::Picker::Picker(RefCountedPtr<WeightedRoundRobin> wrr,
global_stats().IncrementWrrSubchannelListSize(endpoint_list->size());
global_stats().IncrementWrrSubchannelReadySize(endpoints_.size());
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p picker %p] created picker from endpoint_list=%p "
"with %" PRIuPTR " subchannels",
wrr_.get(), this, endpoint_list, endpoints_.size());
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] created picker from endpoint_list=" << endpoint_list
<< " with " << endpoints_.size() << " subchannels";
}
BuildSchedulerAndStartTimerLocked();
}
WeightedRoundRobin::Picker::~Picker() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO, "[WRR %p picker %p] destroying picker", wrr_.get(), this);
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] destroying picker";
}
}
void WeightedRoundRobin::Picker::Orphaned() {
MutexLock lock(&timer_mu_);
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO, "[WRR %p picker %p] cancelling timer", wrr_.get(), this);
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] cancelling timer";
}
wrr_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset();
@ -587,9 +586,9 @@ WeightedRoundRobin::PickResult WeightedRoundRobin::Picker::Pick(PickArgs args) {
CHECK(index < endpoints_.size());
auto& endpoint_info = endpoints_[index];
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p picker %p] returning index %" PRIuPTR ", picker=%p",
wrr_.get(), this, index, endpoint_info.picker.get());
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] returning index " << index
<< ", picker=" << endpoint_info.picker.get();
}
auto result = endpoint_info.picker->Pick(args);
// Collect per-call utilization data if needed.
@ -643,8 +642,8 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
{wrr_->channel_control_helper()->GetTarget()},
{wrr_->locality_name_});
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO, "[WRR %p picker %p] new weights: %s", wrr_.get(), this,
absl::StrJoin(weights, " ").c_str());
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] new weights: " << absl::StrJoin(weights, " ");
}
auto scheduler_or = StaticStrideScheduler::Make(
weights, [this]() { return wrr_->scheduler_state_.fetch_add(1); });
@ -653,13 +652,13 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
scheduler =
std::make_shared<StaticStrideScheduler>(std::move(*scheduler_or));
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO, "[WRR %p picker %p] new scheduler: %p", wrr_.get(),
this, scheduler.get());
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] new scheduler: " << scheduler.get();
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO, "[WRR %p picker %p] no scheduler, falling back to RR",
wrr_.get(), this);
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] no scheduler, falling back to RR";
}
stats_plugins.AddCounter(kMetricRrFallback, 1,
{wrr_->channel_control_helper()->GetTarget()},
@ -671,8 +670,9 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
}
// Start timer.
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO, "[WRR %p picker %p] scheduling timer for %s", wrr_.get(),
this, config_->weight_update_period().ToString().c_str());
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] scheduling timer for "
<< config_->weight_update_period().ToString();
}
timer_handle_ = wrr_->channel_control_helper()->GetEventEngine()->RunAfter(
config_->weight_update_period(),
@ -684,8 +684,8 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
MutexLock lock(&self->timer_mu_);
if (self->timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO, "[WRR %p picker %p] timer fired",
self->wrr_.get(), self.get());
LOG(INFO) << "[WRR " << self->wrr_.get() << " picker "
<< self.get() << "] timer fired";
}
self->BuildSchedulerAndStartTimerLocked();
}
@ -876,19 +876,19 @@ void WeightedRoundRobin::WrrEndpointList::WrrEndpoint::OnStateUpdate(
auto* wrr_endpoint_list = endpoint_list<WrrEndpointList>();
auto* wrr = policy<WeightedRoundRobin>();
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p] connectivity changed for child %p, endpoint_list %p "
"(index %" PRIuPTR " of %" PRIuPTR
"): prev_state=%s new_state=%s (%s)",
wrr, this, wrr_endpoint_list, Index(), wrr_endpoint_list->size(),
(old_state.has_value() ? ConnectivityStateName(*old_state) : "N/A"),
ConnectivityStateName(new_state), status.ToString().c_str());
LOG(INFO) << "[WRR " << wrr << "] connectivity changed for child " << this
<< ", endpoint_list " << wrr_endpoint_list << " (index "
<< Index() << " of " << wrr_endpoint_list->size()
<< "): prev_state="
<< (old_state.has_value() ? ConnectivityStateName(*old_state)
: "N/A")
<< " new_state=" << ConnectivityStateName(new_state) << " ("
<< status << ")";
}
if (new_state == GRPC_CHANNEL_IDLE) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p] child %p reported IDLE; requesting connection", wrr,
this);
LOG(INFO) << "[WRR " << wrr << "] child " << this
<< " reported IDLE; requesting connection";
}
ExitIdleLocked();
} else if (new_state == GRPC_CHANNEL_READY) {
@ -970,10 +970,9 @@ void WeightedRoundRobin::WrrEndpointList::
const std::string old_counters_string =
wrr->endpoint_list_ != nullptr ? wrr->endpoint_list_->CountersString()
: "";
gpr_log(GPR_INFO,
"[WRR %p] swapping out endpoint list %p (%s) in favor of %p (%s)",
wrr, wrr->endpoint_list_.get(), old_counters_string.c_str(), this,
CountersString().c_str());
LOG(INFO) << "[WRR " << wrr << "] swapping out endpoint list "
<< wrr->endpoint_list_.get() << " (" << old_counters_string
<< ") in favor of " << this << " (" << CountersString() << ")";
}
wrr->endpoint_list_ = std::move(wrr->latest_pending_endpoint_list_);
}
@ -1001,9 +1000,9 @@ void WeightedRoundRobin::WrrEndpointList::
MakeRefCounted<QueuePicker>(nullptr));
} else if (num_transient_failure_ == size()) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
gpr_log(GPR_INFO,
"[WRR %p] reporting TRANSIENT_FAILURE with endpoint list %p: %s",
wrr, this, status_for_tf.ToString().c_str());
LOG(INFO) << "[WRR " << wrr
<< "] reporting TRANSIENT_FAILURE with endpoint list " << this
<< ": " << status_for_tf;
}
if (!status_for_tf.ok()) {
last_failure_ = absl::UnavailableError(

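The MaybeUpdateWeight logs above carry qps, eps, utilization, and error_util_penalty. Per the weighted-round-robin design (gRFC A58), the weight is derived roughly as qps / (utilization + (eps / qps) * error_utilization_penalty); treat that formula as an assumption of this sketch rather than a quote of the implementation.

#include <cmath>

// Returns 0 when inputs cannot yield a usable weight, mirroring the
// "weight=0 ... (not updating)" branch in the logs above.
inline float ComputeWrrWeight(float qps, float eps, float utilization,
                              float error_utilization_penalty) {
  if (qps <= 0 || utilization <= 0) return 0;
  const float penalized_utilization =
      utilization + (eps / qps) * error_utilization_penalty;
  if (!std::isfinite(penalized_utilization) || penalized_utilization <= 0) {
    return 0;
  }
  return qps / penalized_utilization;
}
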
@ -35,7 +35,6 @@
#include <grpc/grpc_security.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/support/json.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_args.h"
@ -282,8 +281,9 @@ absl::Status CdsLb::UpdateLocked(UpdateArgs args) {
// Get new config.
auto new_config = args.config.TakeAsSubclass<CdsLbConfig>();
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
gpr_log(GPR_INFO, "[cdslb %p] received update: cluster=%s is_dynamic=%d",
this, new_config->cluster().c_str(), new_config->is_dynamic());
LOG(INFO) << "[cdslb " << this
<< "] received update: cluster=" << new_config->cluster()
<< " is_dynamic=" << new_config->is_dynamic();
}
CHECK(new_config != nullptr);
// Cluster name should never change, because we should use a different
@ -327,10 +327,9 @@ absl::Status CdsLb::UpdateLocked(UpdateArgs args) {
// recently subscribed but another update came through before we
// got the new cluster, in which case it will still be missing.
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
gpr_log(GPR_INFO,
"[cdslb %p] xDS config has no entry for dynamic cluster %s, "
"waiting for subsequent update",
this, cluster_name_.c_str());
LOG(INFO) << "[cdslb " << this
<< "] xDS config has no entry for dynamic cluster "
<< cluster_name_ << ", waiting for subsequent update";
}
// Stay in CONNECTING until we get an update that has the cluster.
return absl::OkStatus();
@ -454,9 +453,9 @@ absl::Status CdsLb::UpdateLocked(UpdateArgs args) {
grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
interested_parties());
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
gpr_log(GPR_INFO, "[cdslb %p] created child policy %s (%p)", this,
std::string((*child_config)->name()).c_str(),
child_policy_.get());
LOG(INFO) << "[cdslb " << this << "] created child policy "
<< (*child_config)->name() << " (" << child_policy_.get()
<< ")";
}
}
// Update child policy.

@ -35,7 +35,6 @@
#include <grpc/event_engine/event_engine.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/client_channel/client_channel_internal.h"
@ -251,10 +250,8 @@ XdsClusterManagerLb::XdsClusterManagerLb(Args args)
XdsClusterManagerLb::~XdsClusterManagerLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
gpr_log(
GPR_INFO,
"[xds_cluster_manager_lb %p] destroying xds_cluster_manager LB policy",
this);
LOG(INFO) << "[xds_cluster_manager_lb " << this
<< "] destroying xds_cluster_manager LB policy";
}
}

@ -35,6 +35,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/handshaker/handshaker.h"
#include "src/core/handshaker/handshaker_registry.h"
#include "src/core/handshaker/tcp_connect/tcp_connect_handshaker.h"
#include "src/core/lib/address_utils/sockaddr_utils.h"
@ -192,9 +193,7 @@ HttpRequest::HttpRequest(
HttpRequest::~HttpRequest() {
grpc_channel_args_destroy(channel_args_);
grpc_http_parser_destroy(&parser_);
if (own_endpoint_ && ep_ != nullptr) {
grpc_endpoint_destroy(ep_);
}
ep_.reset();
CSliceUnref(request_text_);
grpc_iomgr_unregister_object(&iomgr_obj_);
grpc_slice_buffer_destroy(&incoming_);
@ -231,10 +230,7 @@ void HttpRequest::Orphan() {
handshake_mgr_->Shutdown(
GRPC_ERROR_CREATE("HTTP request cancelled during handshake"));
}
if (own_endpoint_ && ep_ != nullptr) {
grpc_endpoint_destroy(ep_);
ep_ = nullptr;
}
ep_.reset();
}
Unref();
}
@ -288,36 +284,30 @@ void HttpRequest::StartWrite() {
CSliceRef(request_text_);
grpc_slice_buffer_add(&outgoing_, request_text_);
Ref().release(); // ref held by pending write
grpc_endpoint_write(ep_, &outgoing_, &done_write_, nullptr,
grpc_endpoint_write(ep_.get(), &outgoing_, &done_write_, nullptr,
/*max_frame_size=*/INT_MAX);
}
void HttpRequest::OnHandshakeDone(void* arg, grpc_error_handle error) {
auto* args = static_cast<HandshakerArgs*>(arg);
RefCountedPtr<HttpRequest> req(static_cast<HttpRequest*>(args->user_data));
void HttpRequest::OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result) {
if (g_test_only_on_handshake_done_intercept != nullptr) {
// Run this testing intercept before the lock so that it has a chance to
// do things like calling Orphan on the request
g_test_only_on_handshake_done_intercept(req.get());
g_test_only_on_handshake_done_intercept(this);
}
MutexLock lock(&req->mu_);
req->own_endpoint_ = true;
if (!error.ok()) {
req->handshake_mgr_.reset();
req->NextAddress(error);
MutexLock lock(&mu_);
if (!result.ok()) {
handshake_mgr_.reset();
NextAddress(result.status());
return;
}
// Handshake completed, so we own fields in args
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
req->ep_ = args->endpoint;
req->handshake_mgr_.reset();
if (req->cancelled_) {
req->NextAddress(
GRPC_ERROR_CREATE("HTTP request cancelled during handshake"));
// Handshake completed, so get the endpoint.
ep_ = std::move((*result)->endpoint);
handshake_mgr_.reset();
if (cancelled_) {
NextAddress(GRPC_ERROR_CREATE("HTTP request cancelled during handshake"));
return;
}
req->StartWrite();
StartWrite();
}
void HttpRequest::DoHandshake(const grpc_resolved_address* addr) {
@ -343,13 +333,11 @@ void HttpRequest::DoHandshake(const grpc_resolved_address* addr) {
handshake_mgr_ = MakeRefCounted<HandshakeManager>();
CoreConfiguration::Get().handshaker_registry().AddHandshakers(
HANDSHAKER_CLIENT, args, pollset_set_, handshake_mgr_.get());
Ref().release(); // ref held by pending handshake
grpc_endpoint* ep = ep_;
ep_ = nullptr;
own_endpoint_ = false;
handshake_mgr_->DoHandshake(ep, args, deadline_,
/*acceptor=*/nullptr, OnHandshakeDone,
/*user_data=*/this);
handshake_mgr_->DoHandshake(
nullptr, args, deadline_, /*acceptor=*/nullptr,
[self = Ref()](absl::StatusOr<HandshakerArgs*> result) {
self->OnHandshakeDone(std::move(result));
});
}
void HttpRequest::NextAddress(grpc_error_handle error) {

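The httpcli hunks above replace a C-style completion callback (a static function plus void* user_data plus a manual Ref().release()) with an absl::AnyInvocable continuation that captures the reference itself. A self-contained sketch of that callback shape under hypothetical types; only absl::AnyInvocable and absl::StatusOr are real APIs here, and shared_ptr stands in for gRPC's ref counting.

#include <memory>
#include <utility>

#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"

struct HandshakeResult {};  // hypothetical stand-in for HandshakerArgs

// New-style API: the continuation owns whatever context it needs.
void DoHandshake(
    absl::AnyInvocable<void(absl::StatusOr<HandshakeResult*>)> on_done) {
  // Demo only: complete synchronously with an error.
  on_done(absl::UnavailableError("handshake failed"));
}

class Request : public std::enable_shared_from_this<Request> {
 public:
  void Start() {
    // Old style:
    //   Ref().release();  // ref held by pending handshake
    //   DoHandshake(OnHandshakeDoneThunk, /*user_data=*/this);
    // New style: the capture replaces both the manual ref and user_data.
    DoHandshake([self = shared_from_this()](
                    absl::StatusOr<HandshakeResult*> result) {
      self->OnHandshakeDone(std::move(result));
    });
  }

 private:
  void OnHandshakeDone(absl::StatusOr<HandshakeResult*> result) {
    if (!result.ok()) { /* surface result.status() to the caller */ }
  }
};

// Usage: the object must already be shared-owned for shared_from_this():
//   auto req = std::make_shared<Request>();
//   req->Start();
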
@ -186,7 +186,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
void DoRead() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
Ref().release(); // ref held by pending read
grpc_endpoint_read(ep_, &incoming_, &on_read_, /*urgent=*/true,
grpc_endpoint_read(ep_.get(), &incoming_, &on_read_, /*urgent=*/true,
/*min_progress_size=*/1);
}
@ -221,7 +221,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
void StartWrite() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
static void OnHandshakeDone(void* arg, grpc_error_handle error);
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
void DoHandshake(const grpc_resolved_address* addr)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
@ -240,7 +240,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
grpc_closure continue_on_read_after_schedule_on_exec_ctx_;
grpc_closure done_write_;
grpc_closure continue_done_write_after_schedule_on_exec_ctx_;
grpc_endpoint* ep_ = nullptr;
OrphanablePtr<grpc_endpoint> ep_;
grpc_closure* on_done_;
ResourceQuotaRefPtr resource_quota_;
grpc_polling_entity* pollent_;
@ -248,7 +248,6 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
const absl::optional<std::function<void()>> test_only_generate_response_;
Mutex mu_;
RefCountedPtr<HandshakeManager> handshake_mgr_ ABSL_GUARDED_BY(mu_);
bool own_endpoint_ ABSL_GUARDED_BY(mu_) = true;
bool cancelled_ ABSL_GUARDED_BY(mu_) = false;
grpc_http_parser parser_ ABSL_GUARDED_BY(mu_);
std::vector<grpc_resolved_address> addresses_ ABSL_GUARDED_BY(mu_);

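The header change above swaps a raw grpc_endpoint* plus an own_endpoint_ flag for an OrphanablePtr, so a single ep_.reset() replaces the manual destroy-if-owned logic seen in the earlier hunks. A simplified sketch modeled on the Orphanable pattern in src/core/lib/gprpp/orphanable.h.

#include <memory>

class Orphanable {
 public:
  virtual ~Orphanable() = default;
  // Relinquishes ownership; the object is responsible for cleaning itself
  // up, possibly asynchronously.
  virtual void Orphan() = 0;
};

struct OrphanableDelete {
  template <typename T>
  void operator()(T* p) const {
    p->Orphan();
  }
};

template <typename T>
using OrphanablePtr = std::unique_ptr<T, OrphanableDelete>;

class Endpoint : public Orphanable {
 public:
  void Orphan() override { delete this; }  // real endpoints may defer this
};

void Example() {
  OrphanablePtr<Endpoint> ep(new Endpoint);
  ep.reset();  // invokes Orphan() exactly once; no ownership flag needed
}
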
@ -419,7 +419,10 @@ class Channel(_base_channel.Channel):
# Locate ones created by `aio.Call`.
frame = stack[0]
candidate = frame.f_locals.get("self")
if candidate:
# Explicitly check for a non-None candidate instead of the more
# Pythonic 'if candidate:', because 'if candidate:' relies on the
# object's truthiness (its '__bool__'), which a Call-like object
# might override.
if candidate is not None:
if isinstance(candidate, _base_call.Call):
if hasattr(candidate, "_channel"):
# For intercepted Call object

@ -692,6 +692,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/transport/call_filters.cc',
'src/core/lib/transport/call_final_info.cc',
'src/core/lib/transport/call_spine.cc',
'src/core/lib/transport/call_state.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/interception_chain.cc',

@ -227,7 +227,7 @@ void grpc_run_bad_client_test(
grpc_core::CoreConfiguration::Get()
.channel_args_preconditioning()
.PreconditionChannelArgs(server_args.ToC().get()),
sfd.server, false);
grpc_core::OrphanablePtr<grpc_endpoint>(sfd.server), false);
server_setup_transport(&a, transport);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr,
nullptr);

@ -36,6 +36,7 @@
#include "src/core/channelz/channelz.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
@ -121,8 +122,9 @@ static void client_setup_transport(grpc_core::Transport* transport) {
static void init_client() {
grpc_core::ExecCtx exec_ctx;
grpc_core::Transport* transport;
transport = grpc_create_chttp2_transport(grpc_core::ChannelArgs(),
g_ctx.ep->client, true);
transport = grpc_create_chttp2_transport(
grpc_core::ChannelArgs(),
grpc_core::OrphanablePtr<grpc_endpoint>(g_ctx.ep->client), true);
client_setup_transport(transport);
CHECK(g_ctx.client);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr,
@ -136,8 +138,9 @@ static void init_server() {
g_ctx.server = grpc_server_create(nullptr, nullptr);
grpc_server_register_completion_queue(g_ctx.server, g_ctx.cq, nullptr);
grpc_server_start(g_ctx.server);
transport = grpc_create_chttp2_transport(grpc_core::ChannelArgs(),
g_ctx.ep->server, false);
transport = grpc_create_chttp2_transport(
grpc_core::ChannelArgs(),
grpc_core::OrphanablePtr<grpc_endpoint>(g_ctx.ep->server), false);
server_setup_transport(transport);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr,
nullptr);

@ -79,11 +79,12 @@ class SockpairFixture : public CoreTestFixture {
auto server_channel_args = CoreConfiguration::Get()
.channel_args_preconditioning()
.PreconditionChannelArgs(args.ToC().get());
auto* server_endpoint = std::exchange(ep_.server, nullptr);
OrphanablePtr<grpc_endpoint> server_endpoint(
    std::exchange(ep_.server, nullptr));
EXPECT_NE(server_endpoint, nullptr);
grpc_endpoint_add_to_pollset(server_endpoint, grpc_cq_pollset(cq));
grpc_endpoint_add_to_pollset(server_endpoint.get(), grpc_cq_pollset(cq));
transport = grpc_create_chttp2_transport(server_channel_args,
                                         server_endpoint, false);
                                         std::move(server_endpoint), false);
Server* core_server = Server::FromC(server);
grpc_error_handle error = core_server->SetupTransport(
transport, nullptr, core_server->channel_args(), nullptr);
@ -106,9 +107,11 @@ class SockpairFixture : public CoreTestFixture {
.ToC()
.get());
Transport* transport;
auto* client_endpoint = std::exchange(ep_.client, nullptr);
OrphanablePtr<grpc_endpoint> client_endpoint(
std::exchange(ep_.client, nullptr));
EXPECT_NE(client_endpoint, nullptr);
transport = grpc_create_chttp2_transport(args, client_endpoint, true);
transport =
grpc_create_chttp2_transport(args, std::move(client_endpoint), true);
auto channel = ChannelCreate("socketpair-target", args,
GRPC_CLIENT_DIRECT_CHANNEL, transport);
grpc_channel* client;

@ -29,6 +29,7 @@
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/experiments/config.h"
#include "src/core/lib/gprpp/env.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
@ -69,7 +70,10 @@ class ClientFuzzer final : public BasicFuzzer {
.PreconditionChannelArgs(nullptr)
.SetIfUnset(GRPC_ARG_DEFAULT_AUTHORITY, "test-authority");
Transport* transport = grpc_create_chttp2_transport(
args, mock_endpoint_controller_->TakeCEndpoint(), true);
args,
OrphanablePtr<grpc_endpoint>(
mock_endpoint_controller_->TakeCEndpoint()),
true);
channel_ = ChannelCreate("test-target", args, GRPC_CLIENT_DIRECT_CHANNEL,
transport)
->release()

@ -90,7 +90,9 @@ CORE_END2END_TEST(RetryHttp2Test, MaxConnectionIdle) {
.Set(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS,
Duration::Seconds(1).millis())
.Set(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, Duration::Seconds(1).millis())
.Set(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, Duration::Seconds(5).millis())
.Set(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS,
g_is_fuzzing_core_e2e_tests ? Duration::Minutes(5).millis()
: Duration::Seconds(5).millis())
// Avoid transparent retries for this test.
.Set(GRPC_ARG_ENABLE_RETRIES, false));
InitServer(

@ -18,6 +18,8 @@
#include <memory>
#include "absl/base/thread_annotations.h"
#include "absl/strings/string_view.h"
#include "gtest/gtest.h"
#include <grpc/grpc.h>
@ -28,6 +30,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
@ -49,15 +52,52 @@ namespace grpc_core {
class ReadAheadHandshaker : public Handshaker {
public:
~ReadAheadHandshaker() override {}
const char* name() const override { return "read_ahead"; }
void Shutdown(grpc_error_handle /*why*/) override {}
void DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) override {
grpc_endpoint_read(args->endpoint, args->read_buffer, on_handshake_done,
/*urgent=*/false, /*min_progress_size=*/1);
absl::string_view name() const override { return "read_ahead"; }
void DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) override {
MutexLock lock(&mu_);
args_ = args;
on_handshake_done_ = std::move(on_handshake_done);
Ref().release(); // Held by callback.
GRPC_CLOSURE_INIT(&on_read_done_, OnReadDone, this, nullptr);
grpc_endpoint_read(args->endpoint.get(), args->read_buffer.c_slice_buffer(),
&on_read_done_, /*urgent=*/false,
/*min_progress_size=*/1);
}
void Shutdown(absl::Status /*error*/) override {
MutexLock lock(&mu_);
if (on_handshake_done_ != nullptr) args_->endpoint.reset();
}
private:
static void OnReadDone(void* arg, grpc_error_handle error) {
auto* self = static_cast<ReadAheadHandshaker*>(arg);
// Need an async hop here, because grpc_endpoint_read() may invoke
// the callback synchronously, leading to deadlock.
// TODO(roth): This async hop will no longer be necessary once we
// switch to the EventEngine endpoint API.
self->args_->event_engine->Run(
[self = RefCountedPtr<ReadAheadHandshaker>(self),
error = std::move(error)]() mutable {
absl::AnyInvocable<void(absl::Status)> on_handshake_done;
{
MutexLock lock(&self->mu_);
on_handshake_done = std::move(self->on_handshake_done_);
}
on_handshake_done(std::move(error));
});
}
grpc_closure on_read_done_;
Mutex mu_;
// Mutex guards args_->endpoint but not the rest of the struct.
HandshakerArgs* args_ = nullptr;
absl::AnyInvocable<void(absl::Status)> on_handshake_done_
ABSL_GUARDED_BY(&mu_);
};
class ReadAheadHandshakerFactory : public HandshakerFactory {

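The OnReadDone comment above explains the re-entrancy hazard: grpc_endpoint_read() may invoke its callback synchronously, so running the completion inline could deadlock on a lock the caller still holds. A minimal sketch of the hop-to-executor pattern; Executor here is a hypothetical stand-in for the EventEngine's Run().

#include <deque>
#include <functional>
#include <mutex>
#include <utility>

// Hypothetical stand-in for the EventEngine's Run(): callbacks are queued
// and executed later, with no caller locks held.
struct Executor {
  std::deque<std::function<void()>> queue;
  void Run(std::function<void()> fn) { queue.push_back(std::move(fn)); }
  void Drain() {
    while (!queue.empty()) {
      auto fn = std::move(queue.front());
      queue.pop_front();
      fn();  // runs outside any caller-held mutex
    }
  }
};

class Handshaker {
 public:
  void OnReadDone() {
    std::function<void()> done;
    {
      std::lock_guard<std::mutex> lock(mu_);
      done = std::move(on_done_);  // take the callback under the lock...
    }
    // ...but never invoke it inline: if the read completed synchronously,
    // the caller may still hold its own lock, and calling back into it
    // here would deadlock. Hop through the executor instead.
    executor_.Run(std::move(done));
  }

 private:
  std::mutex mu_;
  std::function<void()> on_done_;
  Executor executor_;
};
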
@ -161,9 +161,11 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
}
if (leftover_nslices == 0) {
f.client_ep = grpc_secure_endpoint_create(fake_read_protector,
fake_read_zero_copy_protector,
tcp.client, nullptr, &args, 0);
f.client_ep = grpc_secure_endpoint_create(
fake_read_protector, fake_read_zero_copy_protector,
grpc_core::OrphanablePtr<grpc_endpoint>(tcp.client),
nullptr, &args, 0)
.release();
} else {
unsigned i;
tsi_result result;
@ -206,15 +208,19 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
reinterpret_cast<const char*>(encrypted_buffer),
total_buffer_size - buffer_size);
f.client_ep = grpc_secure_endpoint_create(
fake_read_protector, fake_read_zero_copy_protector, tcp.client,
&encrypted_leftover, &args, 1);
fake_read_protector, fake_read_zero_copy_protector,
grpc_core::OrphanablePtr<grpc_endpoint>(tcp.client),
&encrypted_leftover, &args, 1)
.release();
grpc_slice_unref(encrypted_leftover);
gpr_free(encrypted_buffer);
}
f.server_ep = grpc_secure_endpoint_create(fake_write_protector,
fake_write_zero_copy_protector,
tcp.server, nullptr, &args, 0);
f.server_ep = grpc_secure_endpoint_create(
fake_write_protector, fake_write_zero_copy_protector,
grpc_core::OrphanablePtr<grpc_endpoint>(tcp.server),
nullptr, &args, 0)
.release();
grpc_resource_quota_unref(
static_cast<grpc_resource_quota*>(a[1].value.pointer.p));
return f;

@ -15,7 +15,9 @@
// limitations under the License.
//
//
#include "absl/log/check.h"
#include "absl/synchronization/notification.h"
#include <grpc/credentials.h>
#include <grpc/event_engine/event_engine.h>
@ -35,6 +37,7 @@
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
using grpc_core::HandshakerArgs;
using grpc_event_engine::experimental::EventEngine;
using grpc_event_engine::experimental::GetDefaultEventEngine;
@ -43,20 +46,6 @@ bool squelch = true;
// Turning this on will fail the leak check.
bool leak_check = false;
struct handshake_state {
grpc_core::Notification done_signal;
};
static void on_handshake_done(void* arg, grpc_error_handle error) {
grpc_core::HandshakerArgs* args =
static_cast<grpc_core::HandshakerArgs*>(arg);
struct handshake_state* state =
static_cast<struct handshake_state*>(args->user_data);
// The fuzzer should not pass the handshake.
CHECK(!error.ok());
state->done_signal.Notify();
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
if (squelch) {
grpc_disable_all_absl_logs();
@ -91,21 +80,26 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_core::Timestamp deadline =
grpc_core::Duration::Seconds(1) + grpc_core::Timestamp::Now();
struct handshake_state state;
auto handshake_mgr =
grpc_core::MakeRefCounted<grpc_core::HandshakeManager>();
auto channel_args =
grpc_core::ChannelArgs().SetObject<EventEngine>(std::move(engine));
sc->add_handshakers(channel_args, nullptr, handshake_mgr.get());
absl::Notification handshake_completed;
handshake_mgr->DoHandshake(mock_endpoint_controller->TakeCEndpoint(),
                           channel_args, deadline, nullptr /* acceptor */,
                           on_handshake_done, &state);
handshake_mgr->DoHandshake(grpc_core::OrphanablePtr<grpc_endpoint>(
                               mock_endpoint_controller->TakeCEndpoint()),
                           channel_args, deadline, nullptr /* acceptor */,
                           [&](absl::StatusOr<HandshakerArgs*> result) {
                             // The fuzzer should not pass the handshake.
                             CHECK(!result.ok());
                             handshake_completed.Notify();
                           });
grpc_core::ExecCtx::Get()->Flush();
// If the given string happens to be part of the correct client hello, the
// server will wait for more data. Explicitly fail the server by shutting
// down the handshake manager.
if (!state.done_signal.WaitForNotificationWithTimeout(absl::Seconds(3))) {
if (!handshake_completed.WaitForNotificationWithTimeout(absl::Seconds(3))) {
handshake_mgr->Shutdown(
absl::DeadlineExceededError("handshake did not fail as expected"));
}

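The fuzzer above synchronizes on an absl::Notification and uses a timed wait so it can force a shutdown if the handshake callback never fires. A small sketch of that pattern; the start_async_op parameter is hypothetical.

#include "absl/synchronization/notification.h"
#include "absl/time/time.h"

// start_async_op is a hypothetical parameter: it arranges for
// done->Notify() to run when the async operation completes.
bool RunWithTimeout(void (*start_async_op)(absl::Notification* done)) {
  absl::Notification done;
  start_async_op(&done);
  // Returns false on timeout, letting the caller shut the operation down
  // explicitly (as the fuzzer does with handshake_mgr->Shutdown()).
  return done.WaitForNotificationWithTimeout(absl::Seconds(3));
}
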
@ -346,6 +346,5 @@ TEST(ChannelInitTest, CanCreateFilterWithCall) {
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(&argc, argv);
::testing::InitGoogleTest(&argc, argv);
grpc::testing::TestGrpcScope grpc_scope;
return RUN_ALL_TESTS();
}

@ -87,6 +87,21 @@ grpc_cc_test(
],
)
grpc_cc_test(
name = "call_state_test",
srcs = ["call_state_test.cc"],
external_deps = [
"gtest",
],
language = "C++",
uses_event_engine = False,
uses_polling = False,
deps = [
"//src/core:call_state",
"//test/core/promise:poll_matcher",
],
)
grpc_cc_test(
name = "connectivity_state_test",
srcs = ["connectivity_state_test.cc"],

@ -1099,246 +1099,6 @@ TEST(OperationExecutorTest, PromiseTwo) {
gpr_free_aligned(call_data1);
}
///////////////////////////////////////////////////////////////////////////////
// CallState
TEST(CallStateTest, NoOp) { CallState state; }
TEST(CallStateTest, StartTwiceCrashes) {
CallState state;
state.Start();
EXPECT_DEATH(state.Start(), "");
}
TEST(CallStateTest, PullServerInitialMetadataBlocksUntilStart) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady());
}
TEST(CallStateTest, PullClientInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_DEATH(state.FinishPullClientInitialMetadata(), "");
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
}
TEST(CallStateTest, ClientToServerMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPullClientInitialMetadata();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientInitialMetadata());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
}
TEST(CallStateTest, RepeatedClientToServerMessagesWithHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 3: push before polling and half close
state.BeginPushClientToServerMessage();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// ... and now we should see the half close
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
TEST(CallStateTest, ImmediateClientToServerHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
TEST(CallStateTest, ServerToClientMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity,
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(),
IsReady(true)));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerInitialMetadata());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
TEST(CallStateTest, RepeatedServerToClientMessages) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.PushServerInitialMetadata();
state.Start();
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
state.FinishPullServerInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 3: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
TEST(CallStateTest, ReceiveTrailersOnly) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
state.FinishPullServerTrailingMetadata();
}
TEST(CallStateTest, ReceiveTrailersOnlySkipsInitialMetadataOnUnstartedCalls) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
state.FinishPullServerTrailingMetadata();
}
TEST(CallStateTest, RecallNoCancellation) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
EXPECT_THAT(state.PollWasCancelled(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
EXPECT_THAT(state.PollWasCancelled(), IsReady(false));
}
TEST(CallStateTest, RecallCancellation) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(true);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
EXPECT_THAT(state.PollWasCancelled(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
EXPECT_THAT(state.PollWasCancelled(), IsReady(true));
}
TEST(CallStateTest, ReceiveTrailingMetadataAfterMessageRead) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerInitialMetadata();
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerTrailingMetadata(false));
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(false));
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
}
} // namespace filters_detail
///////////////////////////////////////////////////////////////////////////////

@@ -0,0 +1,310 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/transport/call_state.h"
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "test/core/promise/poll_matcher.h"
using testing::Mock;
using testing::StrictMock;
namespace grpc_core {
namespace {
// A mock activity that can be activated and deactivated.
class MockActivity : public Activity, public Wakeable {
public:
MOCK_METHOD(void, WakeupRequested, ());
void ForceImmediateRepoll(WakeupMask /*mask*/) override { WakeupRequested(); }
void Orphan() override {}
Waker MakeOwningWaker() override { return Waker(this, 0); }
Waker MakeNonOwningWaker() override { return Waker(this, 0); }
void Wakeup(WakeupMask /*mask*/) override { WakeupRequested(); }
void WakeupAsync(WakeupMask /*mask*/) override { WakeupRequested(); }
void Drop(WakeupMask /*mask*/) override {}
std::string DebugTag() const override { return "MockActivity"; }
std::string ActivityDebugTag(WakeupMask /*mask*/) const override {
return DebugTag();
}
void Activate() {
if (scoped_activity_ == nullptr) {
scoped_activity_ = std::make_unique<ScopedActivity>(this);
}
}
void Deactivate() { scoped_activity_.reset(); }
private:
std::unique_ptr<ScopedActivity> scoped_activity_;
};
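// Every wakeup path above (ForceImmediateRepoll, Wakeup, WakeupAsync) funnels
// into the single mocked WakeupRequested(), so tests can assert "this
// statement woke the activity" without caring which path fired.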
#define EXPECT_WAKEUP(activity, statement) \
EXPECT_CALL((activity), WakeupRequested()).Times(::testing::AtLeast(1)); \
statement; \
Mock::VerifyAndClearExpectations(&(activity));
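// EXPECT_WAKEUP arms a one-shot expectation that `statement` triggers at
// least one wakeup, then verifies and clears it immediately so the
// expectation cannot leak into later statements.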
} // namespace
TEST(CallStateTest, NoOp) { CallState state; }
TEST(CallStateTest, StartTwiceCrashes) {
CallState state;
state.Start();
EXPECT_DEATH(state.Start(), "");
}
TEST(CallStateTest, PullServerInitialMetadataBlocksUntilStart) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady());
}
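// Server initial metadata becomes pullable only once both
// PushServerInitialMetadata() and Start() have happened; each transition
// wakes the activity so the pending poll is retried.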
TEST(CallStateTest, PullClientInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_DEATH(state.FinishPullClientInitialMetadata(), "");
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
}
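// The two death tests above pin down ordering contracts: Start() may run at
// most once, and FinishPullClientInitialMetadata() requires a matching
// Begin call first.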
TEST(CallStateTest, ClientToServerMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
state.BeginPullClientInitialMetadata();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientInitialMetadata());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
}
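// A pushed client-to-server message is invisible to the pull side until
// client initial metadata has been fully pulled, and the push itself only
// resolves to Success{} after the puller finishes with the message.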
TEST(CallStateTest, RepeatedClientToServerMessagesWithHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushClientToServerMessage();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// Message 3: push before polling and half close
state.BeginPushClientToServerMessage();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushClientToServerMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullClientToServerMessage());
EXPECT_THAT(state.PollPushClientToServerMessage(), IsReady(Success{}));
// ... and now we should see the half close
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
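// Half close is queued behind the in-flight message: the pull side first
// sees the message (IsReady(true)) and only afterwards observes end of
// stream (IsReady(false)).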
TEST(CallStateTest, ImmediateClientToServerHalfClose) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.BeginPullClientInitialMetadata();
state.FinishPullClientInitialMetadata();
state.ClientToServerHalfClose();
EXPECT_THAT(state.PollPullClientToServerMessageAvailable(), IsReady(false));
}
TEST(CallStateTest, ServerToClientMessagesWaitForInitialMetadata) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.Start());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerInitialMetadata());
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity,
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(),
IsReady(true)));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerInitialMetadata());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
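// The server-to-client mirror of the client-side test above: messages are
// held back until server initial metadata has been pulled and acknowledged
// via FinishPullServerInitialMetadata().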
TEST(CallStateTest, RepeatedServerToClientMessages) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.PushServerInitialMetadata();
state.Start();
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
state.FinishPullServerInitialMetadata();
// Message 0
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 1
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.BeginPushServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 2: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
// Message 3: push before polling
state.BeginPushServerToClientMessage();
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(true));
EXPECT_THAT(state.PollPushServerToClientMessage(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerToClientMessage());
EXPECT_THAT(state.PollPushServerToClientMessage(), IsReady(Success{}));
}
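// Whether BeginPushServerToClientMessage() lands before or after the first
// pull poll, the handshake is identical: the pull resolves true, then the
// push resolves Success{} once FinishPullServerToClientMessage() runs.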
TEST(CallStateTest, ReceiveTrailersOnly) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
state.FinishPullServerTrailingMetadata();
}
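// Trailers-only: PollPullServerInitialMetadataAvailable() resolving to
// false tells the reader that no initial metadata will arrive, after which
// trailing metadata is surfaced.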
TEST(CallStateTest, ReceiveTrailersOnlySkipsInitialMetadataOnUnstartedCalls) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
state.FinishPullServerTrailingMetadata();
}
TEST(CallStateTest, RecallNoCancellation) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(false);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
EXPECT_THAT(state.PollWasCancelled(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
EXPECT_THAT(state.PollWasCancelled(), IsReady(false));
}
TEST(CallStateTest, RecallCancellation) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerTrailingMetadata(true);
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(false));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
EXPECT_THAT(state.PollWasCancelled(), IsPending());
EXPECT_WAKEUP(activity, state.FinishPullServerTrailingMetadata());
EXPECT_THAT(state.PollWasCancelled(), IsReady(true));
}
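// In both Recall tests, PollWasCancelled() stays pending until
// FinishPullServerTrailingMetadata() and then reports exactly the flag that
// was passed to PushServerTrailingMetadata().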
TEST(CallStateTest, ReceiveTrailingMetadataAfterMessageRead) {
StrictMock<MockActivity> activity;
activity.Activate();
CallState state;
state.Start();
state.PushServerInitialMetadata();
EXPECT_THAT(state.PollPullServerInitialMetadataAvailable(), IsReady(true));
state.FinishPullServerInitialMetadata();
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsPending());
EXPECT_WAKEUP(activity, state.PushServerTrailingMetadata(false));
EXPECT_THAT(state.PollPullServerToClientMessageAvailable(), IsReady(false));
EXPECT_THAT(state.PollServerTrailingMetadataAvailable(), IsReady());
}
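// Trailing metadata posted while a message pull is pending wakes that poll,
// which then reports end of stream (false) before the trailers themselves
// become available.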
} // namespace grpc_core
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
grpc_tracer_init();
return RUN_ALL_TESTS();
}

@@ -51,6 +51,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/notification.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
@@ -94,8 +95,9 @@ class GracefulShutdownTest : public ::testing::Test {
grpc_server_register_completion_queue(server_, cq_, nullptr);
grpc_server_start(server_);
fds_ = grpc_iomgr_create_endpoint_pair("fixture", nullptr);
auto* transport = grpc_create_chttp2_transport(core_server->channel_args(),
fds_.server, false);
auto* transport = grpc_create_chttp2_transport(
core_server->channel_args(), OrphanablePtr<grpc_endpoint>(fds_.server),
false);
grpc_endpoint_add_to_pollset(fds_.server, grpc_cq_pollset(cq_));
CHECK(core_server->SetupTransport(transport, nullptr,
core_server->channel_args(),

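The hunks in this and the following test fixtures all make the same mechanical change: grpc_create_chttp2_transport() now takes its endpoint as an OrphanablePtr<grpc_endpoint>, transferring ownership into the transport. A minimal sketch of the call-site pattern, assuming a fixture-owned endpoint pair named fds, channel args named args, and a pollset named pollset (all illustrative names, not from the diff):

// Sketch only: after the OrphanablePtr wrap the transport owns fds.server;
// the raw pointer remains valid for non-owning uses such as pollset
// registration, exactly as in the hunk above.
grpc_endpoint_pair fds = grpc_iomgr_create_endpoint_pair("sketch", nullptr);
auto* transport = grpc_create_chttp2_transport(
    args, grpc_core::OrphanablePtr<grpc_endpoint>(fds.server),
    /*is_client=*/false);
grpc_endpoint_add_to_pollset(fds.server, pollset);  // non-owning use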
@@ -57,7 +57,9 @@ TEST_F(ConfigurationTest, ClientKeepaliveDefaults) {
ExecCtx exec_ctx;
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(grpc_create_chttp2_transport(
args_, mock_endpoint_controller_->TakeCEndpoint(),
args_,
OrphanablePtr<grpc_endpoint>(
mock_endpoint_controller_->TakeCEndpoint()),
/*is_client=*/true));
EXPECT_EQ(t->keepalive_time, Duration::Infinity());
EXPECT_EQ(t->keepalive_timeout, Duration::Infinity());
@@ -74,7 +76,9 @@ TEST_F(ConfigurationTest, ClientKeepaliveExplicitArgs) {
args_ = args_.Set(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 3);
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(grpc_create_chttp2_transport(
args_, mock_endpoint_controller_->TakeCEndpoint(),
args_,
OrphanablePtr<grpc_endpoint>(
mock_endpoint_controller_->TakeCEndpoint()),
/*is_client=*/true));
EXPECT_EQ(t->keepalive_time, Duration::Seconds(20));
EXPECT_EQ(t->keepalive_timeout, Duration::Seconds(10));
@@ -87,7 +91,9 @@ TEST_F(ConfigurationTest, ServerKeepaliveDefaults) {
ExecCtx exec_ctx;
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(grpc_create_chttp2_transport(
args_, mock_endpoint_controller_->TakeCEndpoint(),
args_,
OrphanablePtr<grpc_endpoint>(
mock_endpoint_controller_->TakeCEndpoint()),
/*is_client=*/false));
EXPECT_EQ(t->keepalive_time, Duration::Hours(2));
EXPECT_EQ(t->keepalive_timeout, Duration::Seconds(20));
@@ -111,7 +117,9 @@ TEST_F(ConfigurationTest, ServerKeepaliveExplicitArgs) {
args_ = args_.Set(GRPC_ARG_HTTP2_MAX_PING_STRIKES, 0);
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(grpc_create_chttp2_transport(
args_, mock_endpoint_controller_->TakeCEndpoint(),
args_,
OrphanablePtr<grpc_endpoint>(
mock_endpoint_controller_->TakeCEndpoint()),
/*is_client=*/false));
EXPECT_EQ(t->keepalive_time, Duration::Seconds(20));
EXPECT_EQ(t->keepalive_timeout, Duration::Seconds(10));
@@ -140,7 +148,9 @@ TEST_F(ConfigurationTest, ModifyClientDefaults) {
// which does not override the defaults.
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(grpc_create_chttp2_transport(
args_, mock_endpoint_controller_->TakeCEndpoint(),
args_,
OrphanablePtr<grpc_endpoint>(
mock_endpoint_controller_->TakeCEndpoint()),
/*is_client=*/true));
EXPECT_EQ(t->keepalive_time, Duration::Seconds(20));
EXPECT_EQ(t->keepalive_timeout, Duration::Seconds(10));
@@ -167,7 +177,9 @@ TEST_F(ConfigurationTest, ModifyServerDefaults) {
// which does not override the defaults.
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(grpc_create_chttp2_transport(
args_, mock_endpoint_controller_->TakeCEndpoint(),
args_,
OrphanablePtr<grpc_endpoint>(
mock_endpoint_controller_->TakeCEndpoint()),
/*is_client=*/false));
EXPECT_EQ(t->keepalive_time, Duration::Seconds(20));
EXPECT_EQ(t->keepalive_timeout, Duration::Seconds(10));

@@ -181,7 +181,9 @@ class EndpointPairFixture : public BaseFixture {
grpc_core::Server::FromC(server_->c_server());
grpc_core::ChannelArgs server_args = core_server->channel_args();
server_transport_ = grpc_create_chttp2_transport(
server_args, endpoints.server, false /* is_client */);
server_args,
grpc_core::OrphanablePtr<grpc_endpoint>(endpoints.server),
/*is_client=*/false);
for (grpc_pollset* pollset : core_server->pollsets()) {
grpc_endpoint_add_to_pollset(endpoints.server, pollset);
}
@@ -207,8 +209,9 @@ class EndpointPairFixture : public BaseFixture {
.channel_args_preconditioning()
.PreconditionChannelArgs(&tmp_args);
}
client_transport_ =
grpc_create_chttp2_transport(c_args, endpoints.client, true);
client_transport_ = grpc_create_chttp2_transport(
c_args, grpc_core::OrphanablePtr<grpc_endpoint>(endpoints.client),
/*is_client=*/true);
CHECK(client_transport_);
grpc_channel* channel =
grpc_core::ChannelCreate("target", c_args, GRPC_CLIENT_DIRECT_CHANNEL,

@@ -17,6 +17,7 @@
//
#include <chrono>
#include <utility>
#include <gtest/gtest.h>
@@ -118,14 +119,14 @@ class InProcessCHTTP2 {
{
grpc_core::Server* core_server =
grpc_core::Server::FromC(server_->c_server());
grpc_endpoint* iomgr_server_endpoint =
grpc_event_engine_endpoint_create(std::move(listener_endpoint));
grpc_core::Transport* transport = grpc_create_chttp2_transport(
core_server->channel_args(), iomgr_server_endpoint,
/*is_client=*/false);
grpc_core::OrphanablePtr<grpc_endpoint> iomgr_server_endpoint(
grpc_event_engine_endpoint_create(std::move(listener_endpoint)));
for (grpc_pollset* pollset : core_server->pollsets()) {
grpc_endpoint_add_to_pollset(iomgr_server_endpoint, pollset);
grpc_endpoint_add_to_pollset(iomgr_server_endpoint.get(), pollset);
}
grpc_core::Transport* transport = grpc_create_chttp2_transport(
core_server->channel_args(), std::move(iomgr_server_endpoint),
/*is_client=*/false);
CHECK(GRPC_LOG_IF_ERROR(
"SetupTransport",
core_server->SetupTransport(transport, nullptr,
@@ -143,9 +144,10 @@ class InProcessCHTTP2 {
args = args.Set(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, INT_MAX)
.Set(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, INT_MAX)
.Set(GRPC_ARG_HTTP2_BDP_PROBE, 0);
grpc_core::OrphanablePtr<grpc_endpoint> endpoint(
grpc_event_engine_endpoint_create(std::move(client_endpoint)));
grpc_core::Transport* transport = grpc_create_chttp2_transport(
args, grpc_event_engine_endpoint_create(std::move(client_endpoint)),
/*is_client=*/true);
args, std::move(endpoint), /*is_client=*/true);
CHECK(transport);
grpc_channel* channel =
grpc_core::ChannelCreate("target", args, GRPC_CLIENT_DIRECT_CHANNEL,

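Note the reordering in the first InProcessCHTTP2 hunk: because the transport now consumes the endpoint, pollset registration has to use a non-owning view before std::move hands the endpoint over. A compressed sketch of that ordering, with listener_endpoint, pollset, and server_args standing in for the fixture's objects (illustrative names):

// Take the non-owning view first...
grpc_core::OrphanablePtr<grpc_endpoint> ep(
    grpc_event_engine_endpoint_create(std::move(listener_endpoint)));
grpc_endpoint_add_to_pollset(ep.get(), pollset);
// ...then transfer ownership into the transport.
grpc_core::Transport* transport = grpc_create_chttp2_transport(
    server_args, std::move(ep), /*is_client=*/false);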
@@ -2699,6 +2699,8 @@ src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_final_info.h \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_spine.h \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/call_state.h \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/connectivity_state.h \
src/core/lib/transport/custom_metadata.h \

@@ -2472,6 +2472,8 @@ src/core/lib/transport/call_final_info.cc \
src/core/lib/transport/call_final_info.h \
src/core/lib/transport/call_spine.cc \
src/core/lib/transport/call_spine.h \
src/core/lib/transport/call_state.cc \
src/core/lib/transport/call_state.h \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/connectivity_state.h \
src/core/lib/transport/custom_metadata.h \

@@ -1525,6 +1525,30 @@
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "call_state_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,

@@ -101,7 +101,6 @@ DEPRECATED_FUNCTION_TEMP_ALLOW_LIST = {
"./src/core/lib/event_engine/posix_engine/timer_manager.cc",
"./src/core/lib/event_engine/windows/windows_endpoint.cc",
"./src/core/lib/event_engine/windows/windows_engine.cc",
"./src/core/lib/experiments/config.cc",
"./src/core/lib/gprpp/time.h",
"./src/core/lib/gprpp/work_serializer.cc",
"./src/core/lib/iomgr/call_combiner.cc",
@@ -158,14 +157,6 @@ DEPRECATED_FUNCTION_TEMP_ALLOW_LIST = {
"./src/core/lib/transport/call_filters.cc",
"./src/core/lib/transport/connectivity_state.cc",
"./src/core/lib/transport/transport.h",
"./src/core/load_balancing/grpclb/grpclb.cc",
"./src/core/load_balancing/pick_first/pick_first.cc",
"./src/core/load_balancing/priority/priority.cc",
"./src/core/load_balancing/ring_hash/ring_hash.cc",
"./src/core/load_balancing/rls/rls.cc",
"./src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc",
"./src/core/load_balancing/xds/cds.cc",
"./src/core/load_balancing/xds/xds_cluster_manager.cc",
"./src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc",
"./src/core/resolver/dns/c_ares/grpc_ares_wrapper.h",
"./src/core/resolver/dns/event_engine/event_engine_client_channel_resolver.cc",
