[call-v3] Client call implementation (#36724)

Closes #36724

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/36724 from ctiller:transport-refs-8 51cf92ab82
PiperOrigin-RevId: 640558652
Branch: pull/36821/head
Author: Craig Tiller (committed by Copybara-Service, 6 months ago)
Parent: e55f69cedd
Commit: 90a649fd3f
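This commit swaps in a new client-side call object behind the existing C-core surface: the public entry points stay the same, while the call-v3 path now constructs the call via MakeClientCall (see the src/core/client_channel/client_channel.cc hunks below). As a reminder of that unchanged surface, here is a minimal sketch using only standard grpc C-core calls; it is not code from this commit, and the target, method name, and deadline are illustrative.

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/time.h>

int main() {
  grpc_init();
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
  grpc_channel_credentials* creds = grpc_insecure_credentials_create();
  grpc_channel* channel = grpc_channel_create("localhost:50051", creds, nullptr);
  grpc_channel_credentials_release(creds);

  // Create a call; with the call_v3 experiment enabled this is served by the
  // new ClientCall implementation added in src/core/lib/surface/client_call.cc.
  grpc_call* call = grpc_channel_create_call(
      channel, /*parent_call=*/nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
      grpc_slice_from_static_string("/pkg.Service/Method"), /*host=*/nullptr,
      gpr_inf_future(GPR_CLOCK_REALTIME), /*reserved=*/nullptr);

  // Start a trivial batch: send initial metadata and half-close.
  grpc_op ops[2] = {};
  ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
  ops[1].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  grpc_call_error err =
      grpc_call_start_batch(call, ops, 2, /*tag=*/call, /*reserved=*/nullptr);
  (void)err;

  // Wait for the batch to complete (it fails fast if nothing is listening).
  grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);

  grpc_call_unref(call);
  grpc_channel_destroy(channel);
  grpc_completion_queue_shutdown(cq);
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
}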
Changed files (lines changed in parentheses):

  1. BUILD (21)
  2. CMakeLists.txt (541)
  3. Makefile (6)
  4. Package.swift (12)
  5. bazel/experiments.bzl (10)
  6. build_autogenerated.yaml (1074)
  7. config.m4 (6)
  8. config.w32 (6)
  9. gRPC-C++.podspec (12)
  10. gRPC-Core.podspec (18)
  11. grpc.gemspec (12)
  12. package.xml (12)
  13. src/core/client_channel/client_channel.cc (97)
  14. src/core/client_channel/client_channel.h (6)
  15. src/core/client_channel/client_channel_filter.cc (297)
  16. src/core/client_channel/client_channel_filter.h (41)
  17. src/core/client_channel/client_channel_plugin.cc (15)
  18. src/core/client_channel/load_balanced_call_destination.cc (20)
  19. src/core/client_channel/retry_filter.cc (2)
  20. src/core/client_channel/subchannel.cc (37)
  21. src/core/client_channel/subchannel.h (2)
  22. src/core/ext/filters/channel_idle/legacy_channel_idle_filter.cc (16)
  23. src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc (9)
  24. src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc (24)
  25. src/core/ext/transport/chaotic_good/server_transport.cc (44)
  26. src/core/ext/transport/chttp2/client/chttp2_connector.cc (2)
  27. src/core/ext/transport/inproc/inproc_transport.cc (10)
  28. src/core/lib/channel/channel_stack.cc (32)
  29. src/core/lib/channel/channel_stack.h (25)
  30. src/core/lib/channel/channel_stack_builder.h (5)
  31. src/core/lib/channel/channel_stack_builder_impl.cc (141)
  32. src/core/lib/channel/channel_stack_builder_impl.h (2)
  33. src/core/lib/channel/connected_channel.cc (710)
  34. src/core/lib/channel/promise_based_filter.cc (7)
  35. src/core/lib/channel/promise_based_filter.h (476)
  36. src/core/lib/experiments/experiments.cc (99)
  37. src/core/lib/experiments/experiments.h (24)
  38. src/core/lib/experiments/experiments.yaml (23)
  39. src/core/lib/gprpp/dual_ref_counted.h (2)
  40. src/core/lib/gprpp/ref_counted.h (9)
  41. src/core/lib/gprpp/single_set_ptr.h (8)
  42. src/core/lib/promise/cancel_callback.h (14)
  43. src/core/lib/promise/party.h (5)
  44. src/core/lib/surface/call.cc (3264)
  45. src/core/lib/surface/call.h (105)
  46. src/core/lib/surface/call_utils.cc (286)
  47. src/core/lib/surface/call_utils.h (457)
  48. src/core/lib/surface/channel.h (17)
  49. src/core/lib/surface/channel_create.cc (6)
  50. src/core/lib/surface/channel_create.h (5)
  51. src/core/lib/surface/channel_init.cc (4)
  52. src/core/lib/surface/client_call.cc (423)
  53. src/core/lib/surface/client_call.h (179)
  54. src/core/lib/surface/filter_stack_call.cc (1164)
  55. src/core/lib/surface/filter_stack_call.h (370)
  56. src/core/lib/surface/legacy_channel.cc (30)
  57. src/core/lib/surface/legacy_channel.h (12)
  58. src/core/lib/surface/server_call.cc (224)
  59. src/core/lib/surface/server_call.h (167)
  60. src/core/lib/surface/wait_for_cq_end_op.cc (75)
  61. src/core/lib/surface/wait_for_cq_end_op.h (72)
  62. src/core/lib/transport/batch_builder.cc (171)
  63. src/core/lib/transport/batch_builder.h (474)
  64. src/core/lib/transport/call_filters.cc (4)
  65. src/core/lib/transport/call_spine.h (384)
  66. src/core/load_balancing/grpclb/grpclb.cc (2)
  67. src/core/load_balancing/lb_policy.h (13)
  68. src/core/load_balancing/rls/rls.cc (2)
  69. src/core/plugin_registry/grpc_plugin_registry.cc (3)
  70. src/core/server/server.cc (25)
  71. src/core/server/server.h (5)
  72. src/core/server/server_call_tracer_filter.cc (1)
  73. src/core/xds/grpc/xds_transport_grpc.cc (4)
  74. src/core/xds/grpc/xds_transport_grpc.h (2)
  75. src/python/grpcio/grpc_core_dependencies.py (6)
  76. test/core/call/BUILD (79)
  77. test/core/call/batch_builder.cc (208)
  78. test/core/call/batch_builder.h (261)
  79. test/core/call/call_utils_test.cc (72)
  80. test/core/call/client_call_test.cc (251)
  81. test/core/call/corpus/client_call/empty (1)
  82. test/core/call/corpus/server_call/empty (1)
  83. test/core/call/server_call_test.cc (138)
  84. test/core/call/yodel/BUILD (3)
  85. test/core/call/yodel/yodel_test.cc (13)
  86. test/core/call/yodel/yodel_test.h (1)
  87. test/core/channel/channel_stack_builder_test.cc (15)
  88. test/core/channel/channel_stack_test.cc (2)
  89. test/core/client_channel/client_channel_test.cc (32)
  90. test/core/client_channel/load_balanced_call_destination_test.cc (43)
  91. test/core/end2end/BUILD (1)
  92. test/core/end2end/end2end_test_suites.cc (31)
  93. test/core/end2end/end2end_tests.cc (199)
  94. test/core/end2end/end2end_tests.h (245)
  95. test/core/end2end/tests/bad_ping.cc (12)
  96. test/core/end2end/tests/binary_metadata.cc (10)
  97. test/core/end2end/tests/call_creds.cc (42)
  98. test/core/end2end/tests/call_host_override.cc (6)
  99. test/core/end2end/tests/cancel_after_accept.cc (10)
  100. test/core/end2end/tests/cancel_after_client_done.cc (10)
Some files were not shown because too many files have changed in this diff.

BUILD (21)

@@ -1800,6 +1800,7 @@ grpc_cc_library(
 "stats",
 "//src/core:arena",
 "//src/core:call_arena_allocator",
+"//src/core:call_destination",
 "//src/core:channel_args",
 "//src/core:channel_stack_type",
 "//src/core:compression",
@@ -1992,15 +1993,17 @@ grpc_cc_library(
 "//src/core:lib/surface/call.cc",
 "//src/core:lib/surface/call_details.cc",
 "//src/core:lib/surface/call_log_batch.cc",
+"//src/core:lib/surface/call_utils.cc",
+"//src/core:lib/surface/client_call.cc",
 "//src/core:lib/surface/completion_queue.cc",
 "//src/core:lib/surface/completion_queue_factory.cc",
 "//src/core:lib/surface/event_string.cc",
+"//src/core:lib/surface/filter_stack_call.cc",
 "//src/core:lib/surface/lame_client.cc",
 "//src/core:lib/surface/metadata_array.cc",
+"//src/core:lib/surface/server_call.cc",
 "//src/core:lib/surface/validate_metadata.cc",
 "//src/core:lib/surface/version.cc",
-"//src/core:lib/surface/wait_for_cq_end_op.cc",
-"//src/core:lib/transport/batch_builder.cc",
 "//src/core:lib/transport/transport.cc",
 "//src/core:lib/transport/transport_op_string.cc",
 ],
@@ -2013,14 +2016,16 @@ grpc_cc_library(
 "//src/core:lib/compression/message_compress.h",
 "//src/core:lib/surface/call.h",
 "//src/core:lib/surface/call_test_only.h",
+"//src/core:lib/surface/call_utils.h",
+"//src/core:lib/surface/client_call.h",
 "//src/core:lib/surface/completion_queue.h",
 "//src/core:lib/surface/completion_queue_factory.h",
 "//src/core:lib/surface/event_string.h",
+"//src/core:lib/surface/filter_stack_call.h",
 "//src/core:lib/surface/init.h",
 "//src/core:lib/surface/lame_client.h",
+"//src/core:lib/surface/server_call.h",
 "//src/core:lib/surface/validate_metadata.h",
-"//src/core:lib/surface/wait_for_cq_end_op.h",
-"//src/core:lib/transport/batch_builder.h",
 "//src/core:lib/transport/transport.h",
 ],
 defines = select({
@@ -2033,8 +2038,8 @@ grpc_cc_library(
 "absl/container:inlined_vector",
 "absl/functional:any_invocable",
 "absl/functional:function_ref",
-"absl/log",
 "absl/log:check",
+"absl/log:log",
 "absl/meta:type_traits",
 "absl/status",
 "absl/status:statusor",
@@ -2065,6 +2070,7 @@ grpc_cc_library(
 "debug_location",
 "exec_ctx",
 "gpr",
+"grpc_core_credentials_header",
 "grpc_public_hdrs",
 "grpc_trace",
 "iomgr",
@@ -2125,6 +2131,7 @@ grpc_cc_library(
 "//src/core:ref_counted",
 "//src/core:seq",
 "//src/core:server_interface",
+"//src/core:single_set_ptr",
 "//src/core:slice",
 "//src/core:slice_buffer",
 "//src/core:slice_cast",
@@ -3713,8 +3720,8 @@ grpc_cc_library(
 "absl/container:flat_hash_set",
 "absl/container:inlined_vector",
 "absl/functional:any_invocable",
-"absl/log",
 "absl/log:check",
+"absl/log:log",
 "absl/status",
 "absl/status:statusor",
 "absl/strings",
@@ -3751,7 +3758,6 @@ grpc_cc_library(
 "stats",
 "uri_parser",
 "work_serializer",
-"//src/core:activity",
 "//src/core:arena",
 "//src/core:arena_promise",
 "//src/core:backend_metric_parser",
@@ -3811,7 +3817,6 @@ grpc_cc_library(
 "//src/core:slice_buffer",
 "//src/core:slice_refcount",
 "//src/core:stats_data",
-"//src/core:status_flag",
 "//src/core:status_helper",
 "//src/core:subchannel_connector",
 "//src/core:subchannel_interface",

CMakeLists.txt (generated, 541)

File diff suppressed because it is too large.

Makefile (generated, 6)

@@ -1326,22 +1326,24 @@ LIBGRPC_SRC = \
 src/core/lib/surface/call.cc \
 src/core/lib/surface/call_details.cc \
 src/core/lib/surface/call_log_batch.cc \
+src/core/lib/surface/call_utils.cc \
 src/core/lib/surface/channel.cc \
 src/core/lib/surface/channel_create.cc \
 src/core/lib/surface/channel_init.cc \
 src/core/lib/surface/channel_stack_type.cc \
+src/core/lib/surface/client_call.cc \
 src/core/lib/surface/completion_queue.cc \
 src/core/lib/surface/completion_queue_factory.cc \
 src/core/lib/surface/event_string.cc \
+src/core/lib/surface/filter_stack_call.cc \
 src/core/lib/surface/init.cc \
 src/core/lib/surface/init_internally.cc \
 src/core/lib/surface/lame_client.cc \
 src/core/lib/surface/legacy_channel.cc \
 src/core/lib/surface/metadata_array.cc \
+src/core/lib/surface/server_call.cc \
 src/core/lib/surface/validate_metadata.cc \
 src/core/lib/surface/version.cc \
-src/core/lib/surface/wait_for_cq_end_op.cc \
-src/core/lib/transport/batch_builder.cc \
 src/core/lib/transport/bdp_estimator.cc \
 src/core/lib/transport/call_arena_allocator.cc \
 src/core/lib/transport/call_filters.cc \

Package.swift (generated, 12)

@@ -1656,6 +1656,8 @@ let package = Package(
 "src/core/lib/surface/call_log_batch.cc",
 "src/core/lib/surface/call_test_only.h",
 "src/core/lib/surface/call_trace.h",
+"src/core/lib/surface/call_utils.cc",
+"src/core/lib/surface/call_utils.h",
 "src/core/lib/surface/channel.cc",
 "src/core/lib/surface/channel.h",
 "src/core/lib/surface/channel_create.cc",
@@ -1664,12 +1666,16 @@ let package = Package(
 "src/core/lib/surface/channel_init.h",
 "src/core/lib/surface/channel_stack_type.cc",
 "src/core/lib/surface/channel_stack_type.h",
+"src/core/lib/surface/client_call.cc",
+"src/core/lib/surface/client_call.h",
 "src/core/lib/surface/completion_queue.cc",
 "src/core/lib/surface/completion_queue.h",
 "src/core/lib/surface/completion_queue_factory.cc",
 "src/core/lib/surface/completion_queue_factory.h",
 "src/core/lib/surface/event_string.cc",
 "src/core/lib/surface/event_string.h",
+"src/core/lib/surface/filter_stack_call.cc",
+"src/core/lib/surface/filter_stack_call.h",
 "src/core/lib/surface/init.cc",
 "src/core/lib/surface/init.h",
 "src/core/lib/surface/init_internally.cc",
@@ -1679,13 +1685,11 @@ let package = Package(
 "src/core/lib/surface/legacy_channel.cc",
 "src/core/lib/surface/legacy_channel.h",
 "src/core/lib/surface/metadata_array.cc",
+"src/core/lib/surface/server_call.cc",
+"src/core/lib/surface/server_call.h",
 "src/core/lib/surface/validate_metadata.cc",
 "src/core/lib/surface/validate_metadata.h",
 "src/core/lib/surface/version.cc",
-"src/core/lib/surface/wait_for_cq_end_op.cc",
-"src/core/lib/surface/wait_for_cq_end_op.h",
-"src/core/lib/transport/batch_builder.cc",
-"src/core/lib/transport/batch_builder.h",
 "src/core/lib/transport/bdp_estimator.cc",
 "src/core/lib/transport/bdp_estimator.h",
 "src/core/lib/transport/call_arena_allocator.cc",

bazel/experiments.bzl (10)

@@ -32,9 +32,7 @@ EXPERIMENT_ENABLES = {
     "multiping": "multiping",
     "peer_state_based_framing": "peer_state_based_framing",
     "pick_first_new": "pick_first_new",
-    "promise_based_client_call": "event_engine_client,event_engine_listener,promise_based_client_call",
-    "chaotic_good": "chaotic_good,event_engine_client,event_engine_listener,promise_based_client_call",
-    "promise_based_inproc_transport": "event_engine_client,event_engine_listener,promise_based_client_call,promise_based_inproc_transport",
+    "promise_based_inproc_transport": "promise_based_inproc_transport",
     "rstpit": "rstpit",
     "schedule_cancellation_over_write": "schedule_cancellation_over_write",
     "server_privacy": "server_privacy",
@@ -44,7 +42,6 @@ EXPERIMENT_ENABLES = {
     "unconstrained_max_quota_buffer_size": "unconstrained_max_quota_buffer_size",
     "work_serializer_clears_time_cache": "work_serializer_clears_time_cache",
     "work_serializer_dispatch": "event_engine_client,work_serializer_dispatch",
-    "call_v3": "call_v3,event_engine_client,event_engine_listener,work_serializer_dispatch",
 }

 EXPERIMENT_POLLERS = [
@@ -141,9 +138,7 @@ EXPERIMENTS = {
     },
     "off": {
         "core_end2end_test": [
-            "chaotic_good",
             "event_engine_client",
-            "promise_based_client_call",
         ],
         "endpoint_test": [
             "tcp_frame_size_tuning",
@@ -159,9 +154,6 @@ EXPERIMENTS = {
             "tcp_frame_size_tuning",
             "tcp_rcv_lowat",
         ],
-        "lame_client_test": [
-            "promise_based_client_call",
-        ],
         "resource_quota_test": [
             "free_large_allocator",
             "unconstrained_max_quota_buffer_size",

build_autogenerated.yaml (1074)

File diff suppressed because it is too large.

config.m4 (generated, 6)

@@ -701,22 +701,24 @@ if test "$PHP_GRPC" != "no"; then
 src/core/lib/surface/call.cc \
 src/core/lib/surface/call_details.cc \
 src/core/lib/surface/call_log_batch.cc \
+src/core/lib/surface/call_utils.cc \
 src/core/lib/surface/channel.cc \
 src/core/lib/surface/channel_create.cc \
 src/core/lib/surface/channel_init.cc \
 src/core/lib/surface/channel_stack_type.cc \
+src/core/lib/surface/client_call.cc \
 src/core/lib/surface/completion_queue.cc \
 src/core/lib/surface/completion_queue_factory.cc \
 src/core/lib/surface/event_string.cc \
+src/core/lib/surface/filter_stack_call.cc \
 src/core/lib/surface/init.cc \
 src/core/lib/surface/init_internally.cc \
 src/core/lib/surface/lame_client.cc \
 src/core/lib/surface/legacy_channel.cc \
 src/core/lib/surface/metadata_array.cc \
+src/core/lib/surface/server_call.cc \
 src/core/lib/surface/validate_metadata.cc \
 src/core/lib/surface/version.cc \
-src/core/lib/surface/wait_for_cq_end_op.cc \
-src/core/lib/transport/batch_builder.cc \
 src/core/lib/transport/bdp_estimator.cc \
 src/core/lib/transport/call_arena_allocator.cc \
 src/core/lib/transport/call_filters.cc \

config.w32 (generated, 6)

@@ -666,22 +666,24 @@ if (PHP_GRPC != "no") {
 "src\\core\\lib\\surface\\call.cc " +
 "src\\core\\lib\\surface\\call_details.cc " +
 "src\\core\\lib\\surface\\call_log_batch.cc " +
+"src\\core\\lib\\surface\\call_utils.cc " +
 "src\\core\\lib\\surface\\channel.cc " +
 "src\\core\\lib\\surface\\channel_create.cc " +
 "src\\core\\lib\\surface\\channel_init.cc " +
 "src\\core\\lib\\surface\\channel_stack_type.cc " +
+"src\\core\\lib\\surface\\client_call.cc " +
 "src\\core\\lib\\surface\\completion_queue.cc " +
 "src\\core\\lib\\surface\\completion_queue_factory.cc " +
 "src\\core\\lib\\surface\\event_string.cc " +
+"src\\core\\lib\\surface\\filter_stack_call.cc " +
 "src\\core\\lib\\surface\\init.cc " +
 "src\\core\\lib\\surface\\init_internally.cc " +
 "src\\core\\lib\\surface\\lame_client.cc " +
 "src\\core\\lib\\surface\\legacy_channel.cc " +
 "src\\core\\lib\\surface\\metadata_array.cc " +
+"src\\core\\lib\\surface\\server_call.cc " +
 "src\\core\\lib\\surface\\validate_metadata.cc " +
 "src\\core\\lib\\surface\\version.cc " +
-"src\\core\\lib\\surface\\wait_for_cq_end_op.cc " +
-"src\\core\\lib\\transport\\batch_builder.cc " +
 "src\\core\\lib\\transport\\bdp_estimator.cc " +
 "src\\core\\lib\\transport\\call_arena_allocator.cc " +
 "src\\core\\lib\\transport\\call_filters.cc " +

gRPC-C++.podspec (generated, 12)

@@ -1176,20 +1176,22 @@ Pod::Spec.new do |s|
 'src/core/lib/surface/call.h',
 'src/core/lib/surface/call_test_only.h',
 'src/core/lib/surface/call_trace.h',
+'src/core/lib/surface/call_utils.h',
 'src/core/lib/surface/channel.h',
 'src/core/lib/surface/channel_create.h',
 'src/core/lib/surface/channel_init.h',
 'src/core/lib/surface/channel_stack_type.h',
+'src/core/lib/surface/client_call.h',
 'src/core/lib/surface/completion_queue.h',
 'src/core/lib/surface/completion_queue_factory.h',
 'src/core/lib/surface/event_string.h',
+'src/core/lib/surface/filter_stack_call.h',
 'src/core/lib/surface/init.h',
 'src/core/lib/surface/init_internally.h',
 'src/core/lib/surface/lame_client.h',
 'src/core/lib/surface/legacy_channel.h',
+'src/core/lib/surface/server_call.h',
 'src/core/lib/surface/validate_metadata.h',
-'src/core/lib/surface/wait_for_cq_end_op.h',
-'src/core/lib/transport/batch_builder.h',
 'src/core/lib/transport/bdp_estimator.h',
 'src/core/lib/transport/call_arena_allocator.h',
 'src/core/lib/transport/call_destination.h',
@@ -2450,20 +2452,22 @@ Pod::Spec.new do |s|
 'src/core/lib/surface/call.h',
 'src/core/lib/surface/call_test_only.h',
 'src/core/lib/surface/call_trace.h',
+'src/core/lib/surface/call_utils.h',
 'src/core/lib/surface/channel.h',
 'src/core/lib/surface/channel_create.h',
 'src/core/lib/surface/channel_init.h',
 'src/core/lib/surface/channel_stack_type.h',
+'src/core/lib/surface/client_call.h',
 'src/core/lib/surface/completion_queue.h',
 'src/core/lib/surface/completion_queue_factory.h',
 'src/core/lib/surface/event_string.h',
+'src/core/lib/surface/filter_stack_call.h',
 'src/core/lib/surface/init.h',
 'src/core/lib/surface/init_internally.h',
 'src/core/lib/surface/lame_client.h',
 'src/core/lib/surface/legacy_channel.h',
+'src/core/lib/surface/server_call.h',
 'src/core/lib/surface/validate_metadata.h',
-'src/core/lib/surface/wait_for_cq_end_op.h',
-'src/core/lib/transport/batch_builder.h',
 'src/core/lib/transport/bdp_estimator.h',
 'src/core/lib/transport/call_arena_allocator.h',
 'src/core/lib/transport/call_destination.h',

gRPC-Core.podspec (generated, 18)

@@ -1771,6 +1771,8 @@ Pod::Spec.new do |s|
 'src/core/lib/surface/call_log_batch.cc',
 'src/core/lib/surface/call_test_only.h',
 'src/core/lib/surface/call_trace.h',
+'src/core/lib/surface/call_utils.cc',
+'src/core/lib/surface/call_utils.h',
 'src/core/lib/surface/channel.cc',
 'src/core/lib/surface/channel.h',
 'src/core/lib/surface/channel_create.cc',
@@ -1779,12 +1781,16 @@ Pod::Spec.new do |s|
 'src/core/lib/surface/channel_init.h',
 'src/core/lib/surface/channel_stack_type.cc',
 'src/core/lib/surface/channel_stack_type.h',
+'src/core/lib/surface/client_call.cc',
+'src/core/lib/surface/client_call.h',
 'src/core/lib/surface/completion_queue.cc',
 'src/core/lib/surface/completion_queue.h',
 'src/core/lib/surface/completion_queue_factory.cc',
 'src/core/lib/surface/completion_queue_factory.h',
 'src/core/lib/surface/event_string.cc',
 'src/core/lib/surface/event_string.h',
+'src/core/lib/surface/filter_stack_call.cc',
+'src/core/lib/surface/filter_stack_call.h',
 'src/core/lib/surface/init.cc',
 'src/core/lib/surface/init.h',
 'src/core/lib/surface/init_internally.cc',
@@ -1794,13 +1800,11 @@ Pod::Spec.new do |s|
 'src/core/lib/surface/legacy_channel.cc',
 'src/core/lib/surface/legacy_channel.h',
 'src/core/lib/surface/metadata_array.cc',
+'src/core/lib/surface/server_call.cc',
+'src/core/lib/surface/server_call.h',
 'src/core/lib/surface/validate_metadata.cc',
 'src/core/lib/surface/validate_metadata.h',
 'src/core/lib/surface/version.cc',
-'src/core/lib/surface/wait_for_cq_end_op.cc',
-'src/core/lib/surface/wait_for_cq_end_op.h',
-'src/core/lib/transport/batch_builder.cc',
-'src/core/lib/transport/batch_builder.h',
 'src/core/lib/transport/bdp_estimator.cc',
 'src/core/lib/transport/bdp_estimator.h',
 'src/core/lib/transport/call_arena_allocator.cc',
@@ -3232,20 +3236,22 @@ Pod::Spec.new do |s|
 'src/core/lib/surface/call.h',
 'src/core/lib/surface/call_test_only.h',
 'src/core/lib/surface/call_trace.h',
+'src/core/lib/surface/call_utils.h',
 'src/core/lib/surface/channel.h',
 'src/core/lib/surface/channel_create.h',
 'src/core/lib/surface/channel_init.h',
 'src/core/lib/surface/channel_stack_type.h',
+'src/core/lib/surface/client_call.h',
 'src/core/lib/surface/completion_queue.h',
 'src/core/lib/surface/completion_queue_factory.h',
 'src/core/lib/surface/event_string.h',
+'src/core/lib/surface/filter_stack_call.h',
 'src/core/lib/surface/init.h',
 'src/core/lib/surface/init_internally.h',
 'src/core/lib/surface/lame_client.h',
 'src/core/lib/surface/legacy_channel.h',
+'src/core/lib/surface/server_call.h',
 'src/core/lib/surface/validate_metadata.h',
-'src/core/lib/surface/wait_for_cq_end_op.h',
-'src/core/lib/transport/batch_builder.h',
 'src/core/lib/transport/bdp_estimator.h',
 'src/core/lib/transport/call_arena_allocator.h',
 'src/core/lib/transport/call_destination.h',

grpc.gemspec (generated, 12)

@@ -1658,6 +1658,8 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/surface/call_log_batch.cc )
 s.files += %w( src/core/lib/surface/call_test_only.h )
 s.files += %w( src/core/lib/surface/call_trace.h )
+s.files += %w( src/core/lib/surface/call_utils.cc )
+s.files += %w( src/core/lib/surface/call_utils.h )
 s.files += %w( src/core/lib/surface/channel.cc )
 s.files += %w( src/core/lib/surface/channel.h )
 s.files += %w( src/core/lib/surface/channel_create.cc )
@@ -1666,12 +1668,16 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/surface/channel_init.h )
 s.files += %w( src/core/lib/surface/channel_stack_type.cc )
 s.files += %w( src/core/lib/surface/channel_stack_type.h )
+s.files += %w( src/core/lib/surface/client_call.cc )
+s.files += %w( src/core/lib/surface/client_call.h )
 s.files += %w( src/core/lib/surface/completion_queue.cc )
 s.files += %w( src/core/lib/surface/completion_queue.h )
 s.files += %w( src/core/lib/surface/completion_queue_factory.cc )
 s.files += %w( src/core/lib/surface/completion_queue_factory.h )
 s.files += %w( src/core/lib/surface/event_string.cc )
 s.files += %w( src/core/lib/surface/event_string.h )
+s.files += %w( src/core/lib/surface/filter_stack_call.cc )
+s.files += %w( src/core/lib/surface/filter_stack_call.h )
 s.files += %w( src/core/lib/surface/init.cc )
 s.files += %w( src/core/lib/surface/init.h )
 s.files += %w( src/core/lib/surface/init_internally.cc )
@@ -1681,13 +1687,11 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/surface/legacy_channel.cc )
 s.files += %w( src/core/lib/surface/legacy_channel.h )
 s.files += %w( src/core/lib/surface/metadata_array.cc )
+s.files += %w( src/core/lib/surface/server_call.cc )
+s.files += %w( src/core/lib/surface/server_call.h )
 s.files += %w( src/core/lib/surface/validate_metadata.cc )
 s.files += %w( src/core/lib/surface/validate_metadata.h )
 s.files += %w( src/core/lib/surface/version.cc )
-s.files += %w( src/core/lib/surface/wait_for_cq_end_op.cc )
-s.files += %w( src/core/lib/surface/wait_for_cq_end_op.h )
-s.files += %w( src/core/lib/transport/batch_builder.cc )
-s.files += %w( src/core/lib/transport/batch_builder.h )
 s.files += %w( src/core/lib/transport/bdp_estimator.cc )
 s.files += %w( src/core/lib/transport/bdp_estimator.h )
 s.files += %w( src/core/lib/transport/call_arena_allocator.cc )

package.xml (generated, 12)

@@ -1640,6 +1640,8 @@
 <file baseinstalldir="/" name="src/core/lib/surface/call_log_batch.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/call_test_only.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/call_trace.h" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/call_utils.cc" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/call_utils.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/channel.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/channel.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/channel_create.cc" role="src" />
@@ -1648,12 +1650,16 @@
 <file baseinstalldir="/" name="src/core/lib/surface/channel_init.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/channel_stack_type.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/channel_stack_type.h" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/client_call.cc" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/client_call.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/completion_queue.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/completion_queue.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/completion_queue_factory.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/completion_queue_factory.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/event_string.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/event_string.h" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/filter_stack_call.cc" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/filter_stack_call.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/init.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/init.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/init_internally.cc" role="src" />
@@ -1663,13 +1669,11 @@
 <file baseinstalldir="/" name="src/core/lib/surface/legacy_channel.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/legacy_channel.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/metadata_array.cc" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/server_call.cc" role="src" />
+<file baseinstalldir="/" name="src/core/lib/surface/server_call.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/surface/version.cc" role="src" />
-<file baseinstalldir="/" name="src/core/lib/surface/wait_for_cq_end_op.cc" role="src" />
-<file baseinstalldir="/" name="src/core/lib/surface/wait_for_cq_end_op.h" role="src" />
-<file baseinstalldir="/" name="src/core/lib/transport/batch_builder.cc" role="src" />
-<file baseinstalldir="/" name="src/core/lib/transport/batch_builder.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.cc" role="src" />
 <file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/transport/call_arena_allocator.cc" role="src" />

@ -48,64 +48,47 @@
#include <grpc/support/string_util.h> #include <grpc/support/string_util.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include "src/core/client_channel/backup_poller.h"
#include "src/core/client_channel/client_channel_internal.h" #include "src/core/client_channel/client_channel_internal.h"
#include "src/core/client_channel/client_channel_service_config.h" #include "src/core/client_channel/client_channel_service_config.h"
#include "src/core/client_channel/config_selector.h" #include "src/core/client_channel/config_selector.h"
#include "src/core/client_channel/dynamic_filters.h" #include "src/core/client_channel/dynamic_filters.h"
#include "src/core/client_channel/global_subchannel_pool.h" #include "src/core/client_channel/global_subchannel_pool.h"
#include "src/core/client_channel/local_subchannel_pool.h" #include "src/core/client_channel/local_subchannel_pool.h"
#include "src/core/client_channel/retry_filter.h"
#include "src/core/client_channel/subchannel.h" #include "src/core/client_channel/subchannel.h"
#include "src/core/client_channel/subchannel_interface_internal.h" #include "src/core/client_channel/subchannel_interface_internal.h"
#include "src/core/ext/filters/channel_idle/legacy_channel_idle_filter.h" #include "src/core/ext/filters/channel_idle/legacy_channel_idle_filter.h"
#include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/promise_based_filter.h"
#include "src/core/lib/channel/status_util.h" #include "src/core/lib/channel/status_util.h"
#include "src/core/lib/config/core_configuration.h" #include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gprpp/crash.h" #include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/gprpp/sync.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/unique_type_name.h"
#include "src/core/lib/gprpp/work_serializer.h" #include "src/core/lib/gprpp/work_serializer.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/resolved_address.h" #include "src/core/lib/iomgr/resolved_address.h"
#include "src/core/lib/promise/cancel_callback.h"
#include "src/core/lib/promise/context.h" #include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/exec_ctx_wakeup_scheduler.h" #include "src/core/lib/promise/exec_ctx_wakeup_scheduler.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/loop.h" #include "src/core/lib/promise/loop.h"
#include "src/core/lib/promise/map.h" #include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/promise/poll.h" #include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/promise.h"
#include "src/core/lib/promise/sleep.h" #include "src/core/lib/promise/sleep.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h" #include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/slice/slice.h" #include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h" #include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h" #include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/client_call.h"
#include "src/core/lib/surface/completion_queue.h" #include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/call_spine.h" #include "src/core/lib/transport/call_spine.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h" #include "src/core/lib/transport/metadata_batch.h"
#include "src/core/load_balancing/backend_metric_parser.h"
#include "src/core/load_balancing/child_policy_handler.h" #include "src/core/load_balancing/child_policy_handler.h"
#include "src/core/load_balancing/lb_policy.h" #include "src/core/load_balancing/lb_policy.h"
#include "src/core/load_balancing/lb_policy_registry.h" #include "src/core/load_balancing/lb_policy_registry.h"
#include "src/core/load_balancing/subchannel_interface.h" #include "src/core/load_balancing/subchannel_interface.h"
#include "src/core/resolver/endpoint_addresses.h" #include "src/core/resolver/endpoint_addresses.h"
#include "src/core/resolver/resolver_registry.h" #include "src/core/resolver/resolver_registry.h"
#include "src/core/service_config/service_config_call_data.h"
#include "src/core/service_config/service_config_impl.h" #include "src/core/service_config/service_config_impl.h"
#include "src/core/telemetry/metrics.h" #include "src/core/telemetry/metrics.h"
#include "src/core/util/json/json.h" #include "src/core/util/json/json.h"
@ -129,7 +112,8 @@ extern TraceFlag grpc_client_channel_lb_call_trace;
class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler { class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
public: public:
explicit ResolverResultHandler(RefCountedPtr<ClientChannel> client_channel) explicit ResolverResultHandler(
WeakRefCountedPtr<ClientChannel> client_channel)
: client_channel_(std::move(client_channel)) {} : client_channel_(std::move(client_channel)) {}
~ResolverResultHandler() override { ~ResolverResultHandler() override {
@ -145,7 +129,7 @@ class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
} }
private: private:
RefCountedPtr<ClientChannel> client_channel_; WeakRefCountedPtr<ClientChannel> client_channel_;
}; };
// //
@ -163,7 +147,7 @@ class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
class ClientChannel::SubchannelWrapper class ClientChannel::SubchannelWrapper
: public SubchannelInterfaceWithCallDestination { : public SubchannelInterfaceWithCallDestination {
public: public:
SubchannelWrapper(RefCountedPtr<ClientChannel> client_channel, SubchannelWrapper(WeakRefCountedPtr<ClientChannel> client_channel,
RefCountedPtr<Subchannel> subchannel); RefCountedPtr<Subchannel> subchannel);
~SubchannelWrapper() override; ~SubchannelWrapper() override;
@ -210,7 +194,7 @@ class ClientChannel::SubchannelWrapper
} }
}; };
RefCountedPtr<ClientChannel> client_channel_; WeakRefCountedPtr<ClientChannel> client_channel_;
RefCountedPtr<Subchannel> subchannel_; RefCountedPtr<Subchannel> subchannel_;
// Maps from the address of the watcher passed to us by the LB policy // Maps from the address of the watcher passed to us by the LB policy
// to the address of the WrapperWatcher that we passed to the underlying // to the address of the WrapperWatcher that we passed to the underlying
@ -333,7 +317,7 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
}; };
ClientChannel::SubchannelWrapper::SubchannelWrapper( ClientChannel::SubchannelWrapper::SubchannelWrapper(
RefCountedPtr<ClientChannel> client_channel, WeakRefCountedPtr<ClientChannel> client_channel,
RefCountedPtr<Subchannel> subchannel) RefCountedPtr<Subchannel> subchannel)
: SubchannelInterfaceWithCallDestination( : SubchannelInterfaceWithCallDestination(
GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace) GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)
@ -450,7 +434,7 @@ class ClientChannel::ClientChannelControlHelper
: public LoadBalancingPolicy::ChannelControlHelper { : public LoadBalancingPolicy::ChannelControlHelper {
public: public:
explicit ClientChannelControlHelper( explicit ClientChannelControlHelper(
RefCountedPtr<ClientChannel> client_channel) WeakRefCountedPtr<ClientChannel> client_channel)
: client_channel_(std::move(client_channel)) {} : client_channel_(std::move(client_channel)) {}
~ClientChannelControlHelper() override { ~ClientChannelControlHelper() override {
@ -552,7 +536,7 @@ class ClientChannel::ClientChannelControlHelper
return channelz::ChannelTrace::Error; return channelz::ChannelTrace::Error;
} }
RefCountedPtr<ClientChannel> client_channel_; WeakRefCountedPtr<ClientChannel> client_channel_;
}; };
// //
@ -571,9 +555,8 @@ RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
} // namespace } // namespace
absl::StatusOr<OrphanablePtr<Channel>> ClientChannel::Create( absl::StatusOr<RefCountedPtr<Channel>> ClientChannel::Create(
std::string target, ChannelArgs channel_args) { std::string target, ChannelArgs channel_args) {
gpr_log(GPR_ERROR, "ARGS: %s", channel_args.ToString().c_str());
// Get URI to resolve, using proxy mapper if needed. // Get URI to resolve, using proxy mapper if needed.
if (target.empty()) { if (target.empty()) {
return absl::InternalError("target URI is empty in client channel"); return absl::InternalError("target URI is empty in client channel");
@ -617,7 +600,7 @@ absl::StatusOr<OrphanablePtr<Channel>> ClientChannel::Create(
"Missing event engine in args for client channel"); "Missing event engine in args for client channel");
} }
// Success. Construct channel. // Success. Construct channel.
return MakeOrphanable<ClientChannel>( return MakeRefCounted<ClientChannel>(
std::move(target), std::move(channel_args), std::move(uri_to_resolve), std::move(target), std::move(channel_args), std::move(uri_to_resolve),
std::move(*default_service_config), client_channel_factory, std::move(*default_service_config), client_channel_factory,
call_destination_factory); call_destination_factory);
@ -684,11 +667,14 @@ ClientChannel::~ClientChannel() {
} }
} }
void ClientChannel::Orphan() { void ClientChannel::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) { if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: shutting down", this); gpr_log(GPR_INFO, "client_channel=%p: shutting down", this);
} }
auto self = RefAsSubclass<ClientChannel>(); // Weird capture then copy needed to satisfy thread safety analysis,
// otherwise it seems to fail to recognize the correct lock is taken in the
// lambda.
auto self = WeakRefAsSubclass<ClientChannel>();
work_serializer_->Run( work_serializer_->Run(
[self]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(*self->work_serializer_) { [self]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(*self->work_serializer_) {
self->DestroyResolverAndLbPolicyLocked(); self->DestroyResolverAndLbPolicyLocked();
@ -698,7 +684,6 @@ void ClientChannel::Orphan() {
// timer from being reset by other threads. // timer from being reset by other threads.
idle_state_.IncreaseCallCount(); idle_state_.IncreaseCallCount();
idle_activity_.Reset(); idle_activity_.Reset();
Unref();
} }
grpc_connectivity_state ClientChannel::CheckConnectivityState( grpc_connectivity_state ClientChannel::CheckConnectivityState(
@ -710,7 +695,7 @@ grpc_connectivity_state ClientChannel::CheckConnectivityState(
grpc_connectivity_state state = grpc_connectivity_state state =
ABSL_TS_UNCHECKED_READ(state_tracker_).state(); ABSL_TS_UNCHECKED_READ(state_tracker_).state();
if (state == GRPC_CHANNEL_IDLE && try_to_connect) { if (state == GRPC_CHANNEL_IDLE && try_to_connect) {
auto self = RefAsSubclass<ClientChannel>(); // Held by callback. auto self = WeakRefAsSubclass<ClientChannel>(); // Held by callback.
work_serializer_->Run( work_serializer_->Run(
[self]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(*self->work_serializer_) { [self]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(*self->work_serializer_) {
self->TryToConnectLocked(); self->TryToConnectLocked();
@ -801,34 +786,26 @@ void ClientChannel::Ping(grpc_completion_queue*, void*) {
Crash("not implemented"); Crash("not implemented");
} }
grpc_call* ClientChannel::CreateCall(grpc_call*, uint32_t, grpc_call* ClientChannel::CreateCall(
grpc_completion_queue*, grpc_pollset_set*, grpc_call* parent_call, uint32_t propagation_mask,
Slice, absl::optional<Slice>, Timestamp, grpc_completion_queue* cq, grpc_pollset_set* /*pollset_set_alternative*/,
bool) { Slice path, absl::optional<Slice> authority, Timestamp deadline, bool) {
// TODO(ctiller): code to convert from C-core batch API to v3 call, then return MakeClientCall(parent_call, propagation_mask, cq, std::move(path),
// invoke CreateCall(client_initial_metadata, arena) std::move(authority), false, deadline,
// TODO(ctiller): make sure call holds a ref to ClientChannel for its entire compression_options(), event_engine_.get(),
// lifetime call_arena_allocator()->MakeArena(), Ref());
Crash("not implemented");
return nullptr;
} }
CallInitiator ClientChannel::CreateCall( void ClientChannel::StartCall(UnstartedCallHandler unstarted_handler) {
ClientMetadataHandle client_initial_metadata) {
// Increment call count. // Increment call count.
if (idle_timeout_ != Duration::Zero()) idle_state_.IncreaseCallCount(); if (idle_timeout_ != Duration::Zero()) idle_state_.IncreaseCallCount();
// Exit IDLE if needed. // Exit IDLE if needed.
CheckConnectivityState(/*try_to_connect=*/true); CheckConnectivityState(/*try_to_connect=*/true);
// Create an initiator/unstarted-handler pair.
auto call =
MakeCallPair(std::move(client_initial_metadata), event_engine_.get(),
call_arena_allocator()->MakeArena());
// Spawn a promise to wait for the resolver result. // Spawn a promise to wait for the resolver result.
// This will eventually start the call. // This will eventually start the call.
call.initiator.SpawnGuardedUntilCallCompletes( unstarted_handler.SpawnGuardedUntilCallCompletes(
"wait-for-name-resolution", "wait-for-name-resolution",
[self = RefAsSubclass<ClientChannel>(), [self = RefAsSubclass<ClientChannel>(), unstarted_handler]() mutable {
unstarted_handler = std::move(call.handler)]() mutable {
const bool wait_for_ready = const bool wait_for_ready =
unstarted_handler.UnprocessedClientInitialMetadata() unstarted_handler.UnprocessedClientInitialMetadata()
.GetOrCreatePointer(WaitForReady()) .GetOrCreatePointer(WaitForReady())
@ -878,8 +855,6 @@ CallInitiator ClientChannel::CreateCall(
return absl::OkStatus(); return absl::OkStatus();
}); });
}); });
// Return the initiator.
return std::move(call.initiator);
} }
void ClientChannel::CreateResolverLocked() { void ClientChannel::CreateResolverLocked() {
@ -889,7 +864,8 @@ void ClientChannel::CreateResolverLocked() {
} }
resolver_ = CoreConfiguration::Get().resolver_registry().CreateResolver( resolver_ = CoreConfiguration::Get().resolver_registry().CreateResolver(
uri_to_resolve_, channel_args_, nullptr, work_serializer_, uri_to_resolve_, channel_args_, nullptr, work_serializer_,
std::make_unique<ResolverResultHandler>(RefAsSubclass<ClientChannel>())); std::make_unique<ResolverResultHandler>(
WeakRefAsSubclass<ClientChannel>()));
// Since the validity of the args was checked when the channel was created, // Since the validity of the args was checked when the channel was created,
// CreateResolver() must return a non-null result. // CreateResolver() must return a non-null result.
CHECK(resolver_ != nullptr); CHECK(resolver_ != nullptr);
@ -919,7 +895,8 @@ void ClientChannel::DestroyResolverAndLbPolicyLocked() {
lb_policy_.get()); lb_policy_.get());
} }
lb_policy_.reset(); lb_policy_.reset();
picker_.Set(nullptr); picker_.Set(MakeRefCounted<LoadBalancingPolicy::DropPicker>(
absl::UnavailableError("Channel shutdown")));
} }
} }
} }
@ -1165,8 +1142,8 @@ absl::Status ClientChannel::CreateOrUpdateLbPolicyLocked(
update_args.config = std::move(lb_policy_config); update_args.config = std::move(lb_policy_config);
update_args.resolution_note = std::move(result.resolution_note); update_args.resolution_note = std::move(result.resolution_note);
// Remove the config selector from channel args so that we're not holding // Remove the config selector from channel args so that we're not holding
// unnecessary refs that cause it to be destroyed somewhere other than in the // unnecessary refs that cause it to be destroyed somewhere other than in
// WorkSerializer. // the WorkSerializer.
update_args.args = result.args.Remove(GRPC_ARG_CONFIG_SELECTOR); update_args.args = result.args.Remove(GRPC_ARG_CONFIG_SELECTOR);
// Add health check service name to channel args. // Add health check service name to channel args.
if (health_check_service_name.has_value()) { if (health_check_service_name.has_value()) {
@ -1200,7 +1177,7 @@ OrphanablePtr<LoadBalancingPolicy> ClientChannel::CreateLbPolicyLocked(
lb_policy_args.work_serializer = work_serializer_; lb_policy_args.work_serializer = work_serializer_;
lb_policy_args.channel_control_helper = lb_policy_args.channel_control_helper =
std::make_unique<ClientChannelControlHelper>( std::make_unique<ClientChannelControlHelper>(
RefAsSubclass<ClientChannel>()); WeakRefAsSubclass<ClientChannel>());
lb_policy_args.args = args; lb_policy_args.args = args;
OrphanablePtr<LoadBalancingPolicy> lb_policy = OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
@ -1305,7 +1282,7 @@ void ClientChannel::StartIdleTimer() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) { if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: idle timer started", this); gpr_log(GPR_INFO, "client_channel=%p: idle timer started", this);
} }
-  auto self = RefAsSubclass<ClientChannel>();
+  auto self = WeakRefAsSubclass<ClientChannel>();
auto promise = Loop([self]() { auto promise = Loop([self]() {
return TrySeq(Sleep(Timestamp::Now() + self->idle_timeout_), return TrySeq(Sleep(Timestamp::Now() + self->idle_timeout_),
[self]() -> Poll<LoopCtl<absl::Status>> { [self]() -> Poll<LoopCtl<absl::Status>> {
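The idle timer above is a promise loop: Loop over TrySeq(Sleep(deadline), check). A plain-threads sketch of the same control flow, with illustrative names and none of the promise machinery:

#include <atomic>
#include <chrono>
#include <functional>
#include <thread>

// Sleep for the idle timeout, then either enter idle (no calls arrived since
// the last tick, like the LoopCtl exit above) or go around again (Continue{}).
void RunIdleTimer(std::atomic<int>& calls_since_last_tick,
                  std::atomic<bool>& shutdown,
                  std::chrono::milliseconds idle_timeout,
                  std::function<void()> enter_idle) {
  while (!shutdown.load()) {
    std::this_thread::sleep_for(idle_timeout);
    if (calls_since_last_tick.exchange(0) == 0) {
      enter_idle();  // channel saw no activity for a full timeout period
      return;
    }
    // Activity seen: loop again, equivalent to Continue{} in the promise loop.
  }
}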
@ -1359,7 +1336,7 @@ absl::Status ClientChannel::ApplyServiceConfigToCall(
return MaybeRewriteIllegalStatusCode(call_config_status, "ConfigSelector"); return MaybeRewriteIllegalStatusCode(call_config_status, "ConfigSelector");
} }
// Apply our own method params to the call. // Apply our own method params to the call.
-  auto* method_params = static_cast<ClientChannelMethodParsedConfig*>(
+  auto* method_params = DownCast<ClientChannelMethodParsedConfig*>(
service_config_call_data->GetMethodParsedConfig( service_config_call_data->GetMethodParsedConfig(
service_config_parser_index_)); service_config_parser_index_));
if (method_params != nullptr) { if (method_params != nullptr) {

@ -57,7 +57,7 @@ class ClientChannel : public Channel {
~CallDestinationFactory() = default; ~CallDestinationFactory() = default;
}; };
static absl::StatusOr<OrphanablePtr<Channel>> Create( static absl::StatusOr<RefCountedPtr<Channel>> Create(
std::string target, ChannelArgs channel_args); std::string target, ChannelArgs channel_args);
// Do not instantiate directly -- use Create() instead. // Do not instantiate directly -- use Create() instead.
@ -69,7 +69,7 @@ class ClientChannel : public Channel {
~ClientChannel() override; ~ClientChannel() override;
void Orphan() override; void Orphaned() override;
grpc_call* CreateCall(grpc_call* parent_call, uint32_t propagation_mask, grpc_call* CreateCall(grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, grpc_completion_queue* cq,
@ -77,7 +77,7 @@ class ClientChannel : public Channel {
Slice path, absl::optional<Slice> authority, Slice path, absl::optional<Slice> authority,
Timestamp deadline, bool registered_method) override; Timestamp deadline, bool registered_method) override;
-  CallInitiator CreateCall(ClientMetadataHandle client_initial_metadata);
+  void StartCall(UnstartedCallHandler unstarted_handler) override;
grpc_event_engine::experimental::EventEngine* event_engine() const override { grpc_event_engine::experimental::EventEngine* event_engine() const override {
return event_engine_.get(); return event_engine_.get();

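The header change above replaces CreateCall-returning-a-CallInitiator with StartCall(UnstartedCallHandler): the caller constructs the call and hands the handler to the channel, which decides when to start it. A shape-only sketch with stand-in types, not the real gRPC classes:

#include <memory>
#include <utility>

struct UnstartedCallHandlerSketch { /* metadata, arena, call spine... */ };

class ChannelIface {
 public:
  virtual ~ChannelIface() = default;
  virtual void StartCall(UnstartedCallHandlerSketch unstarted_handler) = 0;
};

class ForwardingChannel final : public ChannelIface {
 public:
  explicit ForwardingChannel(std::shared_ptr<ChannelIface> next)
      : next_(std::move(next)) {}
  void StartCall(UnstartedCallHandlerSketch h) override {
    // A real client channel would queue h until resolution and the LB pick
    // complete; this toy just forwards immediately.
    next_->StartCall(std::move(h));
  }
 private:
  std::shared_ptr<ChannelIface> next_;
};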
@ -304,119 +304,12 @@ class ClientChannelFilter::FilterBasedCallData final
grpc_error_handle cancel_error_; grpc_error_handle cancel_error_;
}; };
class ClientChannelFilter::PromiseBasedCallData final
: public ClientChannelFilter::CallData {
public:
explicit PromiseBasedCallData(ClientChannelFilter* chand) : chand_(chand) {}
~PromiseBasedCallData() override {
if (was_queued_ && client_initial_metadata_ != nullptr) {
MutexLock lock(&chand_->resolution_mu_);
RemoveCallFromResolverQueuedCallsLocked();
chand_->resolver_queued_calls_.erase(this);
}
}
ArenaPromise<absl::StatusOr<CallArgs>> MakeNameResolutionPromise(
CallArgs call_args) {
pollent_ = NowOrNever(call_args.polling_entity->WaitAndCopy()).value();
client_initial_metadata_ = std::move(call_args.client_initial_metadata);
// If we're still in IDLE, we need to start resolving.
if (GPR_UNLIKELY(chand_->CheckConnectivityState(false) ==
GRPC_CHANNEL_IDLE)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: %striggering exit idle", chand_,
this, GetContext<Activity>()->DebugTag().c_str());
}
// Bounce into the control plane work serializer to start resolving.
GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ExitIdle");
chand_->work_serializer_->Run(
[chand = chand_]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*chand_->work_serializer_) {
chand->CheckConnectivityState(/*try_to_connect=*/true);
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_, "ExitIdle");
},
DEBUG_LOCATION);
}
return [this, call_args = std::move(
call_args)]() mutable -> Poll<absl::StatusOr<CallArgs>> {
auto result = CheckResolution(was_queued_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: %sCheckResolution returns %s",
chand_, this, GetContext<Activity>()->DebugTag().c_str(),
result.has_value() ? result->ToString().c_str() : "Pending");
}
if (!result.has_value()) return Pending{};
if (!result->ok()) return *result;
call_args.client_initial_metadata = std::move(client_initial_metadata_);
return std::move(call_args);
};
}
private:
ClientChannelFilter* chand() const override { return chand_; }
Arena* arena() const override { return GetContext<Arena>(); }
grpc_polling_entity* pollent() override { return &pollent_; }
grpc_metadata_batch* send_initial_metadata() override {
return client_initial_metadata_.get();
}
void OnAddToQueueLocked() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannelFilter::resolution_mu_) {
waker_ = GetContext<Activity>()->MakeNonOwningWaker();
was_queued_ = true;
}
void RetryCheckResolutionLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
&ClientChannelFilter::resolution_mu_) override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: RetryCheckResolutionLocked(): %s",
chand_, this, waker_.ActivityDebugTag().c_str());
}
waker_.WakeupAsync();
}
void ResetDeadline(Duration timeout) override {
Call* call = GetContext<Call>();
CallContext* call_context = GetContext<CallContext>();
const Timestamp per_method_deadline =
Timestamp::FromCycleCounterRoundUp(call_context->call_start_time()) +
timeout;
call->UpdateDeadline(per_method_deadline);
}
ClientChannelFilter* chand_;
grpc_polling_entity pollent_;
ClientMetadataHandle client_initial_metadata_;
bool was_queued_ = false;
Waker waker_ ABSL_GUARDED_BY(&ClientChannelFilter::resolution_mu_);
};
// //
// Filter vtable // Filter vtable
// //
const grpc_channel_filter ClientChannelFilter::kFilterVtableWithPromises = { const grpc_channel_filter ClientChannelFilter::kFilter = {
ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch, ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch,
ClientChannelFilter::MakeCallPromise,
/* init_call: */ nullptr,
ClientChannelFilter::StartTransportOp,
sizeof(ClientChannelFilter::FilterBasedCallData),
ClientChannelFilter::FilterBasedCallData::Init,
ClientChannelFilter::FilterBasedCallData::SetPollent,
ClientChannelFilter::FilterBasedCallData::Destroy,
sizeof(ClientChannelFilter),
ClientChannelFilter::Init,
grpc_channel_stack_no_post_init,
ClientChannelFilter::Destroy,
ClientChannelFilter::GetChannelInfo,
"client-channel",
};
const grpc_channel_filter ClientChannelFilter::kFilterVtableWithoutPromises = {
ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch,
nullptr,
/* init_call: */ nullptr,
ClientChannelFilter::StartTransportOp, ClientChannelFilter::StartTransportOp,
sizeof(ClientChannelFilter::FilterBasedCallData), sizeof(ClientChannelFilter::FilterBasedCallData),
ClientChannelFilter::FilterBasedCallData::Init, ClientChannelFilter::FilterBasedCallData::Init,
@ -466,19 +359,6 @@ class DynamicTerminationFilter final {
static void GetChannelInfo(grpc_channel_element* /*elem*/, static void GetChannelInfo(grpc_channel_element* /*elem*/,
const grpc_channel_info* /*info*/) {} const grpc_channel_info* /*info*/) {}
static ArenaPromise<ServerMetadataHandle> MakeCallPromise(
grpc_channel_element* elem, CallArgs call_args, NextPromiseFactory) {
auto* chand = static_cast<DynamicTerminationFilter*>(elem->channel_data);
return chand->chand_->CreateLoadBalancedCallPromise(
std::move(call_args),
[]() {
auto* service_config_call_data =
GetServiceConfigCallData(GetContext<Arena>());
service_config_call_data->Commit();
},
/*is_transparent_retry=*/false);
}
private: private:
explicit DynamicTerminationFilter(const ChannelArgs& args) explicit DynamicTerminationFilter(const ChannelArgs& args)
: chand_(args.GetObject<ClientChannelFilter>()) {} : chand_(args.GetObject<ClientChannelFilter>()) {}
@ -559,8 +439,6 @@ class DynamicTerminationFilter::CallData final {
const grpc_channel_filter DynamicTerminationFilter::kFilterVtable = { const grpc_channel_filter DynamicTerminationFilter::kFilterVtable = {
DynamicTerminationFilter::CallData::StartTransportStreamOpBatch, DynamicTerminationFilter::CallData::StartTransportStreamOpBatch,
DynamicTerminationFilter::MakeCallPromise,
/* init_call: */ nullptr,
DynamicTerminationFilter::StartTransportOp, DynamicTerminationFilter::StartTransportOp,
sizeof(DynamicTerminationFilter::CallData), sizeof(DynamicTerminationFilter::CallData),
DynamicTerminationFilter::CallData::Init, DynamicTerminationFilter::CallData::Init,
@ -1190,8 +1068,7 @@ class ClientChannelFilter::ClientChannelControlHelper final
grpc_error_handle ClientChannelFilter::Init(grpc_channel_element* elem, grpc_error_handle ClientChannelFilter::Init(grpc_channel_element* elem,
grpc_channel_element_args* args) { grpc_channel_element_args* args) {
CHECK(args->is_last); CHECK(args->is_last);
-  CHECK(elem->filter == &kFilterVtableWithPromises ||
-        elem->filter == &kFilterVtableWithoutPromises);
+  CHECK(elem->filter == &kFilter);
grpc_error_handle error; grpc_error_handle error;
new (elem->channel_data) ClientChannelFilter(args, &error); new (elem->channel_data) ClientChannelFilter(args, &error);
return error; return error;
@ -1308,21 +1185,6 @@ ClientChannelFilter::~ClientChannelFilter() {
grpc_pollset_set_destroy(interested_parties_); grpc_pollset_set_destroy(interested_parties_);
} }
ArenaPromise<ServerMetadataHandle> ClientChannelFilter::MakeCallPromise(
grpc_channel_element* elem, CallArgs call_args, NextPromiseFactory) {
auto* chand = static_cast<ClientChannelFilter*>(elem->channel_data);
// TODO(roth): Is this the right lifetime story for calld?
auto* calld = GetContext<Arena>()->ManagedNew<PromiseBasedCallData>(chand);
return TrySeq(
// Name resolution.
calld->MakeNameResolutionPromise(std::move(call_args)),
// Dynamic filter stack.
[calld](CallArgs call_args) mutable {
return calld->dynamic_filters()->channel_stack()->MakeClientCallPromise(
std::move(call_args));
});
}
OrphanablePtr<ClientChannelFilter::FilterBasedLoadBalancedCall> OrphanablePtr<ClientChannelFilter::FilterBasedLoadBalancedCall>
ClientChannelFilter::CreateLoadBalancedCall( ClientChannelFilter::CreateLoadBalancedCall(
const grpc_call_element_args& args, grpc_polling_entity* pollent, const grpc_call_element_args& args, grpc_polling_entity* pollent,
@ -1335,17 +1197,6 @@ ClientChannelFilter::CreateLoadBalancedCall(
std::move(on_commit), is_transparent_retry)); std::move(on_commit), is_transparent_retry));
} }
ArenaPromise<ServerMetadataHandle>
ClientChannelFilter::CreateLoadBalancedCallPromise(
CallArgs call_args, absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry) {
OrphanablePtr<PromiseBasedLoadBalancedCall> lb_call(
GetContext<Arena>()->New<PromiseBasedLoadBalancedCall>(
this, std::move(on_commit), is_transparent_retry));
auto* call_ptr = lb_call.get();
return call_ptr->MakeCallPromise(std::move(call_args), std::move(lb_call));
}
void ClientChannelFilter::ReprocessQueuedResolverCalls() { void ClientChannelFilter::ReprocessQueuedResolverCalls() {
for (CallData* calld : resolver_queued_calls_) { for (CallData* calld : resolver_queued_calls_) {
calld->RemoveCallFromResolverQueuedCallsLocked(); calld->RemoveCallFromResolverQueuedCallsLocked();
@ -3443,7 +3294,7 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::CreateSubchannelCall() {
connected_subchannel()->Ref(), pollent_, path->Ref(), /*start_time=*/0, connected_subchannel()->Ref(), pollent_, path->Ref(), /*start_time=*/0,
arena()->GetContext<Call>()->deadline(), arena()->GetContext<Call>()->deadline(),
// TODO(roth): When we implement hedging support, we will probably // TODO(roth): When we implement hedging support, we will probably
// need to use a separate arena for each subchannel call. // need to use a separate call arena for each subchannel call.
arena(), call_combiner_}; arena(), call_combiner_};
grpc_error_handle error; grpc_error_handle error;
subchannel_call_ = SubchannelCall::Create(std::move(call_args), &error); subchannel_call_ = SubchannelCall::Create(std::move(call_args), &error);
@ -3463,146 +3314,4 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::CreateSubchannelCall() {
} }
} }
//
// ClientChannelFilter::PromiseBasedLoadBalancedCall
//
ClientChannelFilter::PromiseBasedLoadBalancedCall::PromiseBasedLoadBalancedCall(
ClientChannelFilter* chand, absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry)
: LoadBalancedCall(chand, GetContext<Arena>(), std::move(on_commit),
is_transparent_retry) {}
ArenaPromise<ServerMetadataHandle>
ClientChannelFilter::PromiseBasedLoadBalancedCall::MakeCallPromise(
CallArgs call_args, OrphanablePtr<PromiseBasedLoadBalancedCall> lb_call) {
pollent_ = NowOrNever(call_args.polling_entity->WaitAndCopy()).value();
// Record ops in tracer.
if (call_attempt_tracer() != nullptr) {
call_attempt_tracer()->RecordSendInitialMetadata(
call_args.client_initial_metadata.get());
// TODO(ctiller): Find a way to do this without registering a no-op mapper.
call_args.client_to_server_messages->InterceptAndMapWithHalfClose(
[](MessageHandle message) { return message; }, // No-op.
[this]() {
// TODO(roth): Change CallTracer API to not pass metadata
// batch to this method, since the batch is always empty.
grpc_metadata_batch metadata;
call_attempt_tracer()->RecordSendTrailingMetadata(&metadata);
});
}
// Extract peer name from server initial metadata.
call_args.server_initial_metadata->InterceptAndMap(
[self = lb_call->RefAsSubclass<PromiseBasedLoadBalancedCall>()](
ServerMetadataHandle metadata) {
if (self->call_attempt_tracer() != nullptr) {
self->call_attempt_tracer()->RecordReceivedInitialMetadata(
metadata.get());
}
Slice* peer_string = metadata->get_pointer(PeerString());
if (peer_string != nullptr) self->peer_string_ = peer_string->Ref();
return metadata;
});
client_initial_metadata_ = std::move(call_args.client_initial_metadata);
return OnCancel(
Map(TrySeq(
// LB pick.
[this]() -> Poll<absl::Status> {
auto result = PickSubchannel(was_queued_);
if (GRPC_TRACE_FLAG_ENABLED(
grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: %sPickSubchannel() returns %s",
chand(), this,
GetContext<Activity>()->DebugTag().c_str(),
result.has_value() ? result->ToString().c_str()
: "Pending");
}
if (result == absl::nullopt) return Pending{};
return std::move(*result);
},
[this, call_args = std::move(call_args)]() mutable
-> ArenaPromise<ServerMetadataHandle> {
call_args.client_initial_metadata =
std::move(client_initial_metadata_);
return connected_subchannel()->MakeCallPromise(
std::move(call_args));
}),
// Record call completion.
[this](ServerMetadataHandle metadata) {
if (call_attempt_tracer() != nullptr ||
lb_subchannel_call_tracker() != nullptr) {
absl::Status status;
grpc_status_code code = metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN);
if (code != GRPC_STATUS_OK) {
absl::string_view message;
if (const auto* grpc_message =
metadata->get_pointer(GrpcMessageMetadata())) {
message = grpc_message->as_string_view();
}
status =
absl::Status(static_cast<absl::StatusCode>(code), message);
}
RecordCallCompletion(status, metadata.get(),
&GetContext<CallContext>()
->call_stats()
->transport_stream_stats,
peer_string_.as_string_view());
}
RecordLatency();
return metadata;
}),
[lb_call = std::move(lb_call)]() {
// If the waker is pending, then we need to remove ourself from
// the list of queued LB calls.
if (!lb_call->waker_.is_unwakeable()) {
MutexLock lock(&lb_call->chand()->lb_mu_);
lb_call->Commit();
// Remove pick from list of queued picks.
lb_call->RemoveCallFromLbQueuedCallsLocked();
// Remove from queued picks list.
lb_call->chand()->lb_queued_calls_.erase(lb_call.get());
}
// TODO(ctiller): We don't have access to the call's actual status
// here, so we just assume CANCELLED. We could change this to use
// CallFinalization instead of OnCancel() so that we can get the
// actual status. But we should also have access to the trailing
// metadata, which we don't have in either case. Ultimately, we
// need a better story for code that needs to run at the end of a
// call in both cancellation and non-cancellation cases that needs
// access to server trailing metadata and the call's real status.
if (lb_call->call_attempt_tracer() != nullptr) {
lb_call->call_attempt_tracer()->RecordCancel(
absl::CancelledError("call cancelled"));
}
if (lb_call->call_attempt_tracer() != nullptr ||
lb_call->lb_subchannel_call_tracker() != nullptr) {
// If we were cancelled without recording call completion, then
// record call completion here, as best we can. We assume status
// CANCELLED in this case.
lb_call->RecordCallCompletion(absl::CancelledError("call cancelled"),
nullptr, nullptr, "");
}
});
}
grpc_metadata_batch*
ClientChannelFilter::PromiseBasedLoadBalancedCall::send_initial_metadata()
const {
return client_initial_metadata_.get();
}
void ClientChannelFilter::PromiseBasedLoadBalancedCall::OnAddToQueueLocked() {
waker_ = GetContext<Activity>()->MakeNonOwningWaker();
was_queued_ = true;
}
void ClientChannelFilter::PromiseBasedLoadBalancedCall::RetryPickLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: RetryPickLocked()", chand(), this);
}
waker_.WakeupAsync();
}
} // namespace grpc_core } // namespace grpc_core

@ -57,8 +57,6 @@
#include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/iomgr_fwd.h" #include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/resource_quota/arena.h" #include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice.h" #include "src/core/lib/slice/slice.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
@ -99,12 +97,10 @@ namespace grpc_core {
class ClientChannelFilter final { class ClientChannelFilter final {
public: public:
static const grpc_channel_filter kFilterVtableWithPromises; static const grpc_channel_filter kFilter;
static const grpc_channel_filter kFilterVtableWithoutPromises;
class LoadBalancedCall; class LoadBalancedCall;
class FilterBasedLoadBalancedCall; class FilterBasedLoadBalancedCall;
class PromiseBasedLoadBalancedCall;
// Flag that this object gets stored in channel args as a raw pointer. // Flag that this object gets stored in channel args as a raw pointer.
struct RawPointerChannelArgTag {}; struct RawPointerChannelArgTag {};
@ -112,10 +108,6 @@ class ClientChannelFilter final {
return "grpc.internal.client_channel_filter"; return "grpc.internal.client_channel_filter";
} }
static ArenaPromise<ServerMetadataHandle> MakeCallPromise(
grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next_promise_factory);
grpc_connectivity_state CheckConnectivityState(bool try_to_connect); grpc_connectivity_state CheckConnectivityState(bool try_to_connect);
// Starts a one-time connectivity state watch. When the channel's state // Starts a one-time connectivity state watch. When the channel's state
@ -160,14 +152,9 @@ class ClientChannelFilter final {
grpc_closure* on_call_destruction_complete, grpc_closure* on_call_destruction_complete,
absl::AnyInvocable<void()> on_commit, bool is_transparent_retry); absl::AnyInvocable<void()> on_commit, bool is_transparent_retry);
ArenaPromise<ServerMetadataHandle> CreateLoadBalancedCallPromise(
CallArgs call_args, absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry);
private: private:
class CallData; class CallData;
class FilterBasedCallData; class FilterBasedCallData;
class PromiseBasedCallData;
class ResolverResultHandler; class ResolverResultHandler;
class SubchannelWrapper; class SubchannelWrapper;
class ClientChannelControlHelper; class ClientChannelControlHelper;
@ -581,32 +568,6 @@ class ClientChannelFilter::FilterBasedLoadBalancedCall final
grpc_transport_stream_op_batch* pending_batches_[MAX_PENDING_BATCHES] = {}; grpc_transport_stream_op_batch* pending_batches_[MAX_PENDING_BATCHES] = {};
}; };
class ClientChannelFilter::PromiseBasedLoadBalancedCall final
: public ClientChannelFilter::LoadBalancedCall {
public:
PromiseBasedLoadBalancedCall(ClientChannelFilter* chand,
absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry);
ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args, OrphanablePtr<PromiseBasedLoadBalancedCall> lb_call);
private:
grpc_polling_entity* pollent() override { return &pollent_; }
grpc_metadata_batch* send_initial_metadata() const override;
void RetryPickLocked() override;
void OnAddToQueueLocked() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannelFilter::lb_mu_);
grpc_polling_entity pollent_;
ClientMetadataHandle client_initial_metadata_;
Waker waker_;
bool was_queued_ = false;
Slice peer_string_;
};
} // namespace grpc_core } // namespace grpc_core
#endif // GRPC_SRC_CORE_CLIENT_CHANNEL_CLIENT_CHANNEL_FILTER_H #endif // GRPC_SRC_CORE_CLIENT_CHANNEL_CLIENT_CHANNEL_FILTER_H

@ -31,24 +31,11 @@
namespace grpc_core { namespace grpc_core {
namespace {
bool IsEverythingBelowClientChannelPromiseSafe(const ChannelArgs& args) {
return !args.GetBool(GRPC_ARG_ENABLE_RETRIES).value_or(true);
}
} // namespace
void BuildClientChannelConfiguration(CoreConfiguration::Builder* builder) { void BuildClientChannelConfiguration(CoreConfiguration::Builder* builder) {
internal::ClientChannelServiceConfigParser::Register(builder); internal::ClientChannelServiceConfigParser::Register(builder);
internal::RetryServiceConfigParser::Register(builder); internal::RetryServiceConfigParser::Register(builder);
builder->channel_init() builder->channel_init()
->RegisterFilter(GRPC_CLIENT_CHANNEL, ->RegisterV2Filter<ClientChannelFilter>(GRPC_CLIENT_CHANNEL)
&ClientChannelFilter::kFilterVtableWithPromises)
.If(IsEverythingBelowClientChannelPromiseSafe)
.Terminal();
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_CHANNEL,
&ClientChannelFilter::kFilterVtableWithoutPromises)
.IfNot(IsEverythingBelowClientChannelPromiseSafe)
.Terminal(); .Terminal();
} }

@ -18,6 +18,7 @@
#include "src/core/client_channel/client_channel_internal.h" #include "src/core/client_channel/client_channel_internal.h"
#include "src/core/client_channel/subchannel.h" #include "src/core/client_channel/subchannel.h"
#include "src/core/lib/channel/status_util.h" #include "src/core/lib/channel/status_util.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/promise/loop.h" #include "src/core/lib/promise/loop.h"
#include "src/core/telemetry/call_tracer.h" #include "src/core/telemetry/call_tracer.h"
@ -289,6 +290,7 @@ void LoadBalancedCallDestination::StartCall(
[unstarted_handler, &last_picker]( [unstarted_handler, &last_picker](
RefCountedPtr<LoadBalancingPolicy::SubchannelPicker> RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>
picker) mutable { picker) mutable {
CHECK_NE(picker.get(), nullptr);
last_picker = std::move(picker); last_picker = std::move(picker);
// Returns 3 possible things: // Returns 3 possible things:
// - Continue to queue the pick // - Continue to queue the pick
@ -330,4 +332,20 @@ void LoadBalancedCallDestination::StartCall(
}); });
} }
void RegisterLoadBalancedCallDestination(CoreConfiguration::Builder* builder) {
class LoadBalancedCallDestinationFactory final
: public ClientChannel::CallDestinationFactory {
public:
RefCountedPtr<UnstartedCallDestination> CreateCallDestination(
ClientChannel::PickerObservable picker) override {
return MakeRefCounted<LoadBalancedCallDestination>(std::move(picker));
}
};
builder->channel_args_preconditioning()->RegisterStage([](ChannelArgs args) {
return args.SetObject(
NoDestructSingleton<LoadBalancedCallDestinationFactory>::Get());
});
}
} // namespace grpc_core
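The registration above stores a never-destructed factory object in the channel args during args preconditioning so that the client channel can later look it up and ask it for a call destination. A toy version of that pattern, with a std::map standing in for ChannelArgs and NoDestructSingleton:

#include <map>
#include <string>

struct CallDestination {};
struct Factory {
  CallDestination* Create() { static CallDestination d; return &d; }
};

using ToyArgs = std::map<std::string, void*>;

ToyArgs PreconditionArgs(ToyArgs args) {
  static Factory factory;  // never destructed, like NoDestructSingleton
  args["call_destination_factory"] = &factory;
  return args;
}

CallDestination* MakeDestination(const ToyArgs& args) {
  auto it = args.find("call_destination_factory");
  if (it == args.end()) return nullptr;
  return static_cast<Factory*>(it->second)->Create();
}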

@ -139,8 +139,6 @@ const RetryMethodConfig* RetryFilter::GetRetryPolicy(Arena* arena) {
const grpc_channel_filter RetryFilter::kVtable = { const grpc_channel_filter RetryFilter::kVtable = {
RetryFilter::LegacyCallData::StartTransportStreamOpBatch, RetryFilter::LegacyCallData::StartTransportStreamOpBatch,
nullptr,
/* init_call: */ nullptr,
RetryFilter::StartTransportOp, RetryFilter::StartTransportOp,
sizeof(RetryFilter::LegacyCallData), sizeof(RetryFilter::LegacyCallData),
RetryFilter::LegacyCallData::Init, RetryFilter::LegacyCallData::Init,

@ -157,36 +157,6 @@ class LegacyConnectedSubchannel : public ConnectedSubchannel {
channel_stack_->call_stack_size; channel_stack_->call_stack_size;
} }
ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args) override {
// If not using channelz, we just need to call the channel stack.
if (channelz_subchannel() == nullptr) {
return channel_stack_->MakeClientCallPromise(std::move(call_args));
}
// Otherwise, we need to wrap the channel stack promise with code that
// handles the channelz updates.
return OnCancel(
Seq(channel_stack_->MakeClientCallPromise(std::move(call_args)),
[self = Ref()](ServerMetadataHandle metadata) {
channelz::SubchannelNode* channelz_subchannel =
self->channelz_subchannel();
CHECK(channelz_subchannel != nullptr);
if (metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
channelz_subchannel->RecordCallFailed();
} else {
channelz_subchannel->RecordCallSucceeded();
}
return metadata;
}),
[self = Ref()]() {
channelz::SubchannelNode* channelz_subchannel =
self->channelz_subchannel();
CHECK(channelz_subchannel != nullptr);
channelz_subchannel->RecordCallFailed();
});
}
void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) override { void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) override {
grpc_transport_op* op = grpc_make_transport_op(nullptr); grpc_transport_op* op = grpc_make_transport_op(nullptr);
op->send_ping.on_initiate = on_initiate; op->send_ping.on_initiate = on_initiate;
@ -252,10 +222,6 @@ class NewConnectedSubchannel : public ConnectedSubchannel {
size_t GetInitialCallSizeEstimate() const override { return 0; } size_t GetInitialCallSizeEstimate() const override { return 0; }
ArenaPromise<ServerMetadataHandle> MakeCallPromise(CallArgs) override {
Crash("legacy MakeCallPromise() method called in call v3 impl");
}
void Ping(grpc_closure*, grpc_closure*) override { void Ping(grpc_closure*, grpc_closure*) override {
Crash("legacy ping method called in call v3 impl"); Crash("legacy ping method called in call v3 impl");
} }
@ -869,8 +835,7 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
bool Subchannel::PublishTransportLocked() { bool Subchannel::PublishTransportLocked() {
auto socket_node = std::move(connecting_result_.socket_node); auto socket_node = std::move(connecting_result_.socket_node);
-  if (connecting_result_.transport->filter_stack_transport() != nullptr ||
-      IsChaoticGoodEnabled()) {
+  if (connecting_result_.transport->filter_stack_transport() != nullptr) {
// Construct channel stack. // Construct channel stack.
// Builder takes ownership of transport. // Builder takes ownership of transport.
ChannelStackBuilderImpl builder( ChannelStackBuilderImpl builder(

@ -82,8 +82,6 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
// Methods for legacy stack. // Methods for legacy stack.
virtual grpc_channel_stack* channel_stack() const = 0; virtual grpc_channel_stack* channel_stack() const = 0;
virtual size_t GetInitialCallSizeEstimate() const = 0; virtual size_t GetInitialCallSizeEstimate() const = 0;
virtual ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args) = 0;
virtual void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) = 0; virtual void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) = 0;
protected: protected:

@ -303,15 +303,13 @@ void RegisterLegacyChannelIdleFilters(CoreConfiguration::Builder* builder) {
.If([](const ChannelArgs& channel_args) { .If([](const ChannelArgs& channel_args) {
return GetClientIdleTimeout(channel_args) != Duration::Infinity(); return GetClientIdleTimeout(channel_args) != Duration::Infinity();
}); });
-  if (!IsChaoticGoodEnabled()) {
-    builder->channel_init()
-        ->RegisterV2Filter<LegacyMaxAgeFilter>(GRPC_SERVER_CHANNEL)
-        .ExcludeFromMinimalStack()
-        .If([](const ChannelArgs& channel_args) {
-          return LegacyMaxAgeFilter::Config::FromChannelArgs(channel_args)
-              .enable();
-        });
-  }
+  builder->channel_init()
+      ->RegisterV2Filter<LegacyMaxAgeFilter>(GRPC_SERVER_CHANNEL)
+      .ExcludeFromMinimalStack()
+      .If([](const ChannelArgs& channel_args) {
+        return LegacyMaxAgeFilter::Config::FromChannelArgs(channel_args)
+            .enable();
+      });
 }
LegacyMaxAgeFilter::LegacyMaxAgeFilter(grpc_channel_stack* channel_stack, LegacyMaxAgeFilter::LegacyMaxAgeFilter(grpc_channel_stack* channel_stack,

@ -376,13 +376,14 @@ grpc_channel* grpc_chaotic_good_channel_create(const char* target,
grpc_core::CoreConfiguration::Get() grpc_core::CoreConfiguration::Get()
.channel_args_preconditioning() .channel_args_preconditioning()
.PreconditionChannelArgs(args) .PreconditionChannelArgs(args)
-          .SetObject(
-              grpc_core::NoDestructSingleton<
-                  grpc_core::chaotic_good::ChaoticGoodChannelFactory>::Get()),
+          .SetObject(grpc_core::NoDestructSingleton<
+                     grpc_core::chaotic_good::ChaoticGoodChannelFactory>::Get())
+          .Set(GRPC_ARG_USE_V3_STACK, true),
       GRPC_CLIENT_CHANNEL, nullptr);
if (r.ok()) { if (r.ok()) {
return r->release()->c_ptr(); return r->release()->c_ptr();
} }
LOG(ERROR) << "Failed to create chaotic good client channel: " << r.status();
error = absl_status_to_grpc_error(r.status()); error = absl_status_to_grpc_error(r.status());
intptr_t integer; intptr_t integer;
grpc_status_code status = GRPC_STATUS_INTERNAL; grpc_status_code status = GRPC_STATUS_INTERNAL;
@ -391,6 +392,6 @@ grpc_channel* grpc_chaotic_good_channel_create(const char* target,
status = static_cast<grpc_status_code>(integer); status = static_cast<grpc_status_code>(integer);
} }
channel = grpc_lame_client_channel_create( channel = grpc_lame_client_channel_create(
target, status, "Failed to create secure client channel"); target, status, "Failed to create chaotic good client channel");
return channel; return channel;
} }

@ -483,20 +483,21 @@ int grpc_server_add_chaotic_good_port(grpc_server* server, const char* addr) {
return 0; return 0;
} }
int port_num = 0; int port_num = 0;
std::vector<std::pair<std::string, absl::Status>> error_list;
for (const auto& resolved_addr : resolved_or.value()) { for (const auto& resolved_addr : resolved_or.value()) {
auto listener = grpc_core::MakeOrphanable< auto listener = grpc_core::MakeOrphanable<
grpc_core::chaotic_good::ChaoticGoodServerListener>( grpc_core::chaotic_good::ChaoticGoodServerListener>(
core_server, core_server->channel_args()); core_server, core_server->channel_args());
const auto ee_addr = const auto ee_addr =
grpc_event_engine::experimental::CreateResolvedAddress(resolved_addr); grpc_event_engine::experimental::CreateResolvedAddress(resolved_addr);
gpr_log(GPR_INFO, "BIND: %s", std::string addr_str =
grpc_event_engine::experimental::ResolvedAddressToString(ee_addr) *grpc_event_engine::experimental::ResolvedAddressToString(ee_addr);
->c_str()); LOG(INFO) << "BIND: " << addr_str;
auto bind_result = listener->Bind(ee_addr); auto bind_result = listener->Bind(ee_addr);
if (!bind_result.ok()) { if (!bind_result.ok()) {
LOG(ERROR) << "Failed to bind to " << addr << ": " error_list.push_back(
<< bind_result.status().ToString(); std::make_pair(std::move(addr_str), bind_result.status()));
return 0; continue;
} }
if (port_num == 0) { if (port_num == 0) {
port_num = bind_result.value(); port_num = bind_result.value();
@ -505,5 +506,16 @@ int grpc_server_add_chaotic_good_port(grpc_server* server, const char* addr) {
} }
core_server->AddListener(std::move(listener)); core_server->AddListener(std::move(listener));
} }
if (error_list.size() == resolved_or->size()) {
LOG(ERROR) << "Failed to bind any address for " << addr;
for (const auto& error : error_list) {
LOG(ERROR) << " " << error.first << ": " << error.second;
}
} else if (!error_list.empty()) {
LOG(INFO) << "Failed to bind some addresses for " << addr;
for (const auto& error : error_list) {
LOG(INFO) << " " << error.first << ": " << error.second;
}
}
return port_num; return port_num;
} }
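The bind loop above now treats a failed bind on one resolved address as non-fatal: failures are collected, and only when every address failed does the call report an error. A generic sketch of that aggregation, with placeholder types and a placeholder BindOne:

#include <string>
#include <utility>
#include <vector>

int BindAll(const std::vector<std::string>& addrs,
            int (*BindOne)(const std::string&, std::string* error)) {
  int port = 0;
  std::vector<std::pair<std::string, std::string>> errors;
  for (const auto& a : addrs) {
    std::string error;
    int p = BindOne(a, &error);
    if (p <= 0) { errors.emplace_back(a, error); continue; }
    if (port == 0) port = p;  // remember the first successfully bound port
  }
  if (errors.size() == addrs.size()) return 0;  // total failure
  // Partial failure: callers log the per-address errors but the server starts.
  return port;
}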

@ -78,24 +78,27 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
gpr_log(GPR_INFO, "CHAOTIC_GOOD: PushFragmentIntoCall: frame=%s", gpr_log(GPR_INFO, "CHAOTIC_GOOD: PushFragmentIntoCall: frame=%s",
frame.ToString().c_str()); frame.ToString().c_str());
} }
-  return TrySeq(If(
-                    frame.message.has_value(),
-                    [&call_initiator, &frame]() mutable {
-                      return call_initiator.PushMessage(
-                          std::move(frame.message->message));
-                    },
-                    []() -> StatusFlag { return Success{}; }),
-                [this, call_initiator, end_of_stream = frame.end_of_stream,
-                 stream_id]() mutable -> StatusFlag {
-                  if (end_of_stream) {
-                    call_initiator.FinishSends();
-                    // We have received end_of_stream. It is now safe to remove
-                    // the call from the stream map.
-                    MutexLock lock(&mu_);
-                    stream_map_.erase(stream_id);
-                  }
-                  return Success{};
-                });
+  return Seq(If(
+                 frame.message.has_value(),
+                 [&call_initiator, &frame]() mutable {
+                   return call_initiator.PushMessage(
+                       std::move(frame.message->message));
+                 },
+                 []() -> StatusFlag { return Success{}; }),
+             [this, call_initiator, end_of_stream = frame.end_of_stream,
+              stream_id](StatusFlag status) mutable -> StatusFlag {
+               if (!status.ok() && grpc_chaotic_good_trace.enabled()) {
+                 gpr_log(GPR_INFO, "CHAOTIC_GOOD: Failed PushFragmentIntoCall");
+               }
+               if (end_of_stream || !status.ok()) {
+                 call_initiator.FinishSends();
+                 // We have received end_of_stream. It is now safe to remove
+                 // the call from the stream map.
+                 MutexLock lock(&mu_);
+                 stream_map_.erase(stream_id);
+               }
+               return Success{};
+             });
} }
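The TrySeq-to-Seq change matters because TrySeq short-circuits on a failed PushMessage, which would skip the stream-map cleanup; Seq always runs the next step and hands it the previous status. A scalar sketch of the same control flow, with illustrative names only:

#include <functional>
#include <map>

bool PushThenCleanup(std::function<bool()> push_message,
                     bool end_of_stream, int stream_id,
                     std::map<int, int>& stream_map) {
  const bool ok = push_message();  // like Seq: the next step runs regardless
  if (end_of_stream || !ok) {
    stream_map.erase(stream_id);   // cleanup happens on failure too
  }
  return ok;
}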
auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall( auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
@ -340,13 +343,14 @@ auto ChaoticGoodServerTransport::TransportReadLoop(
 auto ChaoticGoodServerTransport::OnTransportActivityDone(
     absl::string_view activity) {
-  return [this, activity](absl::Status status) {
+  return [self = RefAsSubclass<ChaoticGoodServerTransport>(),
+          activity](absl::Status status) {
     if (grpc_chaotic_good_trace.enabled()) {
       gpr_log(GPR_INFO,
               "CHAOTIC_GOOD: OnTransportActivityDone: activity=%s status=%s",
               std::string(activity).c_str(), status.ToString().c_str());
     }
-    AbortWithError();
+    self->AbortWithError();
   };
 }
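Capturing a strong self-reference instead of a raw this keeps the transport alive until the completion callback has run. An analogous sketch using std::shared_ptr in place of gRPC's internal ref counting (the object must already be owned by a shared_ptr for shared_from_this to work):

#include <functional>
#include <memory>
#include <string>

class ToyTransport : public std::enable_shared_from_this<ToyTransport> {
 public:
  std::function<void(const std::string&)> OnActivityDone(std::string activity) {
    return [self = shared_from_this(), activity](const std::string& status) {
      // self keeps *this alive for the duration of the callback.
      self->AbortWithError(activity + ": " + status);
    };
  }
  void AbortWithError(const std::string&) { /* tear down streams... */ }
};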

@ -290,7 +290,7 @@ class Chttp2SecureClientChannelFactory : public ClientChannelFactory {
} }
}; };
absl::StatusOr<OrphanablePtr<Channel>> CreateChannel(const char* target, absl::StatusOr<RefCountedPtr<Channel>> CreateChannel(const char* target,
const ChannelArgs& args) { const ChannelArgs& args) {
if (target == nullptr) { if (target == nullptr) {
LOG(ERROR) << "cannot create channel with NULL target name"; LOG(ERROR) << "cannot create channel with NULL target name";

@ -171,9 +171,7 @@ class InprocClientTransport final : public ClientTransport {
}; };
 bool UsePromiseBasedTransport() {
-  if (!IsPromiseBasedInprocTransportEnabled()) return false;
-  CHECK(IsPromiseBasedClientCallEnabled());
-  return true;
+  return IsPromiseBasedInprocTransportEnabled();
 }
OrphanablePtr<InprocClientTransport> OrphanablePtr<InprocClientTransport>
@ -182,7 +180,7 @@ InprocServerTransport::MakeClientTransport() {
RefAsSubclass<InprocServerTransport>()); RefAsSubclass<InprocServerTransport>());
} }
OrphanablePtr<Channel> MakeLameChannel(absl::string_view why, RefCountedPtr<Channel> MakeLameChannel(absl::string_view why,
absl::Status error) { absl::Status error) {
gpr_log(GPR_ERROR, "%s: %s", std::string(why).c_str(), gpr_log(GPR_ERROR, "%s: %s", std::string(why).c_str(),
std::string(error.message()).c_str()); std::string(error.message()).c_str());
@ -191,11 +189,11 @@ OrphanablePtr<Channel> MakeLameChannel(absl::string_view why,
if (grpc_error_get_int(error, StatusIntProperty::kRpcStatus, &integer)) { if (grpc_error_get_int(error, StatusIntProperty::kRpcStatus, &integer)) {
status = static_cast<grpc_status_code>(integer); status = static_cast<grpc_status_code>(integer);
} }
return OrphanablePtr<Channel>(Channel::FromC(grpc_lame_client_channel_create( return RefCountedPtr<Channel>(Channel::FromC(grpc_lame_client_channel_create(
nullptr, status, std::string(why).c_str()))); nullptr, status, std::string(why).c_str())));
} }
OrphanablePtr<Channel> MakeInprocChannel(Server* server, RefCountedPtr<Channel> MakeInprocChannel(Server* server,
ChannelArgs client_channel_args) { ChannelArgs client_channel_args) {
auto transports = MakeInProcessTransportPair(server->channel_args()); auto transports = MakeInProcessTransportPair(server->channel_args());
auto client_transport = std::move(transports.first); auto client_transport = std::move(transports.first);

@ -124,8 +124,7 @@ grpc_error_handle grpc_channel_stack_init(
if (grpc_trace_channel_stack.enabled()) { if (grpc_trace_channel_stack.enabled()) {
LOG(INFO) << "CHANNEL_STACK: init " << name; LOG(INFO) << "CHANNEL_STACK: init " << name;
     for (size_t i = 0; i < filter_count; i++) {
-      gpr_log(GPR_INFO, "CHANNEL_STACK: filter %s%s", filters[i]->name,
-              filters[i]->make_call_promise ? " [promise-capable]" : "");
+      LOG(INFO) << "CHANNEL_STACK: filter " << filters[i]->name;
} }
} }
@ -297,35 +296,6 @@ grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem) {
void grpc_channel_stack_no_post_init(grpc_channel_stack*, void grpc_channel_stack_no_post_init(grpc_channel_stack*,
grpc_channel_element*) {} grpc_channel_element*) {}
namespace {
grpc_core::NextPromiseFactory ClientNext(grpc_channel_element* elem) {
return [elem](grpc_core::CallArgs args) {
return elem->filter->make_call_promise(elem, std::move(args),
ClientNext(elem + 1));
};
}
} // namespace
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle>
grpc_channel_stack::MakeClientCallPromise(grpc_core::CallArgs call_args) {
return ClientNext(grpc_channel_stack_element(this, 0))(std::move(call_args));
}
void grpc_channel_stack::InitClientCallSpine(
grpc_core::CallSpineInterface* call) {
for (size_t i = 0; i < count; i++) {
auto* elem = grpc_channel_stack_element(this, i);
if (elem->filter->init_call == nullptr) {
grpc_core::Crash(
absl::StrCat("Filter '", elem->filter->name,
"' does not support the call-v3 interface"));
}
elem->filter->init_call(elem, call);
}
}
void grpc_call_log_op(const char* file, int line, gpr_log_severity severity, void grpc_call_log_op(const char* file, int line, gpr_log_severity severity,
grpc_call_element* elem, grpc_call_element* elem,
grpc_transport_stream_op_batch* op) { grpc_transport_stream_op_batch* op) {

@ -105,24 +105,6 @@ struct grpc_channel_filter {
// See grpc_call_next_op on how to call the next element in the stack // See grpc_call_next_op on how to call the next element in the stack
void (*start_transport_stream_op_batch)(grpc_call_element* elem, void (*start_transport_stream_op_batch)(grpc_call_element* elem,
grpc_transport_stream_op_batch* op); grpc_transport_stream_op_batch* op);
// Create a promise to execute one call.
// If this is non-null, it may be used in preference to
// start_transport_stream_op_batch.
// If this is used in preference to start_transport_stream_op_batch, the
// following can be omitted also:
// - calling init_call_elem, destroy_call_elem, set_pollset_or_pollset_set
// - allocation of memory for call data
// There is an on-going migration to move all filters to providing this, and
// then to drop start_transport_stream_op_batch.
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle> (*make_call_promise)(
grpc_channel_element* elem, grpc_core::CallArgs call_args,
grpc_core::NextPromiseFactory next_promise_factory);
// Register interceptors into a call.
// If this is non-null it may be used in preference to make_call_promise.
// There is an on-going migration to move all filters to providing this, and
// then to drop start_transport_stream_op_batch.
void (*init_call)(grpc_channel_element* elem,
grpc_core::CallSpineInterface* call_spine);
// Called to handle channel level operations - e.g. new calls, or transport // Called to handle channel level operations - e.g. new calls, or transport
// closure. // closure.
// See grpc_channel_next_op on how to call the next element in the stack // See grpc_channel_next_op on how to call the next element in the stack
@ -233,13 +215,6 @@ struct grpc_channel_stack {
IncrementRefCount(); IncrementRefCount();
return grpc_core::RefCountedPtr<grpc_channel_stack>(this); return grpc_core::RefCountedPtr<grpc_channel_stack>(this);
} }
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle>
MakeClientCallPromise(grpc_core::CallArgs call_args);
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle>
MakeServerCallPromise(grpc_core::CallArgs call_args);
void InitClientCallSpine(grpc_core::CallSpineInterface* call);
}; };
// A call stack tracks a set of related filters for one call, and guarantees // A call stack tracks a set of related filters for one call, and guarantees

@ -74,11 +74,6 @@ class ChannelStackBuilder {
// Helper to add a filter to the end of the stack. // Helper to add a filter to the end of the stack.
void AppendFilter(const grpc_channel_filter* filter); void AppendFilter(const grpc_channel_filter* filter);
// Determine whether a promise-based call stack is able to be built.
// Iterates each filter and ensures that there's a promise factory there.
// This will go away once the promise conversion is completed.
virtual bool IsPromising() const = 0;
// Build the channel stack. // Build the channel stack.
// After success, *result holds the new channel stack, // After success, *result holds the new channel stack,
// prefix_bytes are allocated before the channel stack, // prefix_bytes are allocated before the channel stack,

@ -54,153 +54,12 @@
namespace grpc_core { namespace grpc_core {
namespace {
const grpc_channel_filter* PromiseTracingFilterFor(
const grpc_channel_filter* filter) {
struct DerivedFilter : public grpc_channel_filter {
explicit DerivedFilter(const grpc_channel_filter* filter)
: grpc_channel_filter{
// start_transport_stream_op_batch:
grpc_call_next_op,
// make_call_promise:
[](grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next_promise_factory)
-> ArenaPromise<ServerMetadataHandle> {
auto* source_filter =
static_cast<const DerivedFilter*>(elem->filter)->filter;
gpr_log(
GPR_DEBUG,
"%s[%s] CreateCallPromise: client_initial_metadata=%s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name,
call_args.client_initial_metadata->DebugString().c_str());
return [source_filter, child = next_promise_factory(
std::move(call_args))]() mutable {
gpr_log(GPR_DEBUG, "%s[%s] PollCallPromise: begin",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name);
auto r = child();
if (auto* p = r.value_if_ready()) {
gpr_log(GPR_DEBUG, "%s[%s] PollCallPromise: done: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, (*p)->DebugString().c_str());
} else {
gpr_log(GPR_DEBUG, "%s[%s] PollCallPromise: <<pending>>",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name);
}
return r;
};
},
/* init_call: */
[](grpc_channel_element* elem, CallSpineInterface* call) {
auto* c = DownCast<PipeBasedCallSpine*>(call);
auto* source_filter =
static_cast<const DerivedFilter*>(elem->filter)->filter;
c->client_initial_metadata().receiver.InterceptAndMap(
[source_filter](ClientMetadataHandle md) {
gpr_log(GPR_DEBUG, "%s[%s] OnClientInitialMetadata: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, md->DebugString().c_str());
return md;
});
c->client_to_server_messages().receiver.InterceptAndMap(
[source_filter](MessageHandle msg) {
gpr_log(GPR_DEBUG, "%s[%s] OnClientToServerMessage: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, msg->DebugString().c_str());
return msg;
});
c->server_initial_metadata().sender.InterceptAndMap(
[source_filter](ServerMetadataHandle md) {
gpr_log(GPR_DEBUG, "%s[%s] OnServerInitialMetadata: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, md->DebugString().c_str());
return md;
});
c->server_to_client_messages().sender.InterceptAndMap(
[source_filter](MessageHandle msg) {
gpr_log(GPR_DEBUG, "%s[%s] OnServerToClientMessage: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, msg->DebugString().c_str());
return msg;
});
},
grpc_channel_next_op,
/* sizeof_call_data: */ 0,
// init_call_elem:
[](grpc_call_element*, const grpc_call_element_args*) {
return absl::OkStatus();
},
grpc_call_stack_ignore_set_pollset_or_pollset_set,
// destroy_call_elem:
[](grpc_call_element*, const grpc_call_final_info*,
grpc_closure*) {},
// sizeof_channel_data:
0,
// init_channel_elem:
[](grpc_channel_element*, grpc_channel_element_args*) {
return absl::OkStatus();
},
// post_init_channel_elem:
[](grpc_channel_stack*, grpc_channel_element*) {},
// destroy_channel_elem:
[](grpc_channel_element*) {}, grpc_channel_next_get_info,
// name:
nullptr},
filter(filter),
name_str(absl::StrCat(filter->name, ".trace")) {
this->name = name_str.c_str();
}
const grpc_channel_filter* const filter;
const std::string name_str;
};
struct Globals {
Mutex mu;
absl::flat_hash_map<const grpc_channel_filter*,
std::unique_ptr<DerivedFilter>>
map ABSL_GUARDED_BY(mu);
};
auto* globals = NoDestructSingleton<Globals>::Get();
MutexLock lock(&globals->mu);
auto it = globals->map.find(filter);
if (it != globals->map.end()) return it->second.get();
return globals->map.emplace(filter, std::make_unique<DerivedFilter>(filter))
.first->second.get();
}
} // namespace
bool ChannelStackBuilderImpl::IsPromising() const {
for (const auto* filter : stack()) {
if (filter->make_call_promise == nullptr) return false;
}
return true;
}
absl::StatusOr<RefCountedPtr<grpc_channel_stack>> absl::StatusOr<RefCountedPtr<grpc_channel_stack>>
ChannelStackBuilderImpl::Build() { ChannelStackBuilderImpl::Build() {
std::vector<const grpc_channel_filter*> stack; std::vector<const grpc_channel_filter*> stack;
const bool is_promising = IsPromising();
const bool is_client =
grpc_channel_stack_type_is_client(channel_stack_type());
const bool client_promise_tracing =
is_client && is_promising && grpc_call_trace.enabled();
const bool server_promise_tracing =
!is_client && is_promising && grpc_call_trace.enabled();
for (const auto* filter : this->stack()) { for (const auto* filter : this->stack()) {
if (client_promise_tracing) {
stack.push_back(PromiseTracingFilterFor(filter));
}
stack.push_back(filter); stack.push_back(filter);
if (server_promise_tracing) {
stack.push_back(PromiseTracingFilterFor(filter));
}
}
if (server_promise_tracing) {
stack.pop_back(); // connected_channel must be last => can't be traced
} }
// calculate the size of the channel stack // calculate the size of the channel stack

@ -34,8 +34,6 @@ class ChannelStackBuilderImpl final : public ChannelStackBuilder {
public: public:
using ChannelStackBuilder::ChannelStackBuilder; using ChannelStackBuilder::ChannelStackBuilder;
bool IsPromising() const override;
// Build the channel stack. // Build the channel stack.
// After success, *result holds the new channel stack, // After success, *result holds the new channel stack,
// prefix_bytes are allocated before the channel stack, // prefix_bytes are allocated before the channel stack,

@ -74,7 +74,6 @@
#include "src/core/lib/surface/call.h" #include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h" #include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_stack_type.h" #include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/batch_builder.h"
#include "src/core/lib/transport/error_utils.h" #include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h" #include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h" #include "src/core/lib/transport/transport.h"
@ -245,683 +244,48 @@ static void connected_channel_get_channel_info(
namespace grpc_core { namespace grpc_core {
namespace { namespace {
const grpc_channel_filter kConnectedFilter{
#if defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL) || \ connected_channel_start_transport_stream_op_batch,
defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL) connected_channel_start_transport_op,
class ConnectedChannelStream : public Orphanable { sizeof(call_data),
public: connected_channel_init_call_elem,
explicit ConnectedChannelStream(Transport* transport) set_pollset_or_pollset_set,
: transport_(transport), stream_(nullptr, StreamDeleter(this)) { connected_channel_destroy_call_elem,
GRPC_STREAM_REF_INIT( sizeof(channel_data),
&stream_refcount_, 1, connected_channel_init_channel_elem,
[](void* p, grpc_error_handle) { +[](grpc_channel_stack* channel_stack, grpc_channel_element* elem) {
static_cast<ConnectedChannelStream*>(p)->BeginDestroy(); // HACK(ctiller): increase call stack size for the channel to make
}, // space for channel data. We need a cleaner (but performant) way to
this, "ConnectedChannelStream"); // do this, and I'm not sure what that is yet. This is only "safe"
} // because call stacks place no additional data after the last call
// element, and the last call element MUST be the connected channel.
Transport* transport() { return transport_; } auto* transport =
grpc_closure* stream_destroyed_closure() { return &stream_destroyed_; } static_cast<channel_data*>(elem->channel_data)->transport;
if (transport->filter_stack_transport() != nullptr) {
BatchBuilder::Target batch_target() { channel_stack->call_stack_size +=
return BatchBuilder::Target{transport_, stream_.get(), &stream_refcount_}; transport->filter_stack_transport()->SizeOfStream();
} }
},
void IncrementRefCount(const char* reason = "smartptr") { connected_channel_destroy_channel_elem,
#ifndef NDEBUG connected_channel_get_channel_info,
grpc_stream_ref(&stream_refcount_, reason); "connected",
#else
(void)reason;
grpc_stream_ref(&stream_refcount_);
#endif
}
void Unref(const char* reason = "smartptr") {
#ifndef NDEBUG
grpc_stream_unref(&stream_refcount_, reason);
#else
(void)reason;
grpc_stream_unref(&stream_refcount_);
#endif
}
RefCountedPtr<ConnectedChannelStream> InternalRef() {
IncrementRefCount("smartptr");
return RefCountedPtr<ConnectedChannelStream>(this);
}
void Orphan() final {
bool finished = finished_.IsSet();
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Orphan stream, finished: %d",
party_->DebugTag().c_str(), finished);
}
// If we hadn't already observed the stream to be finished, we need to
// cancel it at the transport.
if (!finished) {
party_->Spawn(
"finish",
[self = InternalRef()]() {
if (!self->finished_.IsSet()) {
self->finished_.Set();
}
return Empty{};
},
[](Empty) {});
GetContext<BatchBuilder>()->Cancel(batch_target(),
absl::CancelledError());
}
Unref("orphan connected stream");
}
// Returns a promise that implements the receive message loop.
auto RecvMessages(PipeSender<MessageHandle>* incoming_messages,
bool cancel_on_error);
// Returns a promise that implements the send message loop.
auto SendMessages(PipeReceiver<MessageHandle>* outgoing_messages);
void SetStream(grpc_stream* stream) { stream_.reset(stream); }
grpc_stream* stream() { return stream_.get(); }
grpc_stream_refcount* stream_refcount() { return &stream_refcount_; }
void set_finished() { finished_.Set(); }
auto WaitFinished() { return finished_.Wait(); }
private:
class StreamDeleter {
public:
explicit StreamDeleter(ConnectedChannelStream* impl) : impl_(impl) {}
void operator()(grpc_stream* stream) const {
if (stream == nullptr) return;
impl_->transport()->filter_stack_transport()->DestroyStream(
stream, impl_->stream_destroyed_closure());
}
private:
ConnectedChannelStream* impl_;
};
using StreamPtr = std::unique_ptr<grpc_stream, StreamDeleter>;
void StreamDestroyed() {
call_context_->RunInContext([this] { this->~ConnectedChannelStream(); });
}
void BeginDestroy() {
if (stream_ != nullptr) {
stream_.reset();
} else {
StreamDestroyed();
}
}
Transport* const transport_;
RefCountedPtr<CallContext> const call_context_{
GetContext<CallContext>()->Ref()};
grpc_closure stream_destroyed_ =
MakeMemberClosure<ConnectedChannelStream,
&ConnectedChannelStream::StreamDestroyed>(
this, DEBUG_LOCATION);
grpc_stream_refcount stream_refcount_;
StreamPtr stream_;
Arena* arena_ = GetContext<Arena>();
Party* const party_ = GetContext<Party>();
ExternallyObservableLatch<void> finished_;
}; };
auto ConnectedChannelStream::RecvMessages(
PipeSender<MessageHandle>* incoming_messages, bool cancel_on_error) {
return Loop([self = InternalRef(), cancel_on_error,
incoming_messages = std::move(*incoming_messages)]() mutable {
return Seq(
GetContext<BatchBuilder>()->ReceiveMessage(self->batch_target()),
[cancel_on_error, &incoming_messages](
absl::StatusOr<absl::optional<MessageHandle>> status) mutable {
bool has_message = status.ok() && status->has_value();
auto publish_message = [&incoming_messages, &status]() {
auto pending_message = std::move(**status);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[connected] RecvMessage: received payload of %" PRIdPTR
" bytes",
GetContext<Activity>()->DebugTag().c_str(),
pending_message->payload()->Length());
}
return Map(incoming_messages.Push(std::move(pending_message)),
[](bool ok) -> LoopCtl<absl::Status> {
if (!ok) {
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_INFO,
"%s[connected] RecvMessage: failed to "
"push message towards the application",
GetContext<Activity>()->DebugTag().c_str());
}
return absl::OkStatus();
}
return Continue{};
});
};
auto publish_close = [cancel_on_error, &incoming_messages,
&status]() mutable {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[connected] RecvMessage: reached end of stream with "
"status:%s",
GetContext<Activity>()->DebugTag().c_str(),
status.status().ToString().c_str());
}
if (cancel_on_error && !status.ok()) {
incoming_messages.CloseWithError();
} else {
incoming_messages.Close();
}
return Immediate(LoopCtl<absl::Status>(status.status()));
};
return If(has_message, std::move(publish_message),
std::move(publish_close));
});
});
}
auto ConnectedChannelStream::SendMessages(
PipeReceiver<MessageHandle>* outgoing_messages) {
return ForEach(std::move(*outgoing_messages),
[self = InternalRef()](MessageHandle message) {
return GetContext<BatchBuilder>()->SendMessage(
self->batch_target(), std::move(message));
});
}
#endif // defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL) ||
// defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL)
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
ArenaPromise<ServerMetadataHandle> MakeClientCallPromise(Transport* transport,
CallArgs call_args,
NextPromiseFactory) {
OrphanablePtr<ConnectedChannelStream> stream(
GetContext<Arena>()->New<ConnectedChannelStream>(transport));
stream->SetStream(static_cast<grpc_stream*>(GetContext<Arena>()->Alloc(
transport->filter_stack_transport()->SizeOfStream())));
transport->filter_stack_transport()->InitStream(stream->stream(),
stream->stream_refcount(),
nullptr, GetContext<Arena>());
auto* party = GetContext<Party>();
party->Spawn("set_polling_entity", call_args.polling_entity->Wait(),
[transport, stream = stream->InternalRef()](
grpc_polling_entity polling_entity) {
transport->SetPollingEntity(stream->stream(), &polling_entity);
});
// Start a loop to send messages from client_to_server_messages to the
// transport. When the pipe closes and the loop completes, send a trailing
// metadata batch to close the stream.
party->Spawn(
"send_messages",
TrySeq(stream->SendMessages(call_args.client_to_server_messages),
[stream = stream->InternalRef()]() {
return GetContext<BatchBuilder>()->SendClientTrailingMetadata(
stream->batch_target());
}),
[](absl::Status) {});
// Start a promise to receive server initial metadata and then forward it up
// through the receiving pipe.
auto server_initial_metadata = Arena::MakePooled<ServerMetadata>();
party->Spawn(
"recv_initial_metadata",
TrySeq(GetContext<BatchBuilder>()->ReceiveServerInitialMetadata(
stream->batch_target()),
[pipe = call_args.server_initial_metadata](
ServerMetadataHandle server_initial_metadata) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG,
"%s[connected] Publish client initial metadata: %s",
GetContext<Activity>()->DebugTag().c_str(),
server_initial_metadata->DebugString().c_str());
}
return Map(pipe->Push(std::move(server_initial_metadata)),
[](bool r) {
if (r) return absl::OkStatus();
return absl::CancelledError();
});
}),
[](absl::Status) {});
// Build up the rest of the main call promise:
// Create a promise that will send initial metadata and then signal completion
// of that via the token.
auto send_initial_metadata = Seq(
GetContext<BatchBuilder>()->SendClientInitialMetadata(
stream->batch_target(), std::move(call_args.client_initial_metadata)),
[sent_initial_metadata_token =
std::move(call_args.client_initial_metadata_outstanding)](
absl::Status status) mutable {
sent_initial_metadata_token.Complete(status.ok());
return status;
});
// Create a promise that will receive server trailing metadata.
// If this fails, we massage the error into metadata that we can report
// upwards.
auto server_trailing_metadata = Arena::MakePooled<ServerMetadata>();
auto recv_trailing_metadata = Map(
GetContext<BatchBuilder>()->ReceiveServerTrailingMetadata(
stream->batch_target()),
[](absl::StatusOr<ServerMetadataHandle> status) mutable {
if (!status.ok()) {
auto server_trailing_metadata = Arena::MakePooled<ServerMetadata>();
grpc_status_code status_code = GRPC_STATUS_UNKNOWN;
std::string message;
grpc_error_get_status(status.status(), Timestamp::InfFuture(),
&status_code, &message, nullptr, nullptr);
server_trailing_metadata->Set(GrpcStatusMetadata(), status_code);
server_trailing_metadata->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(message));
return server_trailing_metadata;
} else {
return std::move(*status);
}
});
// Finally the main call promise.
// Concurrently: send initial metadata and receive messages, until BOTH
// complete (or one fails).
// Next: receive trailing metadata, and return that up the stack.
auto recv_messages =
stream->RecvMessages(call_args.server_to_client_messages, false);
return Map(
[send_initial_metadata = std::move(send_initial_metadata),
recv_messages = std::move(recv_messages),
recv_trailing_metadata = std::move(recv_trailing_metadata),
done_send_initial_metadata = false, done_recv_messages = false,
done_recv_trailing_metadata =
false]() mutable -> Poll<ServerMetadataHandle> {
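        // Hand-rolled join: poll each sub-promise in turn, latching completion
        // in the done_* flags; the overall call promise resolves only once
        // server trailing metadata has been received.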
if (!done_send_initial_metadata) {
auto p = send_initial_metadata();
if (auto* r = p.value_if_ready()) {
done_send_initial_metadata = true;
if (!r->ok()) return StatusCast<ServerMetadataHandle>(*r);
}
}
if (!done_recv_messages) {
auto p = recv_messages();
if (p.ready()) {
// NOTE: ignore errors here, they'll be collected in the
// recv_trailing_metadata.
done_recv_messages = true;
} else {
return Pending{};
}
}
if (!done_recv_trailing_metadata) {
auto p = recv_trailing_metadata();
if (auto* r = p.value_if_ready()) {
done_recv_trailing_metadata = true;
return std::move(*r);
}
}
return Pending{};
},
[stream = std::move(stream)](ServerMetadataHandle result) {
stream->set_finished();
return result;
});
}
#endif
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
ArenaPromise<ServerMetadataHandle> MakeServerCallPromise(
Transport* transport, CallArgs, NextPromiseFactory next_promise_factory) {
OrphanablePtr<ConnectedChannelStream> stream(
GetContext<Arena>()->New<ConnectedChannelStream>(transport));
stream->SetStream(static_cast<grpc_stream*>(GetContext<Arena>()->Alloc(
transport->filter_stack_transport()->SizeOfStream())));
transport->filter_stack_transport()->InitStream(
stream->stream(), stream->stream_refcount(),
GetContext<CallContext>()->server_call_context()->server_stream_data(),
GetContext<Arena>());
auto* party = GetContext<Party>();
  // Artifacts we need for the lifetime of the call.
struct CallData {
Pipe<MessageHandle> server_to_client;
Pipe<MessageHandle> client_to_server;
Pipe<ServerMetadataHandle> server_initial_metadata;
Latch<ServerMetadataHandle> failure_latch;
Latch<grpc_polling_entity> polling_entity_latch;
bool sent_initial_metadata = false;
bool sent_trailing_metadata = false;
};
auto* call_data = GetContext<Arena>()->New<CallData>();
GetContext<CallFinalization>()->Add(
[call_data](const grpc_call_final_info*) { call_data->~CallData(); });
party->Spawn("set_polling_entity", call_data->polling_entity_latch.Wait(),
[transport, stream = stream->InternalRef()](
grpc_polling_entity polling_entity) {
transport->SetPollingEntity(stream->stream(), &polling_entity);
});
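  // Used below to give queued server->client messages priority over polling
  // the main call promise (see the longer discussion further down).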
auto server_to_client_empty =
call_data->server_to_client.receiver.AwaitEmpty();
// Create a promise that will receive client initial metadata, and then run
// the main stem of the call (calling next_promise_factory up through the
// filters).
// Race the main call with failure_latch, allowing us to forcefully complete
// the call in the case of a failure.
auto recv_initial_metadata_then_run_promise =
TrySeq(GetContext<BatchBuilder>()->ReceiveClientInitialMetadata(
stream->batch_target()),
[next_promise_factory = std::move(next_promise_factory),
server_to_client_empty = std::move(server_to_client_empty),
call_data](ClientMetadataHandle client_initial_metadata) {
auto call_promise = next_promise_factory(CallArgs{
std::move(client_initial_metadata),
ClientInitialMetadataOutstandingToken::Empty(),
&call_data->polling_entity_latch,
&call_data->server_initial_metadata.sender,
&call_data->client_to_server.receiver,
&call_data->server_to_client.sender,
});
return Race(call_data->failure_latch.Wait(),
[call_promise = std::move(call_promise),
server_to_client_empty =
std::move(server_to_client_empty)]() mutable
-> Poll<ServerMetadataHandle> {
// TODO(ctiller): this is deeply weird and we need
// to clean this up.
//
// The following few lines check to ensure that
// there's no message currently pending in the
// outgoing message queue, and if (and only if)
// that's true decides to poll the main promise to
// see if there's a result.
//
// This essentially introduces a polling priority
// scheme that makes the current promise structure
// work out the way we want when talking to
// transports.
//
// The problem is that transports are going to need
// to replicate this structure when they convert to
// promises, and that becomes troubling as we'll be
// replicating weirdness throughout the stack.
//
// Instead we likely need to change the way we're
// composing promises through the stack.
//
// The proposal is to change filters from a promise
// that takes ClientInitialMetadata and returns
// ServerTrailingMetadata, with three pipes
// (ServerInitialMetadata, ClientToServerMessages,
// ServerToClientMessages), to a scheme with five
// pipes, moving ClientInitialMetadata and
// ServerTrailingMetadata into pipes that can be
// intercepted.
//
// The effect of this change will be to cripple the
// things that can be done in a filter (but cripple
// in line with what most filters actually do).
// We'll likely need to add a `CallContext::Cancel`
// to allow filters to cancel a request, but this
// would also have the advantage of centralizing
// our cancellation machinery which seems like an
// additional win - with the net effect that the
// shape of the call gets made explicit at the top
// & bottom of the stack.
//
// There's a small set of filters (retry, this one,
// lame client, client channel) that terminate
// stacks and need a richer set of semantics, but
// that ends up being fine because we can spawn
// tasks in parties to handle those edge cases, and
// keep the majority of filters simple: they just
// call InterceptAndMap on a handful of filters at
// call initialization time and then proceed to
// actually filter.
//
// So that's the plan, why isn't it enacted here?
//
// Well, the plan ends up being easy to implement
// in the promise based world (I did a prototype on
// a branch in an afternoon). It's heinous to
// implement in promise_based_filter, and that code
// is load bearing for us at the time of writing.
// It's not worth delaying promises for a further N
// months (N ~ 6) to make that change.
//
// Instead, we'll move forward with this, get
// promise_based_filter out of the picture, and
// then during the mop-up phase for promises tweak
// the compute structure to move to the magical
// five pipes (I'm reminded of an old Onion
// article), and end up in a good happy place.
if (server_to_client_empty().pending()) {
return Pending{};
}
return call_promise();
});
});
// Promise factory that accepts a ServerMetadataHandle, and sends it as the
// trailing metadata for this call.
auto send_trailing_metadata = [call_data, stream = stream->InternalRef()](
ServerMetadataHandle
server_trailing_metadata) {
bool is_cancellation =
server_trailing_metadata->get(GrpcCallWasCancelled()).value_or(false);
return GetContext<BatchBuilder>()->SendServerTrailingMetadata(
stream->batch_target(), std::move(server_trailing_metadata),
is_cancellation ||
!std::exchange(call_data->sent_initial_metadata, true));
};
// Runs the receive message loop, either until all the messages
// are received or the server call is complete.
party->Spawn(
"recv_messages",
Race(
Map(stream->WaitFinished(), [](Empty) { return absl::OkStatus(); }),
Map(stream->RecvMessages(&call_data->client_to_server.sender, true),
[failure_latch = &call_data->failure_latch](absl::Status status) {
if (!status.ok() && !failure_latch->is_set()) {
failure_latch->Set(ServerMetadataFromStatus(status));
}
return status;
})),
[](absl::Status) {});
// Run a promise that will send initial metadata (if that pipe sends some).
// And then run the send message loop until that completes.
auto send_initial_metadata = Seq(
Race(Map(stream->WaitFinished(),
[](Empty) { return NextResult<ServerMetadataHandle>(true); }),
call_data->server_initial_metadata.receiver.Next()),
[call_data, stream = stream->InternalRef()](
NextResult<ServerMetadataHandle> next_result) mutable {
auto md = !call_data->sent_initial_metadata && next_result.has_value()
? std::move(next_result.value())
: nullptr;
if (md != nullptr) {
call_data->sent_initial_metadata = true;
auto* party = GetContext<Party>();
party->Spawn("connected/send_initial_metadata",
GetContext<BatchBuilder>()->SendServerInitialMetadata(
stream->batch_target(), std::move(md)),
[](absl::Status) {});
return Immediate(absl::OkStatus());
}
return Immediate(absl::CancelledError());
});
party->Spawn(
"send_initial_metadata_then_messages",
Race(Map(stream->WaitFinished(), [](Empty) { return absl::OkStatus(); }),
TrySeq(std::move(send_initial_metadata),
stream->SendMessages(&call_data->server_to_client.receiver))),
[](absl::Status) {});
  // Spawn a job to fetch the "client trailing metadata" - if this succeeds the
  // client has finished sending; otherwise it signals cancellation from the
  // client, which we report via failure_latch.
party->Spawn(
"recv_trailing_metadata",
Seq(GetContext<BatchBuilder>()->ReceiveClientTrailingMetadata(
stream->batch_target()),
[failure_latch = &call_data->failure_latch](
absl::StatusOr<ClientMetadataHandle> status) mutable {
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"%s[connected] Got trailing metadata; status=%s metadata=%s",
GetContext<Activity>()->DebugTag().c_str(),
status.status().ToString().c_str(),
status.ok() ? (*status)->DebugString().c_str() : "<none>");
}
ClientMetadataHandle trailing_metadata;
if (status.ok()) {
trailing_metadata = std::move(*status);
} else {
trailing_metadata = Arena::MakePooled<ClientMetadata>();
grpc_status_code status_code = GRPC_STATUS_UNKNOWN;
std::string message;
grpc_error_get_status(status.status(), Timestamp::InfFuture(),
&status_code, &message, nullptr, nullptr);
trailing_metadata->Set(GrpcStatusMetadata(), status_code);
trailing_metadata->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(message));
}
if (trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
if (!failure_latch->is_set()) {
failure_latch->Set(std::move(trailing_metadata));
}
}
return Empty{};
}),
[](Empty) {});
// Finally assemble the main call promise:
// Receive initial metadata from the client and start the promise up the
// filter stack.
// Upon completion, send trailing metadata to the client and then return it
// (allowing the call code to decide on what signalling to give the
// application).
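  // The unique_ptr custom deleters below guarantee that, even if the call is
  // torn down early, the polling entity latch gets set and the server initial
  // metadata pipe gets closed.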
struct CleanupPollingEntityLatch {
void operator()(Latch<grpc_polling_entity>* latch) {
if (!latch->is_set()) latch->Set(grpc_polling_entity());
}
};
auto cleanup_polling_entity_latch =
std::unique_ptr<Latch<grpc_polling_entity>, CleanupPollingEntityLatch>(
&call_data->polling_entity_latch);
struct CleanupSendInitialMetadata {
void operator()(CallData* call_data) {
call_data->server_initial_metadata.receiver.CloseWithError();
}
};
auto cleanup_send_initial_metadata =
std::unique_ptr<CallData, CleanupSendInitialMetadata>(call_data);
return Map(
Seq(std::move(recv_initial_metadata_then_run_promise),
std::move(send_trailing_metadata)),
[cleanup_polling_entity_latch = std::move(cleanup_polling_entity_latch),
cleanup_send_initial_metadata = std::move(cleanup_send_initial_metadata),
stream = std::move(stream)](ServerMetadataHandle md) {
stream->set_finished();
return md;
});
}
#endif
template <ArenaPromise<ServerMetadataHandle> (*make_call_promise)(
Transport*, CallArgs, NextPromiseFactory)>
grpc_channel_filter MakeConnectedFilter() {
// Create a vtable that contains both the legacy call methods (for filter
// stack based calls) and the new promise based method for creating
// promise based calls (the latter iff make_call_promise != nullptr). In
// this way the filter can be inserted into either kind of channel stack,
// and only if all the filters in the stack are promise based will the
// call be promise based.
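  // Adapt the templated promise factory to the vtable signature: fish the
  // Transport out of channel_data and forward the call arguments.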
auto make_call_wrapper = +[](grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next) {
Transport* transport =
static_cast<channel_data*>(elem->channel_data)->transport;
return make_call_promise(transport, std::move(call_args), std::move(next));
};
return {
connected_channel_start_transport_stream_op_batch,
make_call_promise != nullptr ? make_call_wrapper : nullptr,
/* init_call: */ nullptr,
connected_channel_start_transport_op,
sizeof(call_data),
connected_channel_init_call_elem,
set_pollset_or_pollset_set,
connected_channel_destroy_call_elem,
sizeof(channel_data),
connected_channel_init_channel_elem,
+[](grpc_channel_stack* channel_stack, grpc_channel_element* elem) {
// HACK(ctiller): increase call stack size for the channel to make
// space for channel data. We need a cleaner (but performant) way to
// do this, and I'm not sure what that is yet. This is only "safe"
// because call stacks place no additional data after the last call
// element, and the last call element MUST be the connected channel.
auto* transport =
static_cast<channel_data*>(elem->channel_data)->transport;
if (transport->filter_stack_transport() != nullptr) {
channel_stack->call_stack_size +=
transport->filter_stack_transport()->SizeOfStream();
}
},
connected_channel_destroy_channel_elem,
connected_channel_get_channel_info,
"connected",
};
}
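// For transports that natively speak promises on the client side we bypass the
// emulation above: wrap the call in a CallHandler, hand it to the transport,
// and simply wait for server trailing metadata.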
ArenaPromise<ServerMetadataHandle> MakeClientTransportCallPromise(
Transport* transport, CallArgs call_args, NextPromiseFactory) {
auto spine = GetContext<CallContext>()->MakeCallSpine(std::move(call_args));
transport->client_transport()->StartCall(CallHandler{spine});
return spine->PullServerTrailingMetadata();
}
const grpc_channel_filter kClientPromiseBasedTransportFilter =
MakeConnectedFilter<MakeClientTransportCallPromise>();
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
const grpc_channel_filter kClientEmulatedFilter =
MakeConnectedFilter<MakeClientCallPromise>();
#else
const grpc_channel_filter kClientEmulatedFilter =
MakeConnectedFilter<nullptr>();
#endif
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
const grpc_channel_filter kServerEmulatedFilter =
MakeConnectedFilter<MakeServerCallPromise>();
#else
const grpc_channel_filter kServerEmulatedFilter =
MakeConnectedFilter<nullptr>();
#endif
// noop filter for the v3 stack: placeholder for now because other code requires
// we have a terminator.
// TODO(ctiller): delete when v3 transition is complete.
const grpc_channel_filter kServerPromiseBasedTransportFilter = {
    nullptr,
    [](grpc_channel_element*, CallArgs, NextPromiseFactory)
        -> ArenaPromise<ServerMetadataHandle> { Crash("not implemented"); },
    /* init_call: */ [](grpc_channel_element*, CallSpineInterface*) {},
    connected_channel_start_transport_op,
    0,
    nullptr,
    set_pollset_or_pollset_set,
    nullptr,
    sizeof(channel_data),
    connected_channel_init_channel_elem,
    +[](grpc_channel_stack*, grpc_channel_element*) {},
    connected_channel_destroy_channel_elem,
    connected_channel_get_channel_info,
@ -937,7 +301,6 @@ bool TransportSupportsServerPromiseBasedCalls(const ChannelArgs& args) {
  auto* transport = args.GetObject<Transport>();
  return transport->server_transport() != nullptr;
}
}  // namespace
void RegisterConnectedChannel(CoreConfiguration::Builder* builder) {
@ -950,31 +313,30 @@ void RegisterConnectedChannel(CoreConfiguration::Builder* builder) {
  // Option 1, and our ideal: the transport supports promise based calls,
  // and so we simply use the transport directly.
  builder->channel_init()
      ->RegisterFilter(GRPC_CLIENT_SUBCHANNEL,
                       &kClientPromiseBasedTransportFilter)
      .Terminal()
      .If(TransportSupportsClientPromiseBasedCalls);
  builder->channel_init()
      ->RegisterFilter(GRPC_CLIENT_DIRECT_CHANNEL,
                       &kClientPromiseBasedTransportFilter)
      .Terminal()
      .If(TransportSupportsClientPromiseBasedCalls);
  builder->channel_init()
      ->RegisterFilter(GRPC_SERVER_CHANNEL, &kServerPromiseBasedTransportFilter)
      .Terminal()
      .If(TransportSupportsServerPromiseBasedCalls);
  // Option 2: the transport does not support promise based calls.
  builder->channel_init()
      ->RegisterFilter(GRPC_CLIENT_SUBCHANNEL, &kClientEmulatedFilter)
      .Terminal()
      .IfNot(TransportSupportsClientPromiseBasedCalls);
  builder->channel_init()
      ->RegisterFilter(GRPC_CLIENT_DIRECT_CHANNEL, &kClientEmulatedFilter)
      .Terminal()
      .IfNot(TransportSupportsClientPromiseBasedCalls);
  builder->channel_init()
      ->RegisterFilter(GRPC_SERVER_CHANNEL, &kServerEmulatedFilter)
      .Terminal()
      .IfNot(TransportSupportsServerPromiseBasedCalls);
}

@ -90,9 +90,6 @@ BaseCallData::BaseCallData(
      arena_(args->arena),
      call_combiner_(args->call_combiner),
      deadline_(args->deadline),
      call_context_(flags & kFilterExaminesCallContext
                        ? arena_->New<CallContext>(nullptr)
                        : nullptr),
      server_initial_metadata_pipe_(
          flags & kFilterExaminesServerInitialMetadata
              ? arena_->New<Pipe<ServerMetadataHandle>>(arena_)
@ -280,7 +277,7 @@ BaseCallData::Flusher::~Flusher() {
  };
  for (size_t i = 1; i < release_.size(); i++) {
    auto* batch = release_[i];
    if (call_->call_context_ != nullptr && call_->call_context_->traced()) {
      batch->is_traced = true;
    }
    if (grpc_trace_channel.enabled()) {
@ -300,7 +297,7 @@ BaseCallData::Flusher::~Flusher() {
    gpr_log(GPR_INFO, "FLUSHER:forward batch: %s",
            grpc_transport_stream_op_batch_string(release_[0], false).c_str());
  }
  if (call_->call_context_ != nullptr && call_->call_context_->traced()) {
    release_[0]->is_traced = true;
  }
  grpc_call_next_op(call_->elem(), release_[0]);

@ -75,12 +75,6 @@
namespace grpc_core {
// HACK: If a filter has this type as a base class it will be skipped in
// v3 filter stacks. This is a temporary measure to allow the v3 filter stack
// to be brought up whilst some tests inadvertently rely on hard to convert
// filters.
class HackyHackyHackySkipInV3FilterStacks {};
class ChannelFilter {
 public:
  class Args {
@ -608,220 +602,6 @@ inline void InterceptClientToServerMessage(const NoInterceptor*,
                                           FilterCallData<Derived>*,
                                           const CallArgs&) {}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
ServerMetadataHandle (Derived::Call::*fn)(const Message&),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return
[call, call_spine](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnClientToServerMessage(*msg);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
void (Derived::Call::*fn)(const Message&), typename Derived::Call* call,
Derived*, PipeBasedCallSpine*) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call](MessageHandle msg) -> absl::optional<MessageHandle> {
call->OnClientToServerMessage(*msg);
return std::move(msg);
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
ServerMetadataHandle (Derived::Call::*fn)(const Message&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnClientToServerMessage(*msg, channel);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
MessageHandle (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel, PipeBasedCallSpine*) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call, channel](MessageHandle msg) {
return call->OnClientToServerMessage(std::move(msg), channel);
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
absl::StatusOr<MessageHandle> (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto r = call->OnClientToServerMessage(std::move(msg), channel);
if (r.ok()) return std::move(*r);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(r.status()));
return absl::nullopt;
};
}
template <typename Derived, typename HookFunction>
inline void InterceptClientToServerMessage(HookFunction fn,
const NoInterceptor*,
typename Derived::Call* call,
Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
call_spine->client_to_server_messages().receiver.InterceptAndMap(
InterceptClientToServerMessageHandler(fn, call, channel, call_spine));
}
template <typename Derived, typename HookFunction>
inline void InterceptClientToServerMessage(HookFunction fn,
void (Derived::Call::*half_close)(),
typename Derived::Call* call,
Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
DCHECK(half_close == &Derived::Call::OnClientToServerHalfClose);
call_spine->client_to_server_messages().receiver.InterceptAndMapWithHalfClose(
InterceptClientToServerMessageHandler(fn, call, channel, call_spine),
[call]() { call->OnClientToServerHalfClose(); });
}
template <typename Derived>
inline void InterceptClientToServerMessage(const NoInterceptor*,
const NoInterceptor*,
typename Derived::Call*, Derived*,
PipeBasedCallSpine*) {}
inline void InterceptClientInitialMetadata(const NoInterceptor*, void*, void*,
PipeBasedCallSpine*) {}
template <typename Derived>
inline void InterceptClientInitialMetadata(
void (Derived::Call::*fn)(ClientMetadata& md), typename Derived::Call* call,
Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call](ClientMetadataHandle md) {
call->OnClientInitialMetadata(*md);
return md;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
void (Derived::Call::*fn)(ClientMetadata& md, Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call, channel](ClientMetadataHandle md) {
call->OnClientInitialMetadata(*md, channel);
return md;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
ServerMetadataHandle (Derived::Call::*fn)(ClientMetadata& md),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine,
call](ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto return_md = call->OnClientInitialMetadata(*md);
if (return_md == nullptr) return std::move(md);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
ServerMetadataHandle (Derived::Call::*fn)(ClientMetadata& md,
Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine, call, channel](
ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto return_md = call->OnClientInitialMetadata(*md, channel);
if (return_md == nullptr) return std::move(md);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
absl::Status (Derived::Call::*fn)(ClientMetadata& md),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine,
call](ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto status = call->OnClientInitialMetadata(*md);
if (status.ok()) return std::move(md);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
absl::Status (Derived::Call::*fn)(ClientMetadata& md, Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine, call, channel](
ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto status = call->OnClientInitialMetadata(*md, channel);
if (status.ok()) return std::move(md);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
// Returning a promise that resolves to something that can be cast to
// ServerMetadataHandle also counts
template <typename Promise, typename Derived>
absl::void_t<decltype(StatusCast<ServerMetadataHandle>(
std::declval<PromiseResult<Promise>>))>
InterceptClientInitialMetadata(Promise (Derived::Call::*promise_factory)(
ClientMetadata& md, Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(promise_factory == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call, call_spine, channel](ClientMetadataHandle md) {
ClientMetadata& md_ref = *md;
return Map(call->OnClientInitialMetadata(md_ref, channel),
[md = std::move(md),
call_spine](PromiseResult<Promise> status) mutable
-> absl::optional<ClientMetadataHandle> {
if (IsStatusOk(status)) return std::move(md);
call_spine->PushServerTrailingMetadata(
StatusCast<ServerMetadataHandle>(std::move(status)));
return absl::nullopt;
});
});
}
template <typename CallArgs>
inline void InterceptServerInitialMetadata(const NoInterceptor*, void*,
                                           const CallArgs&) {}
@ -885,67 +665,6 @@ inline void InterceptServerInitialMetadata(
      });
}
inline void InterceptServerInitialMetadata(const NoInterceptor*, void*, void*,
CallSpineInterface*) {}
template <typename Derived>
inline void InterceptServerInitialMetadata(
void (Derived::Call::*fn)(ServerMetadata&), typename Derived::Call* call,
Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call](ServerMetadataHandle md) {
call->OnServerInitialMetadata(*md);
return md;
});
}
template <typename Derived>
inline void InterceptServerInitialMetadata(
absl::Status (Derived::Call::*fn)(ServerMetadata&),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call, call_spine](
ServerMetadataHandle md) -> absl::optional<ServerMetadataHandle> {
auto status = call->OnServerInitialMetadata(*md);
if (status.ok()) return std::move(md);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptServerInitialMetadata(
void (Derived::Call::*fn)(ServerMetadata&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call, channel](ServerMetadataHandle md) {
call->OnServerInitialMetadata(*md, channel);
return md;
});
}
template <typename Derived>
inline void InterceptServerInitialMetadata(
absl::Status (Derived::Call::*fn)(ServerMetadata&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call, call_spine, channel](
ServerMetadataHandle md) -> absl::optional<ServerMetadataHandle> {
auto status = call->OnServerInitialMetadata(*md, channel);
if (status.ok()) return std::move(md);
call_spine->PullServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
inline void InterceptServerToClientMessage(const NoInterceptor*, void*,
                                           const CallArgs&) {}
@ -1020,106 +739,6 @@ inline void InterceptServerToClientMessage(
      });
}
inline void InterceptServerToClientMessage(const NoInterceptor*, void*, void*,
CallSpineInterface*) {}
template <typename Derived>
inline void InterceptServerToClientMessage(
void (Derived::Call::*fn)(const Message&), typename Derived::Call* call,
Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call](MessageHandle msg) -> absl::optional<MessageHandle> {
call->OnServerToClientMessage(*msg);
return std::move(msg);
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
ServerMetadataHandle (Derived::Call::*fn)(const Message&),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, call_spine](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnServerToClientMessage(*msg);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
ServerMetadataHandle (Derived::Call::*fn)(const Message&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnServerToClientMessage(*msg, channel);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
MessageHandle (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, channel](MessageHandle msg) {
return call->OnServerToClientMessage(std::move(msg), channel);
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
absl::StatusOr<MessageHandle> (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto r = call->OnServerToClientMessage(std::move(msg), channel);
if (r.ok()) return std::move(*r);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(r.status()));
return absl::nullopt;
});
}
inline void InterceptServerTrailingMetadata(const NoInterceptor*, void*, void*,
CallSpineInterface*) {}
template <typename Derived>
inline void InterceptServerTrailingMetadata(
void (Derived::Call::*)(ServerMetadata&), typename Derived::Call*, Derived*,
PipeBasedCallSpine*) {
gpr_log(GPR_ERROR,
"InterceptServerTrailingMetadata not available for call v2.5");
}
template <typename Derived>
inline void InterceptServerTrailingMetadata(
void (Derived::Call::*)(ServerMetadata&, Derived*), typename Derived::Call*,
Derived*, PipeBasedCallSpine*) {
gpr_log(GPR_ERROR,
"InterceptServerTrailingMetadata not available for call v2.5");
}
template <typename Derived>
inline void InterceptServerTrailingMetadata(
absl::Status (Derived::Call::*)(ServerMetadata&), typename Derived::Call*,
Derived*, PipeBasedCallSpine*) {
gpr_log(GPR_ERROR,
"InterceptServerTrailingMetadata not available for call v2.5");
}
inline void InterceptFinalize(const NoInterceptor*, void*, void*) {}
template <class Call>
@ -1221,29 +840,6 @@ template <typename Derived>
class ImplementChannelFilter : public ChannelFilter,
                               public ImplementChannelFilterTag {
 public:
// Natively construct a v3 call.
void InitCall(CallSpineInterface* call_spine) {
typename Derived::Call* call =
GetContext<Arena>()
->ManagedNew<promise_filter_detail::CallWrapper<Derived>>(
static_cast<Derived*>(this));
auto* c = DownCast<PipeBasedCallSpine*>(call_spine);
auto* d = static_cast<Derived*>(this);
promise_filter_detail::InterceptClientInitialMetadata(
&Derived::Call::OnClientInitialMetadata, call, d, c);
promise_filter_detail::InterceptClientToServerMessage(
&Derived::Call::OnClientToServerMessage,
&Derived::Call::OnClientToServerHalfClose, call, d, c);
promise_filter_detail::InterceptServerInitialMetadata(
&Derived::Call::OnServerInitialMetadata, call, d, c);
promise_filter_detail::InterceptServerToClientMessage(
&Derived::Call::OnServerToClientMessage, call, d, c);
promise_filter_detail::InterceptServerTrailingMetadata(
&Derived::Call::OnServerTrailingMetadata, call, d, c);
promise_filter_detail::InterceptFinalize(&Derived::Call::OnFinalize, d,
call);
}
  // Polyfill for the original promise scheme.
  // Allows writing v3 filters that work with v2 stacks.
  // (and consequently also v1 stacks since we can polyfill back to that too).
@ -1344,13 +940,14 @@ class BaseCallData : public Activity, private Wakeable {
  virtual void StartBatch(grpc_transport_stream_op_batch* batch) = 0;
Call* call() { return arena_->GetContext<Call>(); }
 protected:
  class ScopedContext : public promise_detail::Context<Arena>,
                        public promise_detail::Context<grpc_polling_entity>,
                        public promise_detail::Context<CallFinalization>,
                        public promise_detail::Context<
                            grpc_event_engine::experimental::EventEngine>,
                        public promise_detail::Context<CallContext> {
   public:
    explicit ScopedContext(BaseCallData* call_data)
        : promise_detail::Context<Arena>(call_data->arena_),
@ -1358,8 +955,7 @@ class BaseCallData : public Activity, private Wakeable {
              call_data->pollent_.load(std::memory_order_acquire)),
          promise_detail::Context<CallFinalization>(&call_data->finalization_),
          promise_detail::Context<grpc_event_engine::experimental::EventEngine>(
              call_data->event_engine_),
          promise_detail::Context<CallContext>(call_data->call_context_) {}
  };
  class Flusher {
@ -1705,7 +1301,6 @@ class BaseCallData : public Activity, private Wakeable {
  CallCombiner* const call_combiner_;
  const Timestamp deadline_;
  CallFinalization finalization_;
CallContext* call_context_ = nullptr;
  std::atomic<grpc_polling_entity*> pollent_{nullptr};
  Pipe<ServerMetadataHandle>* const server_initial_metadata_pipe_;
  SendMessage* const send_message_;
@ -2061,67 +1656,15 @@ struct ChannelFilterWithFlagsMethods {
// ChannelArgs channel_args, ChannelFilter::Args filter_args);
// };
template <typename F, FilterEndpoint kEndpoint, uint8_t kFlags = 0>
absl::enable_if_t<
    std::is_base_of<ChannelFilter, F>::value &&
        !std::is_base_of<ImplementChannelFilterTag, F>::value &&
!std::is_base_of<HackyHackyHackySkipInV3FilterStacks, F>::value,
grpc_channel_filter>
MakePromiseBasedFilter(const char* name) {
using CallData = promise_filter_detail::CallData<kEndpoint>;
return grpc_channel_filter{
// start_transport_stream_op_batch
promise_filter_detail::BaseCallDataMethods::StartTransportStreamOpBatch,
// make_call_promise
promise_filter_detail::ChannelFilterMethods::MakeCallPromise,
nullptr,
// start_transport_op
promise_filter_detail::ChannelFilterMethods::StartTransportOp,
// sizeof_call_data
sizeof(CallData),
// init_call_elem
promise_filter_detail::CallDataFilterWithFlagsMethods<
CallData, kFlags>::InitCallElem,
// set_pollset_or_pollset_set
promise_filter_detail::BaseCallDataMethods::SetPollsetOrPollsetSet,
// destroy_call_elem
promise_filter_detail::CallDataFilterWithFlagsMethods<
CallData, kFlags>::DestroyCallElem,
// sizeof_channel_data
sizeof(F),
// init_channel_elem
promise_filter_detail::ChannelFilterWithFlagsMethods<
F, kFlags>::InitChannelElem,
// post_init_channel_elem
promise_filter_detail::ChannelFilterMethods::PostInitChannelElem,
// destroy_channel_elem
promise_filter_detail::ChannelFilterWithFlagsMethods<
F, kFlags>::DestroyChannelElem,
// get_channel_info
promise_filter_detail::ChannelFilterMethods::GetChannelInfo,
// name
name,
};
}
template <typename F, FilterEndpoint kEndpoint, uint8_t kFlags = 0>
absl::enable_if_t<
std::is_base_of<HackyHackyHackySkipInV3FilterStacks, F>::value,
grpc_channel_filter>
MakePromiseBasedFilter(const char* name) {
  using CallData = promise_filter_detail::CallData<kEndpoint>;
  return grpc_channel_filter{
      // start_transport_stream_op_batch
      promise_filter_detail::BaseCallDataMethods::StartTransportStreamOpBatch,
// make_call_promise
promise_filter_detail::ChannelFilterMethods::MakeCallPromise,
[](grpc_channel_element* elem, CallSpineInterface*) {
GRPC_LOG_EVERY_N_SEC(
1, GPR_ERROR,
"gRPC V3 call stack in use, with a filter ('%s') that is not V3.",
elem->filter->name);
},
      // start_transport_op
      promise_filter_detail::ChannelFilterMethods::StartTransportOp,
      // sizeof_call_data
@ -2160,11 +1703,6 @@ MakePromiseBasedFilter(const char* name) {
  return grpc_channel_filter{
      // start_transport_stream_op_batch
      promise_filter_detail::BaseCallDataMethods::StartTransportStreamOpBatch,
// make_call_promise
promise_filter_detail::ChannelFilterMethods::MakeCallPromise,
[](grpc_channel_element* elem, CallSpineInterface* args) {
static_cast<F*>(elem->channel_data)->InitCall(args);
},
      // start_transport_op
      promise_filter_detail::ChannelFilterMethods::StartTransportOp,
      // sizeof_call_data

@ -76,24 +76,9 @@ const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
    "New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
const char* const description_promise_based_client_call =
"If set, use the new gRPC promise based call code when it's appropriate "
"(ie when all filters in a stack are promise based)";
const char* const additional_constraints_promise_based_client_call = "{}";
const uint8_t required_experiments_promise_based_client_call[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener)};
const char* const description_chaotic_good =
"If set, enable the chaotic good load transport (this is mostly here for "
"testing)";
const char* const additional_constraints_chaotic_good = "{}";
const uint8_t required_experiments_chaotic_good[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_promise_based_inproc_transport =
    "Use promises for the in-process transport.";
const char* const additional_constraints_promise_based_inproc_transport = "{}";
const uint8_t required_experiments_promise_based_inproc_transport[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_rstpit =
    "On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short "
    "duration";
@ -131,12 +116,6 @@ const char* const description_work_serializer_dispatch =
const char* const additional_constraints_work_serializer_dispatch = "{}";
const uint8_t required_experiments_work_serializer_dispatch[] = {
    static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient)};
const char* const description_call_v3 = "Promise-based call version 3.";
const char* const additional_constraints_call_v3 = "{}";
const uint8_t required_experiments_call_v3[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener),
static_cast<uint8_t>(grpc_core::kExperimentIdWorkSerializerDispatch)};
}  // namespace
namespace grpc_core {
@ -175,16 +154,10 @@ const ExperimentMetadata g_experiment_metadata[] = {
     additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
    {"pick_first_new", description_pick_first_new,
     additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_client_call", description_promise_based_client_call,
additional_constraints_promise_based_client_call,
required_experiments_promise_based_client_call, 2, false, true},
{"chaotic_good", description_chaotic_good,
additional_constraints_chaotic_good, required_experiments_chaotic_good, 1,
false, true},
{"promise_based_inproc_transport", {"promise_based_inproc_transport",
description_promise_based_inproc_transport, description_promise_based_inproc_transport,
additional_constraints_promise_based_inproc_transport, additional_constraints_promise_based_inproc_transport, nullptr, 0, false,
required_experiments_promise_based_inproc_transport, 1, false, false}, false},
{"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0, {"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0,
false, true}, false, true},
{"schedule_cancellation_over_write", {"schedule_cancellation_over_write",
@ -210,8 +183,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
{"work_serializer_dispatch", description_work_serializer_dispatch, {"work_serializer_dispatch", description_work_serializer_dispatch,
additional_constraints_work_serializer_dispatch, additional_constraints_work_serializer_dispatch,
required_experiments_work_serializer_dispatch, 1, false, true}, required_experiments_work_serializer_dispatch, 1, false, true},
{"call_v3", description_call_v3, additional_constraints_call_v3,
required_experiments_call_v3, 3, false, false},
};
}  // namespace grpc_core
@ -270,24 +241,9 @@ const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
    "New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
const char* const description_promise_based_client_call =
"If set, use the new gRPC promise based call code when it's appropriate "
"(ie when all filters in a stack are promise based)";
const char* const additional_constraints_promise_based_client_call = "{}";
const uint8_t required_experiments_promise_based_client_call[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener)};
const char* const description_chaotic_good =
"If set, enable the chaotic good load transport (this is mostly here for "
"testing)";
const char* const additional_constraints_chaotic_good = "{}";
const uint8_t required_experiments_chaotic_good[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_promise_based_inproc_transport =
    "Use promises for the in-process transport.";
const char* const additional_constraints_promise_based_inproc_transport = "{}";
const uint8_t required_experiments_promise_based_inproc_transport[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_rstpit =
    "On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short "
    "duration";
@ -325,12 +281,6 @@ const char* const description_work_serializer_dispatch =
const char* const additional_constraints_work_serializer_dispatch = "{}";
const uint8_t required_experiments_work_serializer_dispatch[] = {
    static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient)};
const char* const description_call_v3 = "Promise-based call version 3.";
const char* const additional_constraints_call_v3 = "{}";
const uint8_t required_experiments_call_v3[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener),
static_cast<uint8_t>(grpc_core::kExperimentIdWorkSerializerDispatch)};
}  // namespace
namespace grpc_core {
@ -369,16 +319,10 @@ const ExperimentMetadata g_experiment_metadata[] = {
     additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
    {"pick_first_new", description_pick_first_new,
     additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_client_call", description_promise_based_client_call,
additional_constraints_promise_based_client_call,
required_experiments_promise_based_client_call, 2, false, true},
{"chaotic_good", description_chaotic_good,
additional_constraints_chaotic_good, required_experiments_chaotic_good, 1,
false, true},
{"promise_based_inproc_transport", {"promise_based_inproc_transport",
description_promise_based_inproc_transport, description_promise_based_inproc_transport,
additional_constraints_promise_based_inproc_transport, additional_constraints_promise_based_inproc_transport, nullptr, 0, false,
required_experiments_promise_based_inproc_transport, 1, false, false}, false},
{"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0, {"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0,
false, true}, false, true},
{"schedule_cancellation_over_write", {"schedule_cancellation_over_write",
@ -404,8 +348,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
{"work_serializer_dispatch", description_work_serializer_dispatch, {"work_serializer_dispatch", description_work_serializer_dispatch,
additional_constraints_work_serializer_dispatch, additional_constraints_work_serializer_dispatch,
required_experiments_work_serializer_dispatch, 1, false, true}, required_experiments_work_serializer_dispatch, 1, false, true},
{"call_v3", description_call_v3, additional_constraints_call_v3,
required_experiments_call_v3, 3, false, false},
};
}  // namespace grpc_core
@ -464,24 +406,9 @@ const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
    "New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
const char* const description_promise_based_client_call =
"If set, use the new gRPC promise based call code when it's appropriate "
"(ie when all filters in a stack are promise based)";
const char* const additional_constraints_promise_based_client_call = "{}";
const uint8_t required_experiments_promise_based_client_call[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener)};
const char* const description_chaotic_good =
"If set, enable the chaotic good load transport (this is mostly here for "
"testing)";
const char* const additional_constraints_chaotic_good = "{}";
const uint8_t required_experiments_chaotic_good[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_promise_based_inproc_transport =
    "Use promises for the in-process transport.";
const char* const additional_constraints_promise_based_inproc_transport = "{}";
const uint8_t required_experiments_promise_based_inproc_transport[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_rstpit =
    "On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short "
    "duration";
@ -519,12 +446,6 @@ const char* const description_work_serializer_dispatch =
const char* const additional_constraints_work_serializer_dispatch = "{}";
const uint8_t required_experiments_work_serializer_dispatch[] = {
    static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient)};
const char* const description_call_v3 = "Promise-based call version 3.";
const char* const additional_constraints_call_v3 = "{}";
const uint8_t required_experiments_call_v3[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener),
static_cast<uint8_t>(grpc_core::kExperimentIdWorkSerializerDispatch)};
} // namespace } // namespace
namespace grpc_core { namespace grpc_core {
@ -563,16 +484,10 @@ const ExperimentMetadata g_experiment_metadata[] = {
additional_constraints_peer_state_based_framing, nullptr, 0, false, true}, additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
{"pick_first_new", description_pick_first_new, {"pick_first_new", description_pick_first_new,
additional_constraints_pick_first_new, nullptr, 0, true, true}, additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_client_call", description_promise_based_client_call,
additional_constraints_promise_based_client_call,
required_experiments_promise_based_client_call, 2, false, true},
{"chaotic_good", description_chaotic_good,
additional_constraints_chaotic_good, required_experiments_chaotic_good, 1,
false, true},
{"promise_based_inproc_transport", {"promise_based_inproc_transport",
description_promise_based_inproc_transport, description_promise_based_inproc_transport,
additional_constraints_promise_based_inproc_transport, additional_constraints_promise_based_inproc_transport, nullptr, 0, false,
required_experiments_promise_based_inproc_transport, 1, false, false}, false},
{"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0, {"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0,
false, true}, false, true},
{"schedule_cancellation_over_write", {"schedule_cancellation_over_write",
@ -598,8 +513,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
{"work_serializer_dispatch", description_work_serializer_dispatch, {"work_serializer_dispatch", description_work_serializer_dispatch,
additional_constraints_work_serializer_dispatch, additional_constraints_work_serializer_dispatch,
required_experiments_work_serializer_dispatch, 1, true, true}, required_experiments_work_serializer_dispatch, 1, true, true},
{"call_v3", description_call_v3, additional_constraints_call_v3,
required_experiments_call_v3, 3, false, false},
}; };
} // namespace grpc_core } // namespace grpc_core

@@ -76,8 +76,6 @@ inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedClientCallEnabled() { return false; }
inline bool IsChaoticGoodEnabled() { return false; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
inline bool IsRstpitEnabled() { return false; }
inline bool IsScheduleCancellationOverWriteEnabled() { return false; }
@@ -90,7 +88,6 @@ inline bool IsUnconstrainedMaxQuotaBufferSizeEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_WORK_SERIALIZER_CLEARS_TIME_CACHE
inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
inline bool IsWorkSerializerDispatchEnabled() { return false; }
inline bool IsCallV3Enabled() { return false; }
#elif defined(GPR_WINDOWS)
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
@@ -114,8 +111,6 @@ inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedClientCallEnabled() { return false; }
inline bool IsChaoticGoodEnabled() { return false; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
inline bool IsRstpitEnabled() { return false; }
inline bool IsScheduleCancellationOverWriteEnabled() { return false; }
@@ -128,7 +123,6 @@ inline bool IsUnconstrainedMaxQuotaBufferSizeEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_WORK_SERIALIZER_CLEARS_TIME_CACHE
inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
inline bool IsWorkSerializerDispatchEnabled() { return false; }
inline bool IsCallV3Enabled() { return false; }
#else
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
@@ -152,8 +146,6 @@ inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedClientCallEnabled() { return false; }
inline bool IsChaoticGoodEnabled() { return false; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
inline bool IsRstpitEnabled() { return false; }
inline bool IsScheduleCancellationOverWriteEnabled() { return false; }
@@ -167,7 +159,6 @@ inline bool IsUnconstrainedMaxQuotaBufferSizeEnabled() { return false; }
inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
#define GRPC_EXPERIMENT_IS_INCLUDED_WORK_SERIALIZER_DISPATCH
inline bool IsWorkSerializerDispatchEnabled() { return true; }
inline bool IsCallV3Enabled() { return false; }
#endif
#else
@@ -187,8 +178,6 @@ enum ExperimentIds {
kExperimentIdMultiping,
kExperimentIdPeerStateBasedFraming,
kExperimentIdPickFirstNew,
kExperimentIdPromiseBasedClientCall,
kExperimentIdChaoticGood,
kExperimentIdPromiseBasedInprocTransport,
kExperimentIdRstpit,
kExperimentIdScheduleCancellationOverWrite,
@@ -199,7 +188,6 @@ enum ExperimentIds {
kExperimentIdUnconstrainedMaxQuotaBufferSize,
kExperimentIdWorkSerializerClearsTimeCache,
kExperimentIdWorkSerializerDispatch,
kExperimentIdCallV3,
kNumExperiments
};
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
@@ -262,14 +250,6 @@ inline bool IsPeerStateBasedFramingEnabled() {
inline bool IsPickFirstNewEnabled() {
return IsExperimentEnabled(kExperimentIdPickFirstNew);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
inline bool IsPromiseBasedClientCallEnabled() {
return IsExperimentEnabled(kExperimentIdPromiseBasedClientCall);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_CHAOTIC_GOOD
inline bool IsChaoticGoodEnabled() {
return IsExperimentEnabled(kExperimentIdChaoticGood);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_INPROC_TRANSPORT
inline bool IsPromiseBasedInprocTransportEnabled() {
return IsExperimentEnabled(kExperimentIdPromiseBasedInprocTransport);
@@ -310,10 +290,6 @@ inline bool IsWorkSerializerClearsTimeCacheEnabled() {
inline bool IsWorkSerializerDispatchEnabled() {
return IsExperimentEnabled(kExperimentIdWorkSerializerDispatch);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_V3
inline bool IsCallV3Enabled() {
return IsExperimentEnabled(kExperimentIdCallV3);
}
extern const ExperimentMetadata g_experiment_metadata[kNumExperiments];

@@ -47,13 +47,6 @@
expiry: 2024/08/01
owner: vigneshbabu@google.com
test_tags: []
- name: call_v3
description: Promise-based call version 3.
expiry: 2024/06/01
owner: ctiller@google.com
test_tags: []
requires: ["work_serializer_dispatch", "event_engine_listener", "event_engine_client"]
allow_in_fuzzing_config: false
- name: canary_client_privacy
description:
If set, canary client privacy
@@ -61,13 +54,6 @@
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false
- name: chaotic_good
description:
If set, enable the chaotic good load transport (this is mostly here for testing)
expiry: 2024/09/09
owner: ctiller@google.com
requires: [promise_based_client_call]
test_tags: [core_end2end_test]
- name: client_privacy
description:
If set, client privacy
@@ -154,14 +140,6 @@
expiry: 2024/07/30
owner: roth@google.com
test_tags: ["lb_unit_test", "cpp_lb_end2end_test", "xds_end2end_test"]
- name: promise_based_client_call
description:
If set, use the new gRPC promise based call code when it's appropriate
(ie when all filters in a stack are promise based)
expiry: 2024/06/14
owner: ctiller@google.com
test_tags: ["core_end2end_test", "lame_client_test"]
requires: ["event_engine_listener", "event_engine_client"]
- name: promise_based_inproc_transport
description:
Use promises for the in-process transport.
@@ -169,7 +147,6 @@
owner: ctiller@google.com
test_tags: []
allow_in_fuzzing_config: false # experiment currently crashes if enabled
requires: [promise_based_client_call]
- name: rstpit
description:
On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short duration

@@ -335,6 +335,7 @@ class DualRefCounted : public Impl {
gpr_log(GPR_INFO, "%s:%p weak_ref %d -> %d; (refs=%d)", trace_, this,
weak_refs, weak_refs + 1, strong_refs);
}
if (strong_refs == 0) CHECK_NE(weak_refs, 0u);
#else
refs_.fetch_add(MakeRefPair(0, 1), std::memory_order_relaxed);
#endif
@@ -351,6 +352,7 @@ class DualRefCounted : public Impl {
this, location.file(), location.line(), weak_refs, weak_refs + 1,
strong_refs, reason);
}
if (strong_refs == 0) CHECK_NE(weak_refs, 0u);
#else
// Use conditionally-important parameters
(void)location;

@@ -246,6 +246,15 @@ struct UnrefCallDtor {
}
};
// Call the Destroy method on the object. This is useful when the object
// needs precise control of how it's deallocated.
struct UnrefCallDestroy {
template <typename T>
void operator()(T* p) const {
p->Destroy();
}
};
// A base class for reference-counted objects.
// New objects should be created via new and start with a refcount of 1.
// When the refcount reaches 0, executes the specified UnrefBehavior.

@@ -63,17 +63,19 @@ class SingleSetPtr {
void Reset() { Delete(p_.exchange(nullptr, std::memory_order_acq_rel)); }
bool is_set() const {
T* p = p_.load(std::memory_order_acquire);
T* p = Get();
return p != nullptr;
}
T* Get() const { return p_.load(std::memory_order_acquire); }
T* operator->() const {
T* p = p_.load(std::memory_order_acquire);
T* p = Get();
DCHECK_NE(p, nullptr);
return p;
}
T& operator*() const { return *operator->(); }
T& operator*() const { return *Get(); }
private:
static void Delete(T* p) {

@@ -80,6 +80,20 @@ auto OnCancel(MainFn main_fn, CancelFn cancel_fn) {
};
}
// Similar to OnCancel, but returns a factory that uses main_fn to construct the
// resulting promise. If the factory is dropped without being called, cancel_fn
// is called.
template <typename MainFn, typename CancelFn>
auto OnCancelFactory(MainFn main_fn, CancelFn cancel_fn) {
return [on_cancel =
cancel_callback_detail::Handler<CancelFn>(std::move(cancel_fn)),
main_fn = std::move(main_fn)]() mutable {
auto r = main_fn();
on_cancel.Done();
return r;
};
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_PROMISE_CANCEL_CALLBACK_H
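
A standalone sketch of the OnCancelFactory contract (the FactoryWithCancel name and the plain return values are illustrative, not the real promise machinery): invoking the wrapper forwards to main_fn, while dropping it without ever invoking it runs cancel_fn.

#include <cstdio>
#include <utility>

// Illustrative stand-in for the factory-with-cancellation shape.
template <typename MainFn, typename CancelFn>
class FactoryWithCancel {
 public:
  FactoryWithCancel(MainFn main_fn, CancelFn cancel_fn)
      : main_fn_(std::move(main_fn)), cancel_fn_(std::move(cancel_fn)) {}
  ~FactoryWithCancel() {
    if (!called_) cancel_fn_();  // dropped without being called
  }
  auto operator()() {
    called_ = true;
    return main_fn_();  // construct the "promise" (here just a value)
  }

 private:
  MainFn main_fn_;
  CancelFn cancel_fn_;
  bool called_ = false;
};

int main() {
  {
    FactoryWithCancel dropped([] { return 1; },
                              [] { std::puts("cancel_fn: factory dropped"); });
  }  // never invoked -> destructor runs cancel_fn
  FactoryWithCancel used([] { return 2; }, [] { std::puts("not reached"); });
  std::printf("main_fn result: %d\n", used());
  return 0;
}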

@@ -647,8 +647,9 @@ template <typename Factory, typename OnComplete>
void Party::BulkSpawner::Spawn(absl::string_view name, Factory promise_factory,
OnComplete on_complete) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "%s[bulk_spawn] On %p queue %s",
party_->DebugTag().c_str(), this, std::string(name).c_str());
gpr_log(GPR_INFO, "%s[bulk_spawn] On %p queue %s (%" PRIdPTR " bytes)",
party_->DebugTag().c_str(), this, std::string(name).c_str(),
sizeof(ParticipantImpl<Factory, OnComplete>));
}
participants_[num_participants_++] = new ParticipantImpl<Factory, OnComplete>(
name, std::move(promise_factory), std::move(on_complete));

File diff suppressed because it is too large

@@ -86,7 +86,7 @@ class Call : public CppImplOf<Call, grpc_call>,
public grpc_event_engine::experimental::EventEngine::
Closure /* for deadlines */ {
public:
virtual Arena* arena() = 0;
Arena* arena() { return arena_.get(); }
bool is_client() const { return is_client_; }
virtual bool Completed() = 0;
@@ -112,10 +112,7 @@ class Call : public CppImplOf<Call, grpc_call>,
return deadline_;
}
grpc_compression_algorithm test_only_compression_algorithm() {
return incoming_compression_algorithm_;
}
uint32_t test_only_message_flags() { return test_only_last_message_flags_; }
virtual uint32_t test_only_message_flags() = 0;
CompressionAlgorithmSet encodings_accepted_by_peer() {
return encodings_accepted_by_peer_;
}
@@ -125,14 +122,20 @@ class Call : public CppImplOf<Call, grpc_call>,
virtual grpc_call_stack* call_stack() = 0;
// Return the EventEngine used for this call's async execution.
virtual grpc_event_engine::experimental::EventEngine* event_engine()
const = 0;
grpc_event_engine::experimental::EventEngine* event_engine() const {
return event_engine_;
}
// Implementation of EventEngine::Closure, called when deadline expires
void Run() final;
gpr_cycle_counter start_time() const { return start_time_; }
void set_traced(bool traced) { traced_ = traced; }
bool traced() const { return traced_; }
virtual grpc_compression_algorithm incoming_compression_algorithm() = 0;
protected:
// The maximum number of concurrent batches possible.
// Based upon the maximum number of individually queueable ops in the batch
@@ -160,11 +163,8 @@ class Call : public CppImplOf<Call, grpc_call>,
Call* sibling_prev = nullptr;
};
Call(bool is_client, Timestamp send_deadline,
grpc_event_engine::experimental::EventEngine* event_engine)
: send_deadline_(send_deadline),
is_client_(is_client),
event_engine_(event_engine) {}
Call(bool is_client, Timestamp send_deadline, RefCountedPtr<Arena> arena,
grpc_event_engine::experimental::EventEngine* event_engine);
~Call() override = default;
ParentCall* GetOrCreateParentCall();
@@ -200,12 +200,6 @@ class Call : public CppImplOf<Call, grpc_call>,
// internal headers against external modification.
void PrepareOutgoingInitialMetadata(const grpc_op& op,
grpc_metadata_batch& md);
void NoteLastMessageFlags(uint32_t flags) {
test_only_last_message_flags_ = flags;
}
grpc_compression_algorithm incoming_compression_algorithm() const {
return incoming_compression_algorithm_;
}
void HandleCompressionAlgorithmDisabled(
grpc_compression_algorithm compression_algorithm) GPR_ATTRIBUTE_NOINLINE;
@@ -214,20 +208,22 @@ class Call : public CppImplOf<Call, grpc_call>,
virtual grpc_compression_options compression_options() = 0;
virtual void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) = 0;
private:
const RefCountedPtr<Arena> arena_;
std::atomic<ParentCall*> parent_call_{nullptr};
ChildCall* child_ = nullptr;
Timestamp send_deadline_;
const bool is_client_;
// flag indicating that cancellation is inherited
bool cancellation_is_inherited_ = false;
// Compression algorithm for *incoming* data
grpc_compression_algorithm incoming_compression_algorithm_ =
GRPC_COMPRESS_NONE;
// Is this call traced?
bool traced_ = false;
// Supported encodings (compression algorithms), a bitset.
// Always support no compression.
CompressionAlgorithmSet encodings_accepted_by_peer_{GRPC_COMPRESS_NONE};
uint32_t test_only_last_message_flags_ = 0;
// Peer name is protected by a mutex because it can be accessed by the
// application at the same moment as it is being set by the completion
// of the recv_initial_metadata op. The mutex should be mostly uncontended.
@@ -247,66 +243,6 @@ struct ArenaContextType<Call> {
static void Destroy(Call*) {}
};
class BasicPromiseBasedCall;
class ServerPromiseBasedCall;
// TODO(ctiller): move more call things into this type
class CallContext {
public:
explicit CallContext(BasicPromiseBasedCall* call) : call_(call) {}
// Run some action in the call activity context. This is needed to adapt some
// legacy systems to promises, and will likely disappear once that conversion
// is complete.
void RunInContext(absl::AnyInvocable<void()> fn);
// TODO(ctiller): remove this once transport APIs are promise based
void IncrementRefCount(const char* reason = "call_context");
// TODO(ctiller): remove this once transport APIs are promise based
void Unref(const char* reason = "call_context");
RefCountedPtr<CallContext> Ref() {
IncrementRefCount();
return RefCountedPtr<CallContext>(this);
}
grpc_call_stats* call_stats() { return &call_stats_; }
gpr_atm* peer_string_atm_ptr();
gpr_cycle_counter call_start_time() { return start_time_; }
void set_traced(bool traced) { traced_ = traced; }
bool traced() const { return traced_; }
// TEMPORARY HACK
// Create a call spine object for this call.
// Said object should only be created once.
// Allows interop between the v2 call stack and the v3 (which is required by
// transports).
RefCountedPtr<CallSpineInterface> MakeCallSpine(CallArgs call_args);
grpc_call* c_call();
private:
friend class PromiseBasedCall;
// Call final info.
grpc_call_stats call_stats_;
// TODO(ctiller): remove this once transport APIs are promise based and we
// don't need refcounting here.
BasicPromiseBasedCall* const call_;
gpr_cycle_counter start_time_ = gpr_get_cycle_counter();
// Is this call traced?
bool traced_ = false;
};
template <>
struct ContextType<CallContext> {};
// TODO(ctiller): remove once call-v3 finalized
grpc_call* MakeServerCall(CallHandler call_handler,
ClientMetadataHandle client_initial_metadata,
ServerInterface* server, grpc_completion_queue* cq,
grpc_metadata_array* publish_initial_metadata);
} // namespace grpc_core
// Create a new call based on \a args.
@@ -349,11 +285,6 @@ void* grpc_call_tracer_get(grpc_call* call);
uint8_t grpc_call_is_client(grpc_call* call);
// Get the estimated memory size for a call BESIDES the call stack. Combined
// with the size of the call stack, it helps estimate the arena size for the
// initial call.
size_t grpc_call_get_initial_size_estimate();
// Return an appropriate compression algorithm for the requested compression \a
// level in the context of \a call.
grpc_compression_algorithm grpc_call_compression_for_level(

@@ -0,0 +1,286 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/call_utils.h"
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/match.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/validate_metadata.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
void PublishMetadataArray(grpc_metadata_batch* md, grpc_metadata_array* array,
bool is_client) {
const auto md_count = md->count();
if (md_count > array->capacity) {
array->capacity =
std::max(array->capacity + md->count(), array->capacity * 3 / 2);
array->metadata = static_cast<grpc_metadata*>(
gpr_realloc(array->metadata, sizeof(grpc_metadata) * array->capacity));
}
PublishToAppEncoder encoder(array, md, is_client);
md->Encode(&encoder);
}
void CToMetadata(grpc_metadata* metadata, size_t count,
grpc_metadata_batch* b) {
for (size_t i = 0; i < count; i++) {
grpc_metadata* md = &metadata[i];
auto key = StringViewFromSlice(md->key);
// Filter "content-length metadata"
if (key == "content-length") continue;
b->Append(key, Slice(CSliceRef(md->value)),
[md](absl::string_view error, const Slice& value) {
gpr_log(GPR_DEBUG, "Append error: %s",
absl::StrCat("key=", StringViewFromSlice(md->key),
" error=", error,
" value=", value.as_string_view())
.c_str());
});
}
}
const char* GrpcOpTypeName(grpc_op_type op) {
switch (op) {
case GRPC_OP_SEND_INITIAL_METADATA:
return "SendInitialMetadata";
case GRPC_OP_SEND_MESSAGE:
return "SendMessage";
case GRPC_OP_SEND_STATUS_FROM_SERVER:
return "SendStatusFromServer";
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
return "SendCloseFromClient";
case GRPC_OP_RECV_MESSAGE:
return "RecvMessage";
case GRPC_OP_RECV_CLOSE_ON_SERVER:
return "RecvCloseOnServer";
case GRPC_OP_RECV_INITIAL_METADATA:
return "RecvInitialMetadata";
case GRPC_OP_RECV_STATUS_ON_CLIENT:
return "RecvStatusOnClient";
}
Crash("Unreachable");
}
////////////////////////////////////////////////////////////////////////
// WaitForCqEndOp
Poll<Empty> WaitForCqEndOp::operator()() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sWaitForCqEndOp[%p] %s",
Activity::current()->DebugTag().c_str(), this,
StateString(state_).c_str());
}
if (auto* n = absl::get_if<NotStarted>(&state_)) {
if (n->is_closure) {
ExecCtx::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(n->tag),
std::move(n->error));
return Empty{};
} else {
auto not_started = std::move(*n);
auto& started =
state_.emplace<Started>(GetContext<Activity>()->MakeOwningWaker());
grpc_cq_end_op(
not_started.cq, not_started.tag, std::move(not_started.error),
[](void* p, grpc_cq_completion*) {
auto started = static_cast<Started*>(p);
auto wakeup = std::move(started->waker);
started->done.store(true, std::memory_order_release);
wakeup.Wakeup();
},
&started, &started.completion);
}
}
auto& started = absl::get<Started>(state_);
if (started.done.load(std::memory_order_acquire)) {
return Empty{};
} else {
return Pending{};
}
}
std::string WaitForCqEndOp::StateString(const State& state) {
return Match(
state,
[](const NotStarted& x) {
return absl::StrFormat(
"NotStarted{is_closure=%s, tag=%p, error=%s, cq=%p}",
x.is_closure ? "true" : "false", x.tag, x.error.ToString(), x.cq);
},
[](const Started& x) {
return absl::StrFormat(
"Started{completion=%p, done=%s}", &x.completion,
x.done.load(std::memory_order_relaxed) ? "true" : "false");
},
[](const Invalid&) -> std::string { return "Invalid{}"; });
}
////////////////////////////////////////////////////////////////////////
// MessageReceiver
StatusFlag MessageReceiver::FinishRecvMessage(
ValueOrFailure<absl::optional<MessageHandle>> result) {
if (!result.ok()) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[call] RecvMessage: outstanding_recv "
"finishes: received end-of-stream with error",
Activity::current()->DebugTag().c_str());
}
*recv_message_ = nullptr;
recv_message_ = nullptr;
return Failure{};
}
if (!result->has_value()) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[call] RecvMessage: outstanding_recv "
"finishes: received end-of-stream",
Activity::current()->DebugTag().c_str());
}
*recv_message_ = nullptr;
recv_message_ = nullptr;
return Success{};
}
MessageHandle& message = **result;
test_only_last_message_flags_ = message->flags();
if ((message->flags() & GRPC_WRITE_INTERNAL_COMPRESS) &&
(incoming_compression_algorithm_ != GRPC_COMPRESS_NONE)) {
*recv_message_ = grpc_raw_compressed_byte_buffer_create(
nullptr, 0, incoming_compression_algorithm_);
} else {
*recv_message_ = grpc_raw_byte_buffer_create(nullptr, 0);
}
grpc_slice_buffer_move_into(message->payload()->c_slice_buffer(),
&(*recv_message_)->data.raw.slice_buffer);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[call] RecvMessage: outstanding_recv "
"finishes: received %" PRIdPTR " byte message",
Activity::current()->DebugTag().c_str(),
(*recv_message_)->data.raw.slice_buffer.length);
}
recv_message_ = nullptr;
return Success{};
}
////////////////////////////////////////////////////////////////////////
// MakeErrorString
std::string MakeErrorString(const ServerMetadata* trailing_metadata) {
std::string out = absl::StrCat(
trailing_metadata->get(GrpcStatusFromWire()).value_or(false)
? "Error received from peer"
: "Error generated by client",
"grpc_status: ",
grpc_status_code_to_string(trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN)));
if (const Slice* message =
trailing_metadata->get_pointer(GrpcMessageMetadata())) {
absl::StrAppend(&out, "\ngrpc_message: ", message->as_string_view());
}
if (auto annotations = trailing_metadata->get_pointer(GrpcStatusContext())) {
absl::StrAppend(&out, "\nStatus Context:");
for (const std::string& annotation : *annotations) {
absl::StrAppend(&out, "\n ", annotation);
}
}
return out;
}
bool ValidateMetadata(size_t count, grpc_metadata* metadata) {
if (count > INT_MAX) {
return false;
}
for (size_t i = 0; i < count; i++) {
grpc_metadata* md = &metadata[i];
if (!GRPC_LOG_IF_ERROR("validate_metadata",
grpc_validate_header_key_is_legal(md->key))) {
return false;
} else if (!grpc_is_binary_header_internal(md->key) &&
!GRPC_LOG_IF_ERROR(
"validate_metadata",
grpc_validate_header_nonbin_value_is_legal(md->value))) {
return false;
} else if (GRPC_SLICE_LENGTH(md->value) >= UINT32_MAX) {
// HTTP2 hpack encoding has a maximum limit.
return false;
}
}
return true;
}
void EndOpImmediately(grpc_completion_queue* cq, void* notify_tag,
bool is_notify_tag_closure) {
if (!is_notify_tag_closure) {
CHECK(grpc_cq_begin_op(cq, notify_tag));
grpc_cq_end_op(
cq, notify_tag, absl::OkStatus(),
[](void*, grpc_cq_completion* completion) { gpr_free(completion); },
nullptr,
static_cast<grpc_cq_completion*>(
gpr_malloc(sizeof(grpc_cq_completion))));
} else {
Closure::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(notify_tag),
absl::OkStatus());
}
}
} // namespace grpc_core
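
A simplified standalone sketch of the polling pattern WaitForCqEndOp implements (std::function stands in for the completion-queue callback and there is no real Waker here; all names below are illustrative): the first poll starts the notification exactly once, and later polls stay pending until the completion callback flips an atomic flag.

#include <atomic>
#include <cstdio>
#include <functional>
#include <utility>

// Illustrative "start once, then wait for the callback" promise shape.
class WaitForAsyncOp {
 public:
  // start_fn receives the completion callback to invoke when the op finishes.
  explicit WaitForAsyncOp(std::function<void(std::function<void()>)> start_fn)
      : start_fn_(std::move(start_fn)) {}

  // Poll: true once the async operation has completed.
  bool operator()() {
    if (!started_) {
      started_ = true;
      // First poll: kick off the operation and hand over a callback that
      // marks us done (the real code would also wake the owning activity).
      start_fn_([this] { done_.store(true, std::memory_order_release); });
    }
    return done_.load(std::memory_order_acquire);
  }

 private:
  std::function<void(std::function<void()>)> start_fn_;
  bool started_ = false;
  std::atomic<bool> done_{false};
};

int main() {
  std::function<void()> complete;  // stands in for the cq's completion path
  WaitForAsyncOp op(
      [&](std::function<void()> on_done) { complete = std::move(on_done); });
  std::printf("poll 1: %s\n", op() ? "done" : "pending");  // pending
  complete();                                              // "cq" finishes
  std::printf("poll 2: %s\n", op() ? "done" : "pending");  // done
  return 0;
}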

@@ -0,0 +1,457 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_CALL_UTILS_H
#define GRPC_SRC_CORE_LIB_SURFACE_CALL_UTILS_H
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/cancel_callback.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/seq.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/message.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
class PublishToAppEncoder {
public:
explicit PublishToAppEncoder(grpc_metadata_array* dest,
const grpc_metadata_batch* encoding,
bool is_client)
: dest_(dest), encoding_(encoding), is_client_(is_client) {}
void Encode(const Slice& key, const Slice& value) {
Append(key.c_slice(), value.c_slice());
}
// Catch anything that is not explicitly handled, and do not publish it to the
// application. If new metadata is added to a batch that needs to be
// published, it should be called out here.
template <typename Which>
void Encode(Which, const typename Which::ValueType&) {}
void Encode(UserAgentMetadata, const Slice& slice) {
Append(UserAgentMetadata::key(), slice);
}
void Encode(HostMetadata, const Slice& slice) {
Append(HostMetadata::key(), slice);
}
void Encode(GrpcPreviousRpcAttemptsMetadata, uint32_t count) {
Append(GrpcPreviousRpcAttemptsMetadata::key(), count);
}
void Encode(GrpcRetryPushbackMsMetadata, Duration count) {
Append(GrpcRetryPushbackMsMetadata::key(), count.millis());
}
void Encode(LbTokenMetadata, const Slice& slice) {
Append(LbTokenMetadata::key(), slice);
}
private:
void Append(absl::string_view key, int64_t value) {
Append(StaticSlice::FromStaticString(key).c_slice(),
Slice::FromInt64(value).c_slice());
}
void Append(absl::string_view key, const Slice& value) {
Append(StaticSlice::FromStaticString(key).c_slice(), value.c_slice());
}
void Append(grpc_slice key, grpc_slice value) {
if (dest_->count == dest_->capacity) {
Crash(absl::StrCat(
"Too many metadata entries: capacity=", dest_->capacity, " on ",
is_client_ ? "client" : "server", " encoding ", encoding_->count(),
" elements: ", encoding_->DebugString().c_str()));
}
auto* mdusr = &dest_->metadata[dest_->count++];
mdusr->key = key;
mdusr->value = value;
}
grpc_metadata_array* const dest_;
const grpc_metadata_batch* const encoding_;
const bool is_client_;
};
void PublishMetadataArray(grpc_metadata_batch* md, grpc_metadata_array* array,
bool is_client);
void CToMetadata(grpc_metadata* metadata, size_t count, grpc_metadata_batch* b);
const char* GrpcOpTypeName(grpc_op_type op);
bool ValidateMetadata(size_t count, grpc_metadata* metadata);
void EndOpImmediately(grpc_completion_queue* cq, void* notify_tag,
bool is_notify_tag_closure);
inline bool AreWriteFlagsValid(uint32_t flags) {
// check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set
const uint32_t allowed_write_positions =
(GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK);
const uint32_t invalid_positions = ~allowed_write_positions;
return !(flags & invalid_positions);
}
inline bool AreInitialMetadataFlagsValid(uint32_t flags) {
// check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set
uint32_t invalid_positions = ~GRPC_INITIAL_METADATA_USED_MASK;
return !(flags & invalid_positions);
}
// One batch operation
// Wrapper around promise steps to perform one of the batch operations for the
// legacy gRPC surface API.
template <typename SetupResult, grpc_op_type kOp>
class OpHandlerImpl {
public:
using PromiseFactory = promise_detail::OncePromiseFactory<void, SetupResult>;
using Promise = typename PromiseFactory::Promise;
static_assert(!std::is_same<Promise, void>::value,
"PromiseFactory must return a promise");
OpHandlerImpl() : state_(State::kDismissed) {}
explicit OpHandlerImpl(SetupResult result) : state_(State::kPromiseFactory) {
Construct(&promise_factory_, std::move(result));
}
~OpHandlerImpl() {
switch (state_) {
case State::kDismissed:
break;
case State::kPromiseFactory:
Destruct(&promise_factory_);
break;
case State::kPromise:
Destruct(&promise_);
break;
}
}
OpHandlerImpl(const OpHandlerImpl&) = delete;
OpHandlerImpl& operator=(const OpHandlerImpl&) = delete;
OpHandlerImpl(OpHandlerImpl&& other) noexcept : state_(other.state_) {
switch (state_) {
case State::kDismissed:
break;
case State::kPromiseFactory:
Construct(&promise_factory_, std::move(other.promise_factory_));
break;
case State::kPromise:
Construct(&promise_, std::move(other.promise_));
break;
}
}
OpHandlerImpl& operator=(OpHandlerImpl&& other) noexcept = delete;
Poll<StatusFlag> operator()() {
switch (state_) {
case State::kDismissed:
return Success{};
case State::kPromiseFactory: {
auto promise = promise_factory_.Make();
Destruct(&promise_factory_);
Construct(&promise_, std::move(promise));
state_ = State::kPromise;
}
ABSL_FALLTHROUGH_INTENDED;
case State::kPromise: {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "%sBeginPoll %s",
Activity::current()->DebugTag().c_str(), OpName());
}
auto r = poll_cast<StatusFlag>(promise_());
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_INFO, "%sEndPoll %s --> %s",
Activity::current()->DebugTag().c_str(), OpName(),
r.pending() ? "PENDING" : (r.value().ok() ? "OK" : "FAILURE"));
}
return r;
}
}
GPR_UNREACHABLE_CODE(return Pending{});
}
private:
enum class State {
kDismissed,
kPromiseFactory,
kPromise,
};
static const char* OpName() { return GrpcOpTypeName(kOp); }
// gcc-12 has problems with this being a variant
GPR_NO_UNIQUE_ADDRESS State state_;
union {
PromiseFactory promise_factory_;
Promise promise_;
};
};
template <grpc_op_type op_type, typename PromiseFactory>
auto OpHandler(PromiseFactory setup) {
return OpHandlerImpl<PromiseFactory, op_type>(std::move(setup));
}
class BatchOpIndex {
public:
BatchOpIndex(const grpc_op* ops, size_t nops) : ops_(ops) {
for (size_t i = 0; i < nops; i++) {
idxs_[ops[i].op] = static_cast<uint8_t>(i);
}
}
// 1. Check if op_type is in the batch
// 2. If it is, run the setup function in the context of the API call (NOT in
// the call party).
// 3. This setup function returns a promise factory which we'll then run *in*
// the party to do initial setup, and have it return the promise that we'll
// ultimately poll on til completion.
// Once we express our surface API in terms of core internal types this whole
// dance will go away.
template <grpc_op_type op_type, typename SetupFn>
auto OpHandler(SetupFn setup) {
using SetupResult = decltype(std::declval<SetupFn>()(grpc_op()));
using Impl = OpHandlerImpl<SetupResult, op_type>;
if (const grpc_op* op = this->op(op_type)) {
auto r = setup(*op);
return Impl(std::move(r));
} else {
return Impl();
}
}
const grpc_op* op(grpc_op_type op_type) const {
return idxs_[op_type] == 255 ? nullptr : &ops_[idxs_[op_type]];
}
private:
const grpc_op* const ops_;
std::array<uint8_t, 8> idxs_{255, 255, 255, 255, 255, 255, 255, 255};
};
// Defines a promise that calls grpc_cq_end_op() (on first poll) and then waits
// for the callback supplied to grpc_cq_end_op() to be called, before resolving
// to Empty{}
class WaitForCqEndOp {
public:
WaitForCqEndOp(bool is_closure, void* tag, grpc_error_handle error,
grpc_completion_queue* cq)
: state_{NotStarted{is_closure, tag, std::move(error), cq}} {}
Poll<Empty> operator()();
WaitForCqEndOp(const WaitForCqEndOp&) = delete;
WaitForCqEndOp& operator=(const WaitForCqEndOp&) = delete;
WaitForCqEndOp(WaitForCqEndOp&& other) noexcept
: state_(std::move(absl::get<NotStarted>(other.state_))) {
other.state_.emplace<Invalid>();
}
WaitForCqEndOp& operator=(WaitForCqEndOp&& other) noexcept {
state_ = std::move(absl::get<NotStarted>(other.state_));
other.state_.emplace<Invalid>();
return *this;
}
private:
struct NotStarted {
bool is_closure;
void* tag;
grpc_error_handle error;
grpc_completion_queue* cq;
};
struct Started {
explicit Started(Waker waker) : waker(std::move(waker)) {}
Waker waker;
grpc_cq_completion completion;
std::atomic<bool> done{false};
};
struct Invalid {};
using State = absl::variant<NotStarted, Started, Invalid>;
static std::string StateString(const State& state);
State state_{Invalid{}};
};
template <typename FalliblePart, typename FinalPart>
auto InfallibleBatch(FalliblePart fallible_part, FinalPart final_part,
bool is_notify_tag_closure, void* notify_tag,
grpc_completion_queue* cq) {
// Perform fallible_part, then final_part, then wait for the
// completion queue to be done.
// If cancelled, we'll ensure the completion queue is notified.
// There's a slight bug here in that if we cancel this promise after
// the WaitForCqEndOp we'll double post -- but we don't currently do that.
return OnCancelFactory(
[fallible_part = std::move(fallible_part),
final_part = std::move(final_part), is_notify_tag_closure, notify_tag,
cq]() mutable {
return LogPollBatch(notify_tag,
Seq(std::move(fallible_part), std::move(final_part),
[is_notify_tag_closure, notify_tag, cq]() {
return WaitForCqEndOp(is_notify_tag_closure,
notify_tag,
absl::OkStatus(), cq);
}));
},
[cq, notify_tag]() {
grpc_cq_end_op(
cq, notify_tag, absl::OkStatus(),
[](void*, grpc_cq_completion* completion) { delete completion; },
nullptr, new grpc_cq_completion);
});
}
template <typename FalliblePart>
auto FallibleBatch(FalliblePart fallible_part, bool is_notify_tag_closure,
void* notify_tag, grpc_completion_queue* cq) {
// Perform fallible_part, then wait for the completion queue to be done.
// If cancelled, we'll ensure the completion queue is notified.
// There's a slight bug here in that if we cancel this promise after
// the WaitForCqEndOp we'll double post -- but we don't currently do that.
return OnCancelFactory(
[fallible_part = std::move(fallible_part), is_notify_tag_closure,
notify_tag, cq]() mutable {
return LogPollBatch(
notify_tag,
Seq(std::move(fallible_part),
[is_notify_tag_closure, notify_tag, cq](StatusFlag r) {
return WaitForCqEndOp(is_notify_tag_closure, notify_tag,
StatusCast<absl::Status>(r), cq);
}));
},
[cq]() {
grpc_cq_end_op(
cq, nullptr, absl::CancelledError(),
[](void*, grpc_cq_completion* completion) { delete completion; },
nullptr, new grpc_cq_completion);
});
}
template <typename F>
class PollBatchLogger {
public:
PollBatchLogger(void* tag, F f) : tag_(tag), f_(std::move(f)) {}
auto operator()() {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "Poll batch %p", tag_);
}
auto r = f_();
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "Poll batch %p --> %s", tag_, ResultString(r).c_str());
}
return r;
}
private:
template <typename T>
static std::string ResultString(Poll<T> r) {
if (r.pending()) return "PENDING";
return ResultString(r.value());
}
static std::string ResultString(Empty) { return "DONE"; }
void* tag_;
F f_;
};
template <typename F>
PollBatchLogger<F> LogPollBatch(void* tag, F f) {
return PollBatchLogger<F>(tag, std::move(f));
}
class MessageReceiver {
public:
grpc_compression_algorithm incoming_compression_algorithm() const {
return incoming_compression_algorithm_;
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm incoming_compression_algorithm) {
incoming_compression_algorithm_ = incoming_compression_algorithm;
}
uint32_t last_message_flags() const { return test_only_last_message_flags_; }
template <typename Puller>
auto MakeBatchOp(const grpc_op& op, Puller* puller) {
CHECK_EQ(recv_message_, nullptr);
recv_message_ = op.data.recv_message.recv_message;
return [this, puller]() mutable {
return Map(puller->PullMessage(),
[this](ValueOrFailure<absl::optional<MessageHandle>> msg) {
return FinishRecvMessage(std::move(msg));
});
};
}
private:
StatusFlag FinishRecvMessage(
ValueOrFailure<absl::optional<MessageHandle>> result);
grpc_byte_buffer** recv_message_ = nullptr;
uint32_t test_only_last_message_flags_ = 0;
// Compression algorithm for incoming data
grpc_compression_algorithm incoming_compression_algorithm_ =
GRPC_COMPRESS_NONE;
};
std::string MakeErrorString(const ServerMetadata* trailing_metadata);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_CALL_UTILS_H
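
A standalone sketch of the dispatch idea behind BatchOpIndex and OpHandler (grpc_op is replaced by a plain struct and handlers by std::function; only the "index the batch once, build a handler per present op, dismiss absent ops" shape is taken from the code above, and the op codes used in main are illustrative):

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

// Simplified op record: an op code plus a payload.
struct Op {
  int op;
  int payload;
};

class BatchIndex {
 public:
  BatchIndex(const Op* ops, size_t nops) : ops_(ops) {
    idxs_.fill(255);  // 255 == "op type not present in this batch"
    for (size_t i = 0; i < nops; i++) idxs_[ops[i].op] = static_cast<uint8_t>(i);
  }

  // Build a handler only if the op is present; otherwise return a dismissed
  // handler that immediately reports success.
  std::function<bool()> Handler(
      int op_type,
      std::function<std::function<bool()>(const Op&)> setup) const {
    if (idxs_[op_type] == 255) return [] { return true; };
    return setup(ops_[idxs_[op_type]]);
  }

 private:
  const Op* ops_;
  std::array<uint8_t, 8> idxs_;
};

int main() {
  const Op batch[] = {{/*op code, e.g. send-message*/ 1, 42}};
  BatchIndex index(batch, 1);
  auto send_message = index.Handler(1, [](const Op& op) {
    return [payload = op.payload] {
      std::printf("send message payload=%d\n", payload);
      return true;
    };
  });
  auto recv_message = index.Handler(5, [](const Op&) {
    return [] { return true; };  // never built: op 5 is absent from the batch
  });
  std::printf("batch ok: %s\n",
              (send_message() && recv_message()) ? "true" : "false");
  return 0;
}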

@@ -44,6 +44,7 @@
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/call_arena_allocator.h"
#include "src/core/lib/transport/call_destination.h"
#include "src/core/lib/transport/connectivity_state.h"
// Forward declaration to avoid dependency loop.
@@ -54,7 +55,7 @@ namespace grpc_core {
// Forward declaration to avoid dependency loop.
class Transport;
class Channel : public InternallyRefCounted<Channel>,
class Channel : public UnstartedCallDestination,
public CppImplOf<Channel, grpc_channel> {
public:
struct RegisteredCall {
@@ -68,18 +69,6 @@ class Channel : public InternallyRefCounted<Channel>,
~RegisteredCall();
};
// Though internally ref counted channels expose their "Ref" method to
// create a RefCountedPtr to themselves. The OrphanablePtr owner is the
// singleton decision maker on whether the channel should be destroyed or
// not.
// TODO(ctiller): in a future change (I have it written) these will be removed
// and substituted with DualRefCounted<Channel> as a base.
RefCountedPtr<Channel> Ref() { return InternallyRefCounted<Channel>::Ref(); }
template <typename T>
RefCountedPtr<T> RefAsSubclass() {
return InternallyRefCounted<Channel>::RefAsSubclass<T>();
}
virtual bool IsLame() const = 0;
// TODO(roth): This should return a C++ type.
@@ -164,7 +153,7 @@ class Channel : public InternallyRefCounted<Channel>,
/// The same as grpc_channel_destroy, but doesn't create an ExecCtx, and so
/// is safe to use from within core.
inline void grpc_channel_destroy_internal(grpc_channel* channel) {
grpc_core::Channel::FromC(channel)->Orphan();
grpc_core::Channel::FromC(channel)->Unref();
}
// Return the channel's compression options.

@@ -14,6 +14,8 @@
// limitations under the License.
//
#include "src/core/lib/surface/channel_create.h"
#include "absl/log/check.h"
#include <grpc/grpc.h>
@@ -34,7 +36,7 @@
namespace grpc_core {
absl::StatusOr<OrphanablePtr<Channel>> ChannelCreate(
absl::StatusOr<RefCountedPtr<Channel>> ChannelCreate(
std::string target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type, Transport* optional_transport) {
global_stats().IncrementClientChannelsCreated();
@@ -80,7 +82,7 @@ absl::StatusOr<OrphanablePtr<Channel>> ChannelCreate(
args = args.SetObject(optional_transport);
}
// Delegate to appropriate channel impl.
if (!IsCallV3Enabled()) {
if (!args.GetBool(GRPC_ARG_USE_V3_STACK).value_or(false)) {
return LegacyChannel::Create(std::move(target), std::move(args),
channel_stack_type);
}

@@ -24,16 +24,17 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_stack_type.h"
#define GRPC_ARG_USE_V3_STACK "grpc.internal.use_v3_stack"
namespace grpc_core {
class Transport;
// Creates a client channel.
absl::StatusOr<OrphanablePtr<Channel>> ChannelCreate(
absl::StatusOr<RefCountedPtr<Channel>> ChannelCreate(
std::string target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type, Transport* optional_transport);

@@ -251,8 +251,8 @@ ChannelInit::StackConfig ChannelInit::BuildStackConfig(
MutexLock lock(m);
// List the channel stack type (since we'll be repeatedly printing graphs in
// this loop).
gpr_log(GPR_INFO,
"ORDERED CHANNEL STACK %s:", grpc_channel_stack_type_string(type));
LOG(INFO) << "ORDERED CHANNEL STACK "
<< grpc_channel_stack_type_string(type) << ":";
// First build up a map of filter -> file:line: strings, because it helps
// the readability of this log to get later fields aligned vertically.
std::map<const grpc_channel_filter*, std::string> loc_strs;

@@ -0,0 +1,423 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/client_call.h"
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/bitset.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/all_ok.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/telemetry/stats.h"
#include "src/core/telemetry/stats_data.h"
namespace grpc_core {
namespace {
grpc_call_error ValidateClientBatch(const grpc_op* ops, size_t nops) {
BitSet<8> got_ops;
for (size_t op_idx = 0; op_idx < nops; op_idx++) {
const grpc_op& op = ops[op_idx];
switch (op.op) {
case GRPC_OP_SEND_INITIAL_METADATA:
if (!AreInitialMetadataFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
if (!ValidateMetadata(op.data.send_initial_metadata.count,
op.data.send_initial_metadata.metadata)) {
return GRPC_CALL_ERROR_INVALID_METADATA;
}
break;
case GRPC_OP_SEND_MESSAGE:
if (!AreWriteFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
case GRPC_OP_RECV_INITIAL_METADATA:
case GRPC_OP_RECV_MESSAGE:
case GRPC_OP_RECV_STATUS_ON_CLIENT:
if (op.flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
case GRPC_OP_SEND_STATUS_FROM_SERVER:
return GRPC_CALL_ERROR_NOT_ON_CLIENT;
}
if (got_ops.is_set(op.op)) return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
got_ops.set(op.op);
}
return GRPC_CALL_OK;
}
} // namespace
ClientCall::ClientCall(
grpc_call*, uint32_t, grpc_completion_queue* cq, Slice path,
absl::optional<Slice> authority, bool registered_method, Timestamp deadline,
grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination)
: Call(false, deadline, std::move(arena), event_engine),
cq_(cq),
call_destination_(std::move(destination)),
compression_options_(compression_options) {
global_stats().IncrementClientCallsCreated();
send_initial_metadata_->Set(HttpPathMetadata(), std::move(path));
if (authority.has_value()) {
send_initial_metadata_->Set(HttpAuthorityMetadata(), std::move(*authority));
}
send_initial_metadata_->Set(
GrpcRegisteredMethod(),
reinterpret_cast<void*>(static_cast<uintptr_t>(registered_method)));
if (deadline != Timestamp::InfFuture()) {
send_initial_metadata_->Set(GrpcTimeoutMetadata(), deadline);
UpdateDeadline(deadline);
}
}
grpc_call_error ClientCall::StartBatch(const grpc_op* ops, size_t nops,
void* notify_tag,
bool is_notify_tag_closure) {
if (nops == 0) {
EndOpImmediately(cq_, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
const grpc_call_error validation_result = ValidateClientBatch(ops, nops);
if (validation_result != GRPC_CALL_OK) {
return validation_result;
}
CommitBatch(ops, nops, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
void ClientCall::CancelWithError(grpc_error_handle error) {
cancel_status_.Set(new absl::Status(error));
auto cur_state = call_state_.load(std::memory_order_acquire);
while (true) {
if (grpc_call_trace.enabled()) {
LOG(INFO) << DebugTag() << "CancelWithError "
<< GRPC_DUMP_ARGS(cur_state, error);
}
switch (cur_state) {
case kCancelled:
return;
case kUnstarted:
if (call_state_.compare_exchange_strong(cur_state, kCancelled,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
return;
}
break;
case kStarted:
started_call_initiator_.SpawnInfallible(
"CancelWithError", [self = WeakRefAsSubclass<ClientCall>(),
error = std::move(error)]() mutable {
self->started_call_initiator_.Cancel(std::move(error));
return Empty{};
});
return;
default:
if (call_state_.compare_exchange_strong(cur_state, kCancelled,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
auto* unordered_start = reinterpret_cast<UnorderedStart*>(cur_state);
while (unordered_start != nullptr) {
auto next = unordered_start->next;
delete unordered_start;
unordered_start = next;
}
return;
}
}
}
}
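// Schedules a committed batch: if the call has started it is spawned on
// started_call_initiator_ immediately; if not yet started it is pushed onto
// the lock-free UnorderedStart list for StartCall to drain later; if the call
// was cancelled the batch is dropped.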
template <typename Batch>
void ClientCall::ScheduleCommittedBatch(Batch batch) {
auto cur_state = call_state_.load(std::memory_order_acquire);
while (true) {
switch (cur_state) {
case kUnstarted:
default: { // UnorderedStart
auto pending = std::make_unique<UnorderedStart>();
pending->start_pending_batch = [this,
batch = std::move(batch)]() mutable {
started_call_initiator_.SpawnInfallible("batch", std::move(batch));
};
while (true) {
pending->next = reinterpret_cast<UnorderedStart*>(cur_state);
if (call_state_.compare_exchange_strong(
cur_state, reinterpret_cast<uintptr_t>(pending.get()),
std::memory_order_acq_rel, std::memory_order_acquire)) {
std::ignore = pending.release();
return;
}
if (cur_state == kStarted) {
pending->start_pending_batch();
return;
}
if (cur_state == kCancelled) {
return;
}
}
}
case kStarted:
started_call_initiator_.SpawnInfallible("batch", std::move(batch));
return;
case kCancelled:
return;
}
}
}
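// Builds and starts the underlying call with the prepared initial metadata,
// then flips call_state_ to kStarted and runs any batches that were queued
// while the call was still unstarted.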
void ClientCall::StartCall(const grpc_op& send_initial_metadata_op) {
auto cur_state = call_state_.load(std::memory_order_acquire);
CToMetadata(send_initial_metadata_op.data.send_initial_metadata.metadata,
send_initial_metadata_op.data.send_initial_metadata.count,
send_initial_metadata_.get());
PrepareOutgoingInitialMetadata(send_initial_metadata_op,
*send_initial_metadata_);
auto call = MakeCallPair(std::move(send_initial_metadata_), event_engine(),
arena()->Ref());
started_call_initiator_ = std::move(call.initiator);
call_destination_->StartCall(std::move(call.handler));
while (true) {
switch (cur_state) {
case kUnstarted:
if (call_state_.compare_exchange_strong(cur_state, kStarted,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
return;
}
break;
case kStarted:
Crash("StartCall called twice"); // probably we crash earlier...
case kCancelled:
return;
default: { // UnorderedStart
if (call_state_.compare_exchange_strong(cur_state, kStarted,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
auto unordered_start = reinterpret_cast<UnorderedStart*>(cur_state);
while (unordered_start != nullptr) {
unordered_start->start_pending_batch();
auto next = unordered_start->next;
delete unordered_start;
unordered_start = next;
}
return;
}
break;
}
}
}
}
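// Translates a validated batch of grpc_ops into promises: a lone
// send-initial-metadata op just starts the call; otherwise each op becomes a
// handler and the handlers are composed and scheduled as one batch.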
void ClientCall::CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) {
if (nops == 1 && ops[0].op == GRPC_OP_SEND_INITIAL_METADATA) {
StartCall(ops[0]);
EndOpImmediately(cq_, notify_tag, is_notify_tag_closure);
return;
}
if (!is_notify_tag_closure) grpc_cq_begin_op(cq_, notify_tag);
BatchOpIndex op_index(ops, nops);
auto send_message =
op_index.OpHandler<GRPC_OP_SEND_MESSAGE>([this](const grpc_op& op) {
SliceBuffer send;
grpc_slice_buffer_swap(
&op.data.send_message.send_message->data.raw.slice_buffer,
send.c_slice_buffer());
auto msg = arena()->MakePooled<Message>(std::move(send), op.flags);
return [this, msg = std::move(msg)]() mutable {
return started_call_initiator_.PushMessage(std::move(msg));
};
});
auto send_close_from_client =
op_index.OpHandler<GRPC_OP_SEND_CLOSE_FROM_CLIENT>(
[this](const grpc_op&) {
return [this]() {
started_call_initiator_.FinishSends();
return Success{};
};
});
auto recv_message =
op_index.OpHandler<GRPC_OP_RECV_MESSAGE>([this](const grpc_op& op) {
return message_receiver_.MakeBatchOp(op, &started_call_initiator_);
});
auto recv_initial_metadata =
op_index.OpHandler<GRPC_OP_RECV_INITIAL_METADATA>([this](
const grpc_op& op) {
return [this,
array = op.data.recv_initial_metadata.recv_initial_metadata]() {
return Map(
started_call_initiator_.PullServerInitialMetadata(),
[this,
array](ValueOrFailure<absl::optional<ServerMetadataHandle>> md) {
ServerMetadataHandle metadata;
if (!md.ok() || !md->has_value()) {
is_trailers_only_ = true;
metadata = Arena::MakePooled<ServerMetadata>();
} else {
metadata = std::move(md->value());
is_trailers_only_ =
metadata->get(GrpcTrailersOnly()).value_or(false);
}
ProcessIncomingInitialMetadata(*metadata);
PublishMetadataArray(metadata.get(), array, true);
received_initial_metadata_ = std::move(metadata);
return Success{};
});
};
});
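// Sends (message, then half-close) and receives (initial metadata, then
// message) each run in order; the two pipelines run concurrently and the
// batch fails if either side fails.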
auto primary_ops = AllOk<StatusFlag>(
TrySeq(std::move(send_message), std::move(send_close_from_client)),
TrySeq(std::move(recv_initial_metadata), std::move(recv_message)));
if (const grpc_op* op = op_index.op(GRPC_OP_SEND_INITIAL_METADATA)) {
StartCall(*op);
}
if (const grpc_op* op = op_index.op(GRPC_OP_RECV_STATUS_ON_CLIENT)) {
auto out_status = op->data.recv_status_on_client.status;
auto out_status_details = op->data.recv_status_on_client.status_details;
auto out_error_string = op->data.recv_status_on_client.error_string;
auto out_trailing_metadata =
op->data.recv_status_on_client.trailing_metadata;
auto make_read_trailing_metadata = [this, out_status, out_status_details,
out_error_string,
out_trailing_metadata]() {
return Map(
started_call_initiator_.PullServerTrailingMetadata(),
[this, out_status, out_status_details, out_error_string,
out_trailing_metadata](
ServerMetadataHandle server_trailing_metadata) {
if (grpc_call_trace.enabled()) {
LOG(INFO) << DebugTag() << "RecvStatusOnClient "
<< server_trailing_metadata->DebugString();
}
const auto status =
server_trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN);
*out_status = status;
Slice message_slice;
if (Slice* message = server_trailing_metadata->get_pointer(
GrpcMessageMetadata())) {
message_slice = message->Ref();
}
*out_status_details = message_slice.TakeCSlice();
if (out_error_string != nullptr) {
if (status != GRPC_STATUS_OK) {
*out_error_string = gpr_strdup(
MakeErrorString(server_trailing_metadata.get()).c_str());
} else {
*out_error_string = nullptr;
}
}
PublishMetadataArray(server_trailing_metadata.get(),
out_trailing_metadata, true);
received_trailing_metadata_ = std::move(server_trailing_metadata);
return Success{};
});
};
ScheduleCommittedBatch(InfallibleBatch(
std::move(primary_ops),
OpHandler<GRPC_OP_RECV_STATUS_ON_CLIENT>(OnCancelFactory(
std::move(make_read_trailing_metadata),
[this, out_status, out_status_details, out_error_string,
out_trailing_metadata]() {
auto* status = cancel_status_.Get();
CHECK_NE(status, nullptr);
*out_status = static_cast<grpc_status_code>(status->code());
*out_status_details =
Slice::FromCopiedString(status->message()).TakeCSlice();
if (out_error_string != nullptr) {
*out_error_string = nullptr;
}
out_trailing_metadata->count = 0;
})),
is_notify_tag_closure, notify_tag, cq_));
} else {
ScheduleCommittedBatch(FallibleBatch(
std::move(primary_ops), is_notify_tag_closure, notify_tag, cq_));
}
}
char* ClientCall::GetPeer() {
Slice peer_slice = GetPeerString();
if (!peer_slice.empty()) {
absl::string_view peer_string_view = peer_slice.as_string_view();
char* peer_string =
static_cast<char*>(gpr_malloc(peer_string_view.size() + 1));
memcpy(peer_string, peer_string_view.data(), peer_string_view.size());
peer_string[peer_string_view.size()] = '\0';
return peer_string;
}
return gpr_strdup("unknown");
}
grpc_call* MakeClientCall(
grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, Slice path, absl::optional<Slice> authority,
bool registered_method, Timestamp deadline,
grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination) {
return arena
->New<ClientCall>(parent_call, propagation_mask, cq, std::move(path),
std::move(authority), registered_method, deadline,
compression_options, event_engine, arena, destination)
->c_ptr();
}
} // namespace grpc_core

@ -0,0 +1,179 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_CLIENT_CALL_H
#define GRPC_SRC_CORE_LIB_SURFACE_CLIENT_CALL_H
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <atomic>
#include <cstdint>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/single_set_ptr.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_utils.h"
#include "src/core/lib/transport/metadata.h"
namespace grpc_core {
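// Client-side call for the promise-based call stack: translates the grpc_call
// batch API into promises run on a CallInitiator that is started against an
// UnstartedCallDestination.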
class ClientCall final
: public Call,
public DualRefCounted<ClientCall, NonPolymorphicRefCount, UnrefCallDestroy> {
public:
ClientCall(grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, Slice path,
absl::optional<Slice> authority, bool registered_method,
Timestamp deadline, grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination);
void CancelWithError(grpc_error_handle error) override;
bool is_trailers_only() const override { return is_trailers_only_; }
absl::string_view GetServerAuthority() const override {
Crash("unimplemented");
}
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
void ExternalRef() override { Ref().release(); }
void ExternalUnref() override { Unref(); }
void InternalRef(const char*) override { WeakRef().release(); }
void InternalUnref(const char*) override { WeakUnref(); }
void Orphaned() override {
// TODO(ctiller): only when we're not already finished
CancelWithError(absl::CancelledError());
}
void SetCompletionQueue(grpc_completion_queue*) override {
Crash("unimplemented");
}
grpc_compression_options compression_options() override {
return compression_options_;
}
grpc_call_stack* call_stack() override { return nullptr; }
char* GetPeer() override;
bool Completed() final { Crash("unimplemented"); }
bool failed_before_recv_message() const final { Crash("unimplemented"); }
grpc_compression_algorithm incoming_compression_algorithm() override {
return message_receiver_.incoming_compression_algorithm();
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) override {
message_receiver_.SetIncomingCompressionAlgorithm(algorithm);
}
uint32_t test_only_message_flags() override {
return message_receiver_.last_message_flags();
}
void Destroy() {
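// Hold an arena ref so the memory backing this call outlives the explicit
// destructor call below.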
auto arena = this->arena()->Ref();
this->~ClientCall();
}
private:
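// A batch committed before the call was started; kept in a linked list hung
// off call_state_ until StartCall drains it.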
struct UnorderedStart {
absl::AnyInvocable<void()> start_pending_batch;
UnorderedStart* next;
};
void CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure);
template <typename Batch>
void ScheduleCommittedBatch(Batch batch);
void StartCall(const grpc_op& send_initial_metadata_op);
std::string DebugTag() { return absl::StrFormat("CLIENT_CALL[%p]: ", this); }
// call_state_ is one of:
// 1. kUnstarted - call has not yet been started
// 2. pointer to an UnorderedStart - call has ops started, but no send initial
// metadata yet
// 3. kStarted - call has been started and call_initiator_ is ready
// 4. kCancelled - call was cancelled before starting
// In cases (1) and (2) send_initial_metadata_ is used to store the initial
// but unsent metadata.
// In case (3) started_call_initiator_ is used to store the call initiator.
// In case (4) no other state is used.
enum CallState : uintptr_t {
kUnstarted = 0,
kStarted = 1,
kCancelled = 2,
};
std::atomic<uintptr_t> call_state_{kUnstarted};
ClientMetadataHandle send_initial_metadata_{
Arena::MakePooled<ClientMetadata>()};
CallInitiator started_call_initiator_;
// Status passed to CancelWithError;
// if call_state_ == kCancelled then this is the authoritative status,
// otherwise the server trailing metadata from started_call_initiator_ is
// authoritative.
SingleSetPtr<absl::Status> cancel_status_;
MessageReceiver message_receiver_;
grpc_completion_queue* const cq_;
const RefCountedPtr<UnstartedCallDestination> call_destination_;
const grpc_compression_options compression_options_;
ServerMetadataHandle received_initial_metadata_;
ServerMetadataHandle received_trailing_metadata_;
bool is_trailers_only_;
};
grpc_call* MakeClientCall(
grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, Slice path, absl::optional<Slice> authority,
bool registered_method, Timestamp deadline,
grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_CLIENT_CALL_H

File diff suppressed because it is too large

@ -0,0 +1,370 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_FILTER_STACK_CALL_H
#define GRPC_SRC_CORE_LIB_SURFACE_FILTER_STACK_CALL_H
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <atomic>
#include <cstdint>
#include <string>
#include <vector>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/server/server_interface.h"
#include "src/core/telemetry/call_tracer.h"
#include "src/core/util/alloc.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
namespace grpc_core {
///////////////////////////////////////////////////////////////////////////////
// FilterStackCall
// To be removed once promise conversion is complete
class FilterStackCall final : public Call {
public:
~FilterStackCall() override {
gpr_free(static_cast<void*>(const_cast<char*>(final_info_.error_string)));
}
bool Completed() override {
return gpr_atm_acq_load(&received_final_op_atm_) != 0;
}
// TODO(ctiller): return absl::StatusOr<SomeSmartPointer<Call>>?
static grpc_error_handle Create(grpc_call_create_args* args,
grpc_call** out_call);
static Call* FromTopElem(grpc_call_element* elem) {
return FromCallStack(grpc_call_stack_from_top_element(elem));
}
grpc_call_stack* call_stack() override {
return reinterpret_cast<grpc_call_stack*>(
reinterpret_cast<char*>(this) +
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(*this)));
}
grpc_call_element* call_elem(size_t idx) {
return grpc_call_stack_element(call_stack(), idx);
}
CallCombiner* call_combiner() { return &call_combiner_; }
void CancelWithError(grpc_error_handle error) override;
void SetCompletionQueue(grpc_completion_queue* cq) override;
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
void ExternalRef() override { ext_ref_.Ref(); }
void ExternalUnref() override;
void InternalRef(const char* reason) override {
GRPC_CALL_STACK_REF(call_stack(), reason);
}
void InternalUnref(const char* reason) override {
GRPC_CALL_STACK_UNREF(call_stack(), reason);
}
bool is_trailers_only() const override {
bool result = is_trailers_only_;
DCHECK(!result || recv_initial_metadata_.TransportSize() == 0);
return result;
}
bool failed_before_recv_message() const override {
return call_failed_before_recv_message_;
}
uint32_t test_only_message_flags() override {
return test_only_last_message_flags_;
}
absl::string_view GetServerAuthority() const override {
const Slice* authority_metadata =
recv_initial_metadata_.get_pointer(HttpAuthorityMetadata());
if (authority_metadata == nullptr) return "";
return authority_metadata->as_string_view();
}
static size_t InitialSizeEstimate() {
return sizeof(FilterStackCall) +
sizeof(BatchControl) * kMaxConcurrentBatches;
}
char* GetPeer() final;
grpc_compression_options compression_options() override {
return channel_->compression_options();
}
void DeleteThis() {
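// Hold an arena ref so the memory backing this call outlives the explicit
// destructor call below.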
auto arena = this->arena()->Ref();
this->~FilterStackCall();
}
Channel* channel() const { return channel_.get(); }
private:
class ScopedContext : public promise_detail::Context<Arena> {
public:
explicit ScopedContext(FilterStackCall* call)
: promise_detail::Context<Arena>(call->arena()) {}
};
static constexpr gpr_atm kRecvNone = 0;
static constexpr gpr_atm kRecvInitialMetadataFirst = 1;
enum class PendingOp {
kRecvMessage,
kRecvInitialMetadata,
kRecvTrailingMetadata,
kSends
};
static intptr_t PendingOpMask(PendingOp op) {
return static_cast<intptr_t>(1) << static_cast<intptr_t>(op);
}
static std::string PendingOpString(intptr_t pending_ops) {
std::vector<absl::string_view> pending_op_strings;
if (pending_ops & PendingOpMask(PendingOp::kRecvMessage)) {
pending_op_strings.push_back("kRecvMessage");
}
if (pending_ops & PendingOpMask(PendingOp::kRecvInitialMetadata)) {
pending_op_strings.push_back("kRecvInitialMetadata");
}
if (pending_ops & PendingOpMask(PendingOp::kRecvTrailingMetadata)) {
pending_op_strings.push_back("kRecvTrailingMetadata");
}
if (pending_ops & PendingOpMask(PendingOp::kSends)) {
pending_op_strings.push_back("kSends");
}
return absl::StrCat("{", absl::StrJoin(pending_op_strings, ","), "}");
}
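// Tracks one in-flight grpc_call batch: the transport op, its completion
// plumbing, and which sub-operations are still pending.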
struct BatchControl {
FilterStackCall* call_ = nullptr;
CallTracerAnnotationInterface* call_tracer_ = nullptr;
grpc_transport_stream_op_batch op_;
// Share memory for cq_completion and notify_tag as they are never needed
// simultaneously. Each byte used in this data structure counts as six bytes
// per call, so any savings we can make are worthwhile.
// We use notify_tag to determine whether or not to send a notification to the
// completion queue. Once we've made that determination, we can reuse the
// memory for cq_completion.
union {
grpc_cq_completion cq_completion;
struct {
// Any given op indicates completion by either (a) calling a closure or
// (b) sending a notification on the call's completion queue. If
// \a is_closure is true, \a tag indicates a closure to be invoked;
// otherwise, \a tag indicates the tag to be used in the notification to
// be sent to the completion queue.
void* tag;
bool is_closure;
} notify_tag;
} completion_data_;
grpc_closure start_batch_;
grpc_closure finish_batch_;
std::atomic<intptr_t> ops_pending_{0};
AtomicError batch_error_;
void set_pending_ops(uintptr_t ops) {
ops_pending_.store(ops, std::memory_order_release);
}
bool completed_batch_step(PendingOp op) {
auto mask = PendingOpMask(op);
auto r = ops_pending_.fetch_sub(mask, std::memory_order_acq_rel);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "BATCH:%p COMPLETE:%s REMAINING:%s (tag:%p)", this,
PendingOpString(mask).c_str(),
PendingOpString(r & ~mask).c_str(),
completion_data_.notify_tag.tag);
}
CHECK_NE((r & mask), 0);
return r == mask;
}
void PostCompletion();
void FinishStep(PendingOp op);
void ProcessDataAfterMetadata();
void ReceivingStreamReady(grpc_error_handle error);
void ReceivingInitialMetadataReady(grpc_error_handle error);
void ReceivingTrailingMetadataReady(grpc_error_handle error);
void FinishBatch(grpc_error_handle error);
};
FilterStackCall(RefCountedPtr<Arena> arena,
const grpc_call_create_args& args);
static void ReleaseCall(void* call, grpc_error_handle);
static void DestroyCall(void* call, grpc_error_handle);
static FilterStackCall* FromCallStack(grpc_call_stack* call_stack) {
return reinterpret_cast<FilterStackCall*>(
reinterpret_cast<char*>(call_stack) -
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(FilterStackCall)));
}
void ExecuteBatch(grpc_transport_stream_op_batch* batch,
grpc_closure* start_batch_closure);
void SetFinalStatus(grpc_error_handle error);
BatchControl* ReuseOrAllocateBatchControl(const grpc_op* ops);
bool PrepareApplicationMetadata(size_t count, grpc_metadata* metadata,
bool is_trailing);
void PublishAppMetadata(grpc_metadata_batch* b, bool is_trailing);
void RecvInitialFilter(grpc_metadata_batch* b);
void RecvTrailingFilter(grpc_metadata_batch* b,
grpc_error_handle batch_error);
grpc_compression_algorithm incoming_compression_algorithm() override {
return incoming_compression_algorithm_;
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) override {
incoming_compression_algorithm_ = algorithm;
}
RefCountedPtr<Channel> channel_;
RefCount ext_ref_;
CallCombiner call_combiner_;
grpc_completion_queue* cq_;
grpc_polling_entity pollent_;
/// has grpc_call_unref been called
bool destroy_called_ = false;
// Trailers-only response status
bool is_trailers_only_ = false;
/// which ops are in-flight
bool sent_initial_metadata_ = false;
bool sending_message_ = false;
bool sent_final_op_ = false;
bool received_initial_metadata_ = false;
bool receiving_message_ = false;
bool requested_final_op_ = false;
gpr_atm received_final_op_atm_ = 0;
BatchControl* active_batches_[kMaxConcurrentBatches] = {};
grpc_transport_stream_op_batch_payload stream_op_payload_;
// Metadata batches for each direction (send/recv) and phase (initial/trailing)
grpc_metadata_batch send_initial_metadata_;
grpc_metadata_batch send_trailing_metadata_;
grpc_metadata_batch recv_initial_metadata_;
grpc_metadata_batch recv_trailing_metadata_;
// Buffered read metadata waiting to be returned to the application.
// Element 0 is initial metadata, element 1 is trailing metadata.
grpc_metadata_array* buffered_metadata_[2] = {};
// Call data used for reporting. Only valid after the call has
// completed.
grpc_call_final_info final_info_;
SliceBuffer send_slice_buffer_;
absl::optional<SliceBuffer> receiving_slice_buffer_;
uint32_t receiving_stream_flags_;
uint32_t test_only_last_message_flags_ = 0;
// Compression algorithm for *incoming* data
grpc_compression_algorithm incoming_compression_algorithm_ =
GRPC_COMPRESS_NONE;
bool call_failed_before_recv_message_ = false;
grpc_byte_buffer** receiving_buffer_ = nullptr;
grpc_slice receiving_slice_ = grpc_empty_slice();
grpc_closure receiving_stream_ready_;
grpc_closure receiving_initial_metadata_ready_;
grpc_closure receiving_trailing_metadata_ready_;
// Status about operation of call
bool sent_server_trailing_metadata_ = false;
gpr_atm cancelled_with_error_ = 0;
grpc_closure release_call_;
union {
struct {
grpc_status_code* status;
grpc_slice* status_details;
const char** error_string;
} client;
struct {
int* cancelled;
// backpointer to owning server if this is a server side call.
ServerInterface* core_server;
} server;
} final_op_;
AtomicError status_error_;
// recv_state can contain one of the following values:
// RECV_NONE : no initial metadata or messages received yet
// RECV_INITIAL_METADATA_FIRST : received initial metadata first
// a batch_control* : received messages first
// +------1------RECV_NONE------3-----+
// | |
// | |
// v v
// RECV_INITIAL_METADATA_FIRST receiving_stream_ready_bctlp
// | ^ | ^
// | | | |
// +-----2-----+ +-----4-----+
// For 1, 4: See receiving_initial_metadata_ready() function
// For 2, 3: See receiving_stream_ready() function
gpr_atm recv_state_ = 0;
};
// Create a new call based on \a args.
// Regardless of success or failure, always sets *call to a valid new call.
grpc_error_handle grpc_call_create(grpc_call_create_args* args,
grpc_call** call);
// Given the top call_element, get the call object.
grpc_call* grpc_call_from_top_element(grpc_call_element* surface_element);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_FILTER_STACK_CALL_H

@ -60,7 +60,7 @@
 namespace grpc_core {
-absl::StatusOr<OrphanablePtr<Channel>> LegacyChannel::Create(
+absl::StatusOr<RefCountedPtr<Channel>> LegacyChannel::Create(
     std::string target, ChannelArgs args,
     grpc_channel_stack_type channel_stack_type) {
   if (grpc_channel_stack_type_is_client(channel_stack_type)) {
@ -101,18 +101,16 @@ absl::StatusOr<OrphanablePtr<Channel>> LegacyChannel::Create(
         GlobalStatsPluginRegistry::GetStatsPluginsForChannel(
             experimental::StatsPluginChannelScope(target, authority));
   }
-  return MakeOrphanable<LegacyChannel>(
+  return MakeRefCounted<LegacyChannel>(
       grpc_channel_stack_type_is_client(builder.channel_stack_type()),
-      builder.IsPromising(), std::move(target), args, std::move(*r));
+      std::move(target), args, std::move(*r));
 }
-LegacyChannel::LegacyChannel(bool is_client, bool is_promising,
-                             std::string target,
+LegacyChannel::LegacyChannel(bool is_client, std::string target,
                              const ChannelArgs& channel_args,
                              RefCountedPtr<grpc_channel_stack> channel_stack)
     : Channel(std::move(target), channel_args),
       is_client_(is_client),
-      is_promising_(is_promising),
       channel_stack_(std::move(channel_stack)) {
   // We need to make sure that grpc_shutdown() does not shut things down
   // until after the channel is destroyed. However, the channel may not
@ -144,13 +142,12 @@ LegacyChannel::LegacyChannel(bool is_client, bool is_promising,
   };
 }
-void LegacyChannel::Orphan() {
+void LegacyChannel::Orphaned() {
   grpc_transport_op* op = grpc_make_transport_op(nullptr);
   op->disconnect_with_error = GRPC_ERROR_CREATE("Channel Destroyed");
   grpc_channel_element* elem =
       grpc_channel_stack_element(channel_stack_.get(), 0);
   elem->filter->start_transport_op(elem, op);
-  Unref();
 }
 bool LegacyChannel::IsLame() const {
@ -167,7 +164,7 @@ grpc_call* LegacyChannel::CreateCall(
   CHECK(is_client_);
   CHECK(!(cq != nullptr && pollset_set_alternative != nullptr));
   grpc_call_create_args args;
-  args.channel = Ref();
+  args.channel = RefAsSubclass<LegacyChannel>();
   args.server = nullptr;
   args.parent = parent_call;
   args.propagation_mask = propagation_mask;
@ -204,9 +201,9 @@ bool LegacyChannel::SupportsConnectivityWatcher() const {
 // A fire-and-forget object to handle external connectivity state watches.
 class LegacyChannel::StateWatcher final : public DualRefCounted<StateWatcher> {
  public:
-  StateWatcher(RefCountedPtr<LegacyChannel> channel, grpc_completion_queue* cq,
-               void* tag, grpc_connectivity_state last_observed_state,
-               Timestamp deadline)
+  StateWatcher(WeakRefCountedPtr<LegacyChannel> channel,
+               grpc_completion_queue* cq, void* tag,
+               grpc_connectivity_state last_observed_state, Timestamp deadline)
       : channel_(std::move(channel)),
         cq_(cq),
         tag_(tag),
@ -313,7 +310,7 @@ class LegacyChannel::StateWatcher final : public DualRefCounted<StateWatcher> {
     self->WeakUnref();
   }
-  RefCountedPtr<LegacyChannel> channel_;
+  WeakRefCountedPtr<LegacyChannel> channel_;
   grpc_completion_queue* cq_;
   void* tag_;
@ -333,8 +330,8 @@ class LegacyChannel::StateWatcher final : public DualRefCounted<StateWatcher> {
 void LegacyChannel::WatchConnectivityState(
     grpc_connectivity_state last_observed_state, Timestamp deadline,
     grpc_completion_queue* cq, void* tag) {
-  new StateWatcher(RefAsSubclass<LegacyChannel>(), cq, tag, last_observed_state,
-                   deadline);
+  new StateWatcher(WeakRefAsSubclass<LegacyChannel>(), cq, tag,
                    last_observed_state, deadline);
 }
 void LegacyChannel::AddConnectivityWatcher(
@ -401,8 +398,7 @@ void LegacyChannel::Ping(grpc_completion_queue* cq, void* tag) {
 ClientChannelFilter* LegacyChannel::GetClientChannelFilter() const {
   grpc_channel_element* elem =
       grpc_channel_stack_last_element(channel_stack_.get());
-  if (elem->filter != &ClientChannelFilter::kFilterVtableWithPromises &&
-      elem->filter != &ClientChannelFilter::kFilterVtableWithoutPromises) {
+  if (elem->filter != &ClientChannelFilter::kFilter) {
     return nullptr;
   }
   return static_cast<ClientChannelFilter*>(elem->channel_data);

@ -46,16 +46,16 @@ namespace grpc_core {
 class LegacyChannel final : public Channel {
  public:
-  static absl::StatusOr<OrphanablePtr<Channel>> Create(
+  static absl::StatusOr<RefCountedPtr<Channel>> Create(
       std::string target, ChannelArgs args,
       grpc_channel_stack_type channel_stack_type);
   // Do not instantiate directly -- use Create() instead.
-  LegacyChannel(bool is_client, bool is_promising, std::string target,
+  LegacyChannel(bool is_client, std::string target,
                 const ChannelArgs& channel_args,
                 RefCountedPtr<grpc_channel_stack> channel_stack);
-  void Orphan() override;
+  void Orphaned() override;
   bool IsLame() const override;
@ -65,6 +65,10 @@ class LegacyChannel final : public Channel {
       absl::optional<Slice> authority, Timestamp deadline,
       bool registered_method) override;
+  void StartCall(UnstartedCallHandler) override {
+    Crash("StartCall() not supported on LegacyChannel");
+  }
   grpc_event_engine::experimental::EventEngine* event_engine() const override {
     return channel_stack_->EventEngine();
   }
@ -90,7 +94,6 @@ class LegacyChannel final : public Channel {
   void Ping(grpc_completion_queue* cq, void* tag) override;
   bool is_client() const override { return is_client_; }
-  bool is_promising() const override { return is_promising_; }
   grpc_channel_stack* channel_stack() const override {
     return channel_stack_.get();
   }
@ -103,7 +106,6 @@ class LegacyChannel final : public Channel {
   ClientChannelFilter* GetClientChannelFilter() const;
   const bool is_client_;
-  const bool is_promising_;
   RefCountedPtr<grpc_channel_stack> channel_stack_;
 };

@ -0,0 +1,224 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/server_call.h"
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/bitset.h"
#include "src/core/lib/promise/all_ok.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/server/server_interface.h"
namespace grpc_core {
namespace {
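// Validates that a server batch contains only server-legal ops with valid
// flags and metadata, and that no op kind appears more than once.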
grpc_call_error ValidateServerBatch(const grpc_op* ops, size_t nops) {
BitSet<8> got_ops;
for (size_t op_idx = 0; op_idx < nops; op_idx++) {
const grpc_op& op = ops[op_idx];
switch (op.op) {
case GRPC_OP_SEND_INITIAL_METADATA:
if (!AreInitialMetadataFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
if (!ValidateMetadata(op.data.send_initial_metadata.count,
op.data.send_initial_metadata.metadata)) {
return GRPC_CALL_ERROR_INVALID_METADATA;
}
break;
case GRPC_OP_SEND_MESSAGE:
if (!AreWriteFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
if (op.flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!ValidateMetadata(
op.data.send_status_from_server.trailing_metadata_count,
op.data.send_status_from_server.trailing_metadata)) {
return GRPC_CALL_ERROR_INVALID_METADATA;
}
break;
case GRPC_OP_RECV_MESSAGE:
case GRPC_OP_RECV_CLOSE_ON_SERVER:
if (op.flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
break;
case GRPC_OP_RECV_INITIAL_METADATA:
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
case GRPC_OP_RECV_STATUS_ON_CLIENT:
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
if (got_ops.is_set(op.op)) return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
got_ops.set(op.op);
}
return GRPC_CALL_OK;
}
} // namespace
grpc_call_error ServerCall::StartBatch(const grpc_op* ops, size_t nops,
void* notify_tag,
bool is_notify_tag_closure) {
if (nops == 0) {
EndOpImmediately(cq_, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
const grpc_call_error validation_result = ValidateServerBatch(ops, nops);
if (validation_result != GRPC_CALL_OK) {
return validation_result;
}
CommitBatch(ops, nops, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
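// Translates a validated batch of grpc_ops into promises and spawns them on
// the call handler as a single batch; RECV_CLOSE_ON_SERVER additionally waits
// for the call to finish so it can report whether it was cancelled.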
void ServerCall::CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) {
BatchOpIndex op_index(ops, nops);
if (!is_notify_tag_closure) grpc_cq_begin_op(cq_, notify_tag);
auto send_initial_metadata =
op_index.OpHandler<GRPC_OP_SEND_INITIAL_METADATA>([this](
const grpc_op& op) {
auto metadata = arena()->MakePooled<ServerMetadata>();
PrepareOutgoingInitialMetadata(op, *metadata);
CToMetadata(op.data.send_initial_metadata.metadata,
op.data.send_initial_metadata.count, metadata.get());
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "%s[call] Send initial metadata",
DebugTag().c_str());
}
return [this, metadata = std::move(metadata)]() mutable {
return call_handler_.PushServerInitialMetadata(std::move(metadata));
};
});
auto send_message =
op_index.OpHandler<GRPC_OP_SEND_MESSAGE>([this](const grpc_op& op) {
SliceBuffer send;
grpc_slice_buffer_swap(
&op.data.send_message.send_message->data.raw.slice_buffer,
send.c_slice_buffer());
auto msg = arena()->MakePooled<Message>(std::move(send), op.flags);
return [this, msg = std::move(msg)]() mutable {
return call_handler_.PushMessage(std::move(msg));
};
});
auto send_trailing_metadata =
op_index.OpHandler<GRPC_OP_SEND_STATUS_FROM_SERVER>(
[this](const grpc_op& op) {
auto metadata = arena()->MakePooled<ServerMetadata>();
CToMetadata(op.data.send_status_from_server.trailing_metadata,
op.data.send_status_from_server.trailing_metadata_count,
metadata.get());
metadata->Set(GrpcStatusMetadata(),
op.data.send_status_from_server.status);
if (auto* details =
op.data.send_status_from_server.status_details) {
// TODO(ctiller): this should not be a copy, but we have
// callers that allocate and pass in a slice created with
// grpc_slice_from_static_string and then delete the string
// after passing it in, which shouldn't be a supported API.
metadata->Set(GrpcMessageMetadata(),
Slice(grpc_slice_copy(*details)));
}
CHECK(metadata != nullptr);
return [this, metadata = std::move(metadata)]() mutable {
CHECK(metadata != nullptr);
return [this, metadata = std::move(
metadata)]() mutable -> Poll<Success> {
CHECK(metadata != nullptr);
call_handler_.PushServerTrailingMetadata(std::move(metadata));
return Success{};
};
};
});
auto recv_message =
op_index.OpHandler<GRPC_OP_RECV_MESSAGE>([this](const grpc_op& op) {
return message_receiver_.MakeBatchOp(op, &call_handler_);
});
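// Initial metadata and messages may be pushed concurrently; trailing metadata
// is sequenced after both; message receipt runs in parallel with the sends.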
auto primary_ops = AllOk<StatusFlag>(
TrySeq(AllOk<StatusFlag>(std::move(send_initial_metadata),
std::move(send_message)),
std::move(send_trailing_metadata)),
std::move(recv_message));
if (auto* op = op_index.op(GRPC_OP_RECV_CLOSE_ON_SERVER)) {
auto recv_trailing_metadata = OpHandler<GRPC_OP_RECV_CLOSE_ON_SERVER>(
[this, cancelled = op->data.recv_close_on_server.cancelled]() {
return Map(call_handler_.WasCancelled(),
[cancelled, this](bool result) -> Success {
ResetDeadline();
*cancelled = result ? 1 : 0;
return Success{};
});
});
call_handler_.SpawnInfallible(
"final-batch", InfallibleBatch(std::move(primary_ops),
std::move(recv_trailing_metadata),
is_notify_tag_closure, notify_tag, cq_));
} else {
call_handler_.SpawnInfallible(
"batch", FallibleBatch(std::move(primary_ops), is_notify_tag_closure,
notify_tag, cq_));
}
}
grpc_call* MakeServerCall(CallHandler call_handler,
ClientMetadataHandle client_initial_metadata,
ServerInterface* server, grpc_completion_queue* cq,
grpc_metadata_array* publish_initial_metadata) {
PublishMetadataArray(client_initial_metadata.get(), publish_initial_metadata,
false);
// TODO(ctiller): ideally we'd put this in the arena with the CallHandler,
// but there's an ownership problem: CallHandler owns the arena, and so would
// get destroyed before the base class Call destructor runs, leading to
// UB/crash. Investigate another path.
return (new ServerCall(std::move(client_initial_metadata),
std::move(call_handler), server, cq))
->c_ptr();
}
} // namespace grpc_core

@ -0,0 +1,167 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_SERVER_CALL_H
#define GRPC_SRC_CORE_LIB_SURFACE_SERVER_CALL_H
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/server/server_interface.h"
#include "src/core/telemetry/stats.h"
#include "src/core/telemetry/stats_data.h"
namespace grpc_core {
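// Server-side call for the promise-based call stack: adapts grpc_call batch
// operations onto the CallHandler provided by the server/transport.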
class ServerCall final : public Call, public DualRefCounted<ServerCall> {
public:
ServerCall(ClientMetadataHandle client_initial_metadata,
CallHandler call_handler, ServerInterface* server,
grpc_completion_queue* cq)
: Call(false,
client_initial_metadata->get(GrpcTimeoutMetadata())
.value_or(Timestamp::InfFuture()),
call_handler.arena()->Ref(), call_handler.event_engine()),
call_handler_(std::move(call_handler)),
client_initial_metadata_stored_(std::move(client_initial_metadata)),
cq_(cq),
server_(server) {
global_stats().IncrementServerCallsCreated();
}
void CancelWithError(grpc_error_handle error) override {
call_handler_.SpawnInfallible(
"CancelWithError",
[self = WeakRefAsSubclass<ServerCall>(), error = std::move(error)] {
auto status = ServerMetadataFromStatus(error);
status->Set(GrpcCallWasCancelled(), true);
self->call_handler_.PushServerTrailingMetadata(std::move(status));
return Empty{};
});
}
bool is_trailers_only() const override {
Crash("is_trailers_only not implemented for server calls");
}
absl::string_view GetServerAuthority() const override {
Crash("unimplemented");
}
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
void ExternalRef() override { Ref().release(); }
void ExternalUnref() override { Unref(); }
void InternalRef(const char*) override { WeakRef().release(); }
void InternalUnref(const char*) override { WeakUnref(); }
void Orphaned() override {
// TODO(ctiller): only when we're not already finished
CancelWithError(absl::CancelledError());
}
void SetCompletionQueue(grpc_completion_queue*) override {
Crash("unimplemented");
}
grpc_compression_options compression_options() override {
return server_->compression_options();
}
grpc_call_stack* call_stack() override { return nullptr; }
char* GetPeer() override {
Slice peer_slice = GetPeerString();
if (!peer_slice.empty()) {
absl::string_view peer_string_view = peer_slice.as_string_view();
char* peer_string =
static_cast<char*>(gpr_malloc(peer_string_view.size() + 1));
memcpy(peer_string, peer_string_view.data(), peer_string_view.size());
peer_string[peer_string_view.size()] = '\0';
return peer_string;
}
return gpr_strdup("unknown");
}
bool Completed() final { Crash("unimplemented"); }
bool failed_before_recv_message() const final { Crash("unimplemented"); }
uint32_t test_only_message_flags() override {
return message_receiver_.last_message_flags();
}
grpc_compression_algorithm incoming_compression_algorithm() override {
return message_receiver_.incoming_compression_algorithm();
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) override {
message_receiver_.SetIncomingCompressionAlgorithm(algorithm);
}
private:
void CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure);
std::string DebugTag() { return absl::StrFormat("SERVER_CALL[%p]: ", this); }
CallHandler call_handler_;
MessageReceiver message_receiver_;
ClientMetadataHandle client_initial_metadata_stored_;
grpc_completion_queue* const cq_;
ServerInterface* const server_;
};
grpc_call* MakeServerCall(CallHandler call_handler,
ClientMetadataHandle client_initial_metadata,
ServerInterface* server, grpc_completion_queue* cq,
grpc_metadata_array* publish_initial_metadata);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_SERVER_CALL_H

@ -1,75 +0,0 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/wait_for_cq_end_op.h"
#include <atomic>
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/match.h"
#include "src/core/lib/promise/trace.h"
namespace grpc_core {
Poll<Empty> WaitForCqEndOp::operator()() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sWaitForCqEndOp[%p] %s",
Activity::current()->DebugTag().c_str(), this,
StateString(state_).c_str());
}
if (auto* n = absl::get_if<NotStarted>(&state_)) {
if (n->is_closure) {
ExecCtx::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(n->tag),
std::move(n->error));
return Empty{};
} else {
auto not_started = std::move(*n);
auto& started =
state_.emplace<Started>(GetContext<Activity>()->MakeOwningWaker());
grpc_cq_end_op(
not_started.cq, not_started.tag, std::move(not_started.error),
[](void* p, grpc_cq_completion*) {
auto started = static_cast<Started*>(p);
auto wakeup = std::move(started->waker);
started->done.store(true, std::memory_order_release);
wakeup.Wakeup();
},
&started, &started.completion);
}
}
auto& started = absl::get<Started>(state_);
if (started.done.load(std::memory_order_acquire)) {
return Empty{};
} else {
return Pending{};
}
}
std::string WaitForCqEndOp::StateString(const State& state) {
return Match(
state,
[](const NotStarted& x) {
return absl::StrFormat(
"NotStarted{is_closure=%s, tag=%p, error=%s, cq=%p}",
x.is_closure ? "true" : "false", x.tag, x.error.ToString(), x.cq);
},
[](const Started& x) {
return absl::StrFormat(
"Started{completion=%p, done=%s}", &x.completion,
x.done.load(std::memory_order_relaxed) ? "true" : "false");
},
[](const Invalid&) -> std::string { return "Invalid{}"; });
}
} // namespace grpc_core

@ -1,72 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_WAIT_FOR_CQ_END_OP_H
#define GRPC_SRC_CORE_LIB_SURFACE_WAIT_FOR_CQ_END_OP_H
#include <grpc/support/port_platform.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/surface/completion_queue.h"
namespace grpc_core {
// Defines a promise that calls grpc_cq_end_op() (on first poll) and then waits
// for the callback supplied to grpc_cq_end_op() to be called, before resolving
// to Empty{}
class WaitForCqEndOp {
public:
WaitForCqEndOp(bool is_closure, void* tag, grpc_error_handle error,
grpc_completion_queue* cq)
: state_{NotStarted{is_closure, tag, std::move(error), cq}} {}
Poll<Empty> operator()();
WaitForCqEndOp(const WaitForCqEndOp&) = delete;
WaitForCqEndOp& operator=(const WaitForCqEndOp&) = delete;
WaitForCqEndOp(WaitForCqEndOp&& other) noexcept
: state_(std::move(absl::get<NotStarted>(other.state_))) {
other.state_.emplace<Invalid>();
}
WaitForCqEndOp& operator=(WaitForCqEndOp&& other) noexcept {
state_ = std::move(absl::get<NotStarted>(other.state_));
other.state_.emplace<Invalid>();
return *this;
}
private:
struct NotStarted {
bool is_closure;
void* tag;
grpc_error_handle error;
grpc_completion_queue* cq;
};
struct Started {
explicit Started(Waker waker) : waker(std::move(waker)) {}
Waker waker;
grpc_cq_completion completion;
std::atomic<bool> done{false};
};
struct Invalid {};
using State = absl::variant<NotStarted, Started, Invalid>;
static std::string StateString(const State& state);
State state_{Invalid{}};
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_WAIT_FOR_CQ_END_OP_H

@ -1,171 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/transport/batch_builder.h"
#include <type_traits>
#include "absl/log/check.h"
#include <grpc/support/port_platform.h>
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
BatchBuilder::BatchBuilder(grpc_transport_stream_op_batch_payload* payload)
: payload_(payload) {}
void BatchBuilder::PendingCompletion::CompletionCallback(
void* self, grpc_error_handle error) {
auto* pc = static_cast<PendingCompletion*>(self);
auto* party = pc->batch->party.get();
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sFinish batch-component %s: status=%s",
pc->batch->DebugPrefix(party).c_str(),
std::string(pc->name()).c_str(), error.ToString().c_str());
}
party->Spawn(
"batch-completion",
[pc, error = std::move(error)]() mutable {
RefCountedPtr<Batch> batch = std::exchange(pc->batch, nullptr);
pc->done_latch.Set(std::move(error));
return Empty{};
},
[](Empty) {});
}
BatchBuilder::PendingCompletion::PendingCompletion(RefCountedPtr<Batch> batch)
: batch(std::move(batch)) {
GRPC_CLOSURE_INIT(&on_done_closure, CompletionCallback, this, nullptr);
}
BatchBuilder::Batch::Batch(grpc_transport_stream_op_batch_payload* payload,
grpc_stream_refcount* stream_refcount)
: party(GetContext<Party>()->Ref()), stream_refcount(stream_refcount) {
batch.payload = payload;
batch.is_traced = GetContext<CallContext>()->traced();
#ifndef NDEBUG
grpc_stream_ref(stream_refcount, "pending-batch");
#else
grpc_stream_ref(stream_refcount);
#endif
}
BatchBuilder::Batch::~Batch() {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] [batch %p] Destroy",
GetContext<Activity>()->DebugTag().c_str(), this);
}
delete pending_receive_message;
delete pending_receive_initial_metadata;
delete pending_receive_trailing_metadata;
delete pending_sends;
if (batch.cancel_stream) {
delete batch.payload;
}
#ifndef NDEBUG
grpc_stream_unref(stream_refcount, "pending-batch");
#else
grpc_stream_unref(stream_refcount);
#endif
}
BatchBuilder::Batch* BatchBuilder::GetBatch(Target target) {
if (target_.has_value() &&
(target_->stream != target.stream ||
target.transport->filter_stack_transport()
->HackyDisableStreamOpBatchCoalescingInConnectedChannel())) {
FlushBatch();
}
if (!target_.has_value()) {
target_ = target;
batch_ = GetContext<Arena>()->NewPooled<Batch>(payload_,
target_->stream_refcount);
}
CHECK_NE(batch_, nullptr);
return batch_;
}
void BatchBuilder::FlushBatch() {
CHECK_NE(batch_, nullptr);
CHECK(target_.has_value());
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG, "%sPerform transport stream op batch: %p %s",
batch_->DebugPrefix().c_str(), &batch_->batch,
grpc_transport_stream_op_batch_string(&batch_->batch, false).c_str());
}
std::exchange(batch_, nullptr)->PerformWith(*target_);
target_.reset();
}
void BatchBuilder::Batch::PerformWith(Target target) {
target.transport->filter_stack_transport()->PerformStreamOp(target.stream,
&batch);
}
ServerMetadataHandle BatchBuilder::CompleteSendServerTrailingMetadata(
Batch* batch, ServerMetadataHandle sent_metadata, absl::Status send_result,
bool actually_sent) {
if (!send_result.ok()) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG,
"%sSend metadata failed with error: %s, fabricating trailing "
"metadata",
batch->DebugPrefix().c_str(), send_result.ToString().c_str());
}
sent_metadata->Clear();
sent_metadata->Set(GrpcStatusMetadata(),
static_cast<grpc_status_code>(send_result.code()));
sent_metadata->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(send_result.message()));
sent_metadata->Set(GrpcCallWasCancelled(), true);
}
if (!sent_metadata->get(GrpcCallWasCancelled()).has_value()) {
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"%sTagging trailing metadata with cancellation status from "
"transport: %s",
batch->DebugPrefix().c_str(),
actually_sent ? "sent => not-cancelled" : "not-sent => cancelled");
}
sent_metadata->Set(GrpcCallWasCancelled(), !actually_sent);
}
return sent_metadata;
}
BatchBuilder::Batch* BatchBuilder::MakeCancel(
grpc_stream_refcount* stream_refcount, absl::Status status) {
auto* arena = GetContext<Arena>();
auto* payload = arena->NewPooled<grpc_transport_stream_op_batch_payload>();
auto* batch = arena->NewPooled<Batch>(payload, stream_refcount);
batch->batch.cancel_stream = true;
payload->cancel_stream.cancel_error = std::move(status);
return batch;
}
void BatchBuilder::Cancel(Target target, absl::Status status) {
auto* batch = MakeCancel(target.stream_refcount, std::move(status));
batch->batch.on_complete =
NewClosure([batch](absl::Status) { delete batch; });
batch->PerformWith(target);
}
} // namespace grpc_core

@ -1,474 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include <grpc/status.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/party.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
// Build up a transport stream op batch for a stream for a promise based
// connected channel.
// Offered as a context from Call, so that it can collect ALL the updates during
// a single party round, and then push them down to the transport as a single
// transaction.
class BatchBuilder {
public:
explicit BatchBuilder(grpc_transport_stream_op_batch_payload* payload);
~BatchBuilder() {
if (batch_ != nullptr) FlushBatch();
}
struct Target {
Transport* transport;
grpc_stream* stream;
grpc_stream_refcount* stream_refcount;
};
BatchBuilder(const BatchBuilder&) = delete;
BatchBuilder& operator=(const BatchBuilder&) = delete;
// Returns a promise that will resolve to a Status when the send is completed.
auto SendMessage(Target target, MessageHandle message);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendClientInitialMetadata(Target target, ClientMetadataHandle metadata);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendClientTrailingMetadata(Target target);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendServerInitialMetadata(Target target, ServerMetadataHandle metadata);
// Returns a promise that will resolve to a ServerMetadataHandle when the send
// is completed.
//
// If convert_to_cancellation is true, then the status will be converted to a
// cancellation batch instead of a trailing metadata op in a coalesced batch.
//
// This quirk exists as in the filter based stack upon which our transports
// were written if a trailing metadata op were sent it always needed to be
// paired with an initial op batch, and the transports would wait for the
// initial metadata batch to arrive (in case of reordering up the stack).
auto SendServerTrailingMetadata(Target target, ServerMetadataHandle metadata,
bool convert_to_cancellation);
// Returns a promise that will resolve to a StatusOr<optional<MessageHandle>>
// when a message is received.
// Error => non-ok status
// End of stream => Ok, nullopt (no message)
// Message => Ok, message
auto ReceiveMessage(Target target);
// Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
// when the receive is complete.
auto ReceiveClientInitialMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
// when the receive is complete.
auto ReceiveClientTrailingMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
// when the receive is complete.
auto ReceiveServerInitialMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
// when the receive is complete.
auto ReceiveServerTrailingMetadata(Target target);
// Send a cancellation: does not occupy the same payload, nor does it
// coalesce with other ops.
void Cancel(Target target, absl::Status status);
private:
struct Batch;
// Base pending operation
struct PendingCompletion {
explicit PendingCompletion(RefCountedPtr<Batch> batch);
virtual absl::string_view name() const = 0;
static void CompletionCallback(void* self, grpc_error_handle error);
grpc_closure on_done_closure;
Latch<absl::Status> done_latch;
RefCountedPtr<Batch> batch;
protected:
~PendingCompletion() = default;
};
// A pending receive message.
struct PendingReceiveMessage final : public PendingCompletion {
using PendingCompletion::PendingCompletion;
absl::string_view name() const override { return "receive_message"; }
MessageHandle IntoMessageHandle() {
return Arena::MakePooled<Message>(std::move(*payload), flags);
}
absl::optional<SliceBuffer> payload;
uint32_t flags;
bool call_failed_before_recv_message = false;
};
// A pending receive metadata.
struct PendingReceiveMetadata : public PendingCompletion {
using PendingCompletion::PendingCompletion;
Arena::PoolPtr<grpc_metadata_batch> metadata =
Arena::MakePooled<grpc_metadata_batch>();
protected:
~PendingReceiveMetadata() = default;
};
struct PendingReceiveInitialMetadata final : public PendingReceiveMetadata {
using PendingReceiveMetadata::PendingReceiveMetadata;
absl::string_view name() const override {
return "receive_initial_metadata";
}
};
struct PendingReceiveTrailingMetadata final : public PendingReceiveMetadata {
using PendingReceiveMetadata::PendingReceiveMetadata;
absl::string_view name() const override {
return "receive_trailing_metadata";
}
};
// Pending sends in a batch
struct PendingSends final : public PendingCompletion {
using PendingCompletion::PendingCompletion;
absl::string_view name() const override { return "sends"; }
MessageHandle send_message;
Arena::PoolPtr<grpc_metadata_batch> send_initial_metadata;
Arena::PoolPtr<grpc_metadata_batch> send_trailing_metadata;
bool trailing_metadata_sent = false;
};
// One outstanding batch.
struct Batch final {
Batch(grpc_transport_stream_op_batch_payload* payload,
grpc_stream_refcount* stream_refcount);
~Batch();
Batch(const Batch&) = delete;
Batch& operator=(const Batch&) = delete;
std::string DebugPrefix(Activity* activity = GetContext<Activity>()) const {
return absl::StrFormat("%s[connected] [batch %p] ", activity->DebugTag(),
this);
}
void IncrementRefCount() { ++refs; }
void Unref() {
if (--refs == 0) delete this;
}
RefCountedPtr<Batch> Ref() {
IncrementRefCount();
return RefCountedPtr<Batch>(this);
}
// Get an initialized pending completion.
// There are four pending completions potentially contained within a batch.
// They can be rather large so we don't create all of them always. Instead,
// we dynamically create them on the arena as needed.
// This method either returns the existing completion in a batch if that
// completion has already been initialized, or it creates a new completion
// and returns that.
template <typename T>
T* GetInitializedCompletion(T*(Batch::*field)) {
if (this->*field != nullptr) return this->*field;
this->*field = new T(Ref());
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sAdd batch closure for %s @ %s",
DebugPrefix().c_str(),
std::string((this->*field)->name()).c_str(),
(this->*field)->on_done_closure.DebugString().c_str());
}
return this->*field;
}
// grpc_transport_perform_stream_op on target.stream
void PerformWith(Target target);
// Take a promise, and return a promise that holds a ref on this batch until
// the promise completes or is cancelled.
template <typename P>
auto RefUntil(P promise) {
return [self = Ref(), promise = std::move(promise)]() mutable {
return promise();
};
}
grpc_transport_stream_op_batch batch;
PendingReceiveMessage* pending_receive_message = nullptr;
PendingReceiveInitialMetadata* pending_receive_initial_metadata = nullptr;
PendingReceiveTrailingMetadata* pending_receive_trailing_metadata = nullptr;
PendingSends* pending_sends = nullptr;
const RefCountedPtr<Party> party;
grpc_stream_refcount* const stream_refcount;
uint8_t refs = 0;
};
// Get a batch for the given target.
// Currently: if the current batch is for this target, return it - otherwise
// flush the batch and start a new one (and return that).
// This function may change in the future to allow multiple batches to be
// building at once (if that turns out to be useful for hedging).
Batch* GetBatch(Target target);
// Flush the current batch down to the transport.
void FlushBatch();
// Create a cancel batch with its own payload.
Batch* MakeCancel(grpc_stream_refcount* stream_refcount, absl::Status status);
// Note: we don't distinguish between client and server metadata here.
// At the time of writing they're both the same thing - and it's unclear
// whether we'll get to separate them prior to batches going away or not.
// So for now we claim YAGNI and just do the simplest possible implementation.
auto SendInitialMetadata(Target target,
Arena::PoolPtr<grpc_metadata_batch> md);
auto ReceiveInitialMetadata(Target target);
auto ReceiveTrailingMetadata(Target target);
// Combine send status and server metadata into a final status to report back
// to the containing call.
static ServerMetadataHandle CompleteSendServerTrailingMetadata(
Batch* batch, ServerMetadataHandle sent_metadata,
absl::Status send_result, bool actually_sent);
grpc_transport_stream_op_batch_payload* const payload_;
absl::optional<Target> target_;
Batch* batch_ = nullptr;
};
inline auto BatchBuilder::SendMessage(Target target, MessageHandle message) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue send message: %s", batch->DebugPrefix().c_str(),
message->DebugString().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_message = true;
payload_->send_message.send_message = message->payload();
payload_->send_message.flags = message->flags();
pc->send_message = std::move(message);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendInitialMetadata(
Target target, Arena::PoolPtr<grpc_metadata_batch> md) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue send initial metadata: %s",
batch->DebugPrefix().c_str(), md->DebugString().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_initial_metadata = true;
payload_->send_initial_metadata.send_initial_metadata = md.get();
pc->send_initial_metadata = std::move(md);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendClientInitialMetadata(
Target target, ClientMetadataHandle metadata) {
return SendInitialMetadata(target, std::move(metadata));
}
inline auto BatchBuilder::SendClientTrailingMetadata(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue send trailing metadata",
batch->DebugPrefix().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_trailing_metadata = true;
auto metadata = Arena::MakePooled<grpc_metadata_batch>();
payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
payload_->send_trailing_metadata.sent = nullptr;
pc->send_trailing_metadata = std::move(metadata);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendServerInitialMetadata(
Target target, ServerMetadataHandle metadata) {
return SendInitialMetadata(target, std::move(metadata));
}
inline auto BatchBuilder::SendServerTrailingMetadata(
Target target, ServerMetadataHandle metadata,
bool convert_to_cancellation) {
Batch* batch;
PendingSends* pc;
if (convert_to_cancellation) {
const auto status_code =
metadata->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN);
auto status = grpc_error_set_int(
absl::Status(static_cast<absl::StatusCode>(status_code),
metadata->GetOrCreatePointer(GrpcMessageMetadata())
->as_string_view()),
StatusIntProperty::kRpcStatus, status_code);
batch = MakeCancel(target.stream_refcount, std::move(status));
pc = batch->GetInitializedCompletion(&Batch::pending_sends);
} else {
batch = GetBatch(target);
pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.send_trailing_metadata = true;
payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
payload_->send_trailing_metadata.sent = &pc->trailing_metadata_sent;
}
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s%s: %s", batch->DebugPrefix().c_str(),
convert_to_cancellation ? "Send trailing metadata as cancellation"
: "Queue send trailing metadata",
metadata->DebugString().c_str());
}
batch->batch.on_complete = &pc->on_done_closure;
pc->send_trailing_metadata = std::move(metadata);
auto promise = Map(pc->done_latch.WaitAndCopy(),
[pc, batch = batch->Ref()](absl::Status status) {
return CompleteSendServerTrailingMetadata(
batch.get(), std::move(pc->send_trailing_metadata),
std::move(status), pc->trailing_metadata_sent);
});
if (convert_to_cancellation) {
batch->PerformWith(target);
}
return promise;
}
inline auto BatchBuilder::ReceiveMessage(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue receive message", batch->DebugPrefix().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_receive_message);
batch->batch.recv_message = true;
payload_->recv_message.recv_message_ready = &pc->on_done_closure;
payload_->recv_message.recv_message = &pc->payload;
payload_->recv_message.flags = &pc->flags;
payload_->recv_message.call_failed_before_recv_message =
&pc->call_failed_before_recv_message;
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status)
-> absl::StatusOr<absl::optional<MessageHandle>> {
if (!status.ok()) return status;
if (!pc->payload.has_value()) {
if (pc->call_failed_before_recv_message) {
return absl::CancelledError();
}
return absl::nullopt;
}
return pc->IntoMessageHandle();
}));
}
inline auto BatchBuilder::ReceiveInitialMetadata(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue receive initial metadata",
batch->DebugPrefix().c_str());
}
auto* pc =
batch->GetInitializedCompletion(&Batch::pending_receive_initial_metadata);
batch->batch.recv_initial_metadata = true;
payload_->recv_initial_metadata.recv_initial_metadata_ready =
&pc->on_done_closure;
payload_->recv_initial_metadata.recv_initial_metadata = pc->metadata.get();
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status) -> absl::StatusOr<ClientMetadataHandle> {
if (!status.ok()) return status;
return std::move(pc->metadata);
}));
}
inline auto BatchBuilder::ReceiveClientInitialMetadata(Target target) {
return ReceiveInitialMetadata(target);
}
inline auto BatchBuilder::ReceiveServerInitialMetadata(Target target) {
return ReceiveInitialMetadata(target);
}
inline auto BatchBuilder::ReceiveTrailingMetadata(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue receive trailing metadata",
batch->DebugPrefix().c_str());
}
auto* pc = batch->GetInitializedCompletion(
&Batch::pending_receive_trailing_metadata);
batch->batch.recv_trailing_metadata = true;
payload_->recv_trailing_metadata.recv_trailing_metadata_ready =
&pc->on_done_closure;
payload_->recv_trailing_metadata.recv_trailing_metadata = pc->metadata.get();
payload_->recv_trailing_metadata.collect_stats =
&GetContext<CallContext>()->call_stats()->transport_stream_stats;
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status) -> absl::StatusOr<ServerMetadataHandle> {
if (!status.ok()) return status;
return std::move(pc->metadata);
}));
}
inline auto BatchBuilder::ReceiveClientTrailingMetadata(Target target) {
return ReceiveTrailingMetadata(target);
}
inline auto BatchBuilder::ReceiveServerTrailingMetadata(Target target) {
return ReceiveTrailingMetadata(target);
}
template <>
struct ContextType<BatchBuilder> {};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H

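// Illustrative sketch only - not part of this change: how the BatchBuilder
// promise API removed above was typically driven. BatchBuilder is a promise
// context (see ContextType<BatchBuilder>), so connected-channel style code
// resolved it with GetContext<>. The transport/stream/refcount/message
// arguments below are placeholders.
auto ExampleSendAndReceive(Transport* transport, grpc_stream* stream,
                           grpc_stream_refcount* stream_refcount,
                           MessageHandle outgoing) {
  BatchBuilder* builder = GetContext<BatchBuilder>();
  BatchBuilder::Target target{transport, stream, stream_refcount};
  // Both ops are coalesced into one transport stream op batch, flushed when
  // the builder is destroyed at the end of the party round or when the batch
  // target changes.
  return Join(
      // Resolves to absl::Status once the transport completes the send.
      builder->SendMessage(target, std::move(outgoing)),
      // Resolves to StatusOr<optional<MessageHandle>>: error => non-OK,
      // end-of-stream => nullopt, otherwise the received message.
      builder->ReceiveMessage(target));
}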
@@ -230,8 +230,8 @@ void CallFilters::CancelDueToFailedPipeOperation(SourceLocation but_where) {
        "Cancelling due to failed pipe operation: %s",
        DebugString().c_str());
  }
-  server_trailing_metadata_ =
-      ServerMetadataFromStatus(absl::CancelledError("Failed pipe operation"));
+  PushServerTrailingMetadata(
+      ServerMetadataFromStatus(absl::CancelledError("Failed pipe operation")));
  server_trailing_metadata_waiter_.Wake();
}

@@ -40,13 +40,23 @@ namespace grpc_core {
 // The common middle part of a call - a reference is held by each of
 // CallInitiator and CallHandler - which provide interfaces that are appropriate
 // for each side of a call.
-// The spine will ultimately host the pipes, filters, and context for one part
-// of a call: ie top-half client channel, sub channel call, server call.
-// TODO(ctiller): eventually drop this when we don't need to reference into
-// legacy promise calls anymore
-class CallSpineInterface {
+// Hosts context, call filters, and the arena.
+class CallSpine final : public Party {
  public:
-  virtual ~CallSpineInterface() = default;
+  static RefCountedPtr<CallSpine> Create(
+      ClientMetadataHandle client_initial_metadata,
+      grpc_event_engine::experimental::EventEngine* event_engine,
+      RefCountedPtr<Arena> arena) {
+    Arena* arena_ptr = arena.get();
+    return RefCountedPtr<CallSpine>(arena_ptr->New<CallSpine>(
+        std::move(client_initial_metadata), event_engine, std::move(arena)));
+  }
+  ~CallSpine() override {}
+  CallFilters& call_filters() { return call_filters_; }
+  Arena* arena() { return arena_.get(); }
   // Add a callback to be called when server trailing metadata is received.
   void OnDone(absl::AnyInvocable<void()> fn) {
     if (on_done_ == nullptr) {
@@ -61,38 +71,70 @@ class CallSpineInterface {
   void CallOnDone() {
     if (on_done_ != nullptr) std::exchange(on_done_, nullptr)();
   }
-  virtual Party& party() = 0;
-  virtual Arena* arena() = 0;
-  virtual void IncrementRefCount() = 0;
-  virtual void Unref() = 0;
-  virtual Promise<ValueOrFailure<absl::optional<ServerMetadataHandle>>>
-  PullServerInitialMetadata() = 0;
-  virtual Promise<ServerMetadataHandle> PullServerTrailingMetadata() = 0;
-  virtual Promise<StatusFlag> PushClientToServerMessage(
-      MessageHandle message) = 0;
-  virtual Promise<ValueOrFailure<absl::optional<MessageHandle>>>
-  PullClientToServerMessage() = 0;
-  virtual Promise<StatusFlag> PushServerToClientMessage(
-      MessageHandle message) = 0;
-  virtual Promise<ValueOrFailure<absl::optional<MessageHandle>>>
-  PullServerToClientMessage() = 0;
-  virtual void PushServerTrailingMetadata(ServerMetadataHandle md) = 0;
-  virtual void FinishSends() = 0;
-  virtual Promise<ValueOrFailure<ClientMetadataHandle>>
-  PullClientInitialMetadata() = 0;
-  virtual Promise<StatusFlag> PushServerInitialMetadata(
-      absl::optional<ServerMetadataHandle> md) = 0;
-  virtual Promise<bool> WasCancelled() = 0;
-  virtual ClientMetadata& UnprocessedClientInitialMetadata() = 0;
-  virtual void V2HackToStartCallWithoutACallFilterStack() = 0;
+  auto PullServerInitialMetadata() {
+    return call_filters().PullServerInitialMetadata();
+  }
+  auto PullServerTrailingMetadata() {
+    return call_filters().PullServerTrailingMetadata();
+  }
+  auto PushClientToServerMessage(MessageHandle message) {
+    return call_filters().PushClientToServerMessage(std::move(message));
+  }
+  auto PullClientToServerMessage() {
+    return call_filters().PullClientToServerMessage();
+  }
+  auto PushServerToClientMessage(MessageHandle message) {
+    return call_filters().PushServerToClientMessage(std::move(message));
+  }
+  auto PullServerToClientMessage() {
+    return call_filters().PullServerToClientMessage();
+  }
+  void PushServerTrailingMetadata(ServerMetadataHandle md) {
+    call_filters().PushServerTrailingMetadata(std::move(md));
+  }
+  void FinishSends() { call_filters().FinishClientToServerSends(); }
+  auto PullClientInitialMetadata() {
+    return call_filters().PullClientInitialMetadata();
+  }
+  auto PushServerInitialMetadata(absl::optional<ServerMetadataHandle> md) {
+    bool has_md = md.has_value();
+    return If(
+        has_md,
+        [this, md = std::move(md)]() mutable {
+          return call_filters().PushServerInitialMetadata(std::move(*md));
+        },
+        [this]() {
+          call_filters().NoServerInitialMetadata();
+          return Immediate<StatusFlag>(Success{});
+        });
+  }
+  auto WasCancelled() { return call_filters().WasCancelled(); }
+  ClientMetadata& UnprocessedClientInitialMetadata() {
+    return *call_filters().unprocessed_client_initial_metadata();
+  }
+  grpc_event_engine::experimental::EventEngine* event_engine() const override {
+    return event_engine_;
+  }
   // Wrap a promise so that if it returns failure it automatically cancels
   // the rest of the call.
   // The resulting (returned) promise will resolve to Empty.
   template <typename Promise>
   auto CancelIfFails(Promise promise) {
-    DCHECK(GetContext<Activity>() == &party());
+    DCHECK(GetContext<Activity>() == this);
     using P = promise_detail::PromiseLike<Promise>;
     using ResultType = typename P::Result;
     return Map(std::move(promise), [this](ResultType r) {
@@ -107,7 +149,7 @@ class CallSpineInterface {
   // that detail.
   template <typename PromiseFactory>
   void SpawnInfallible(absl::string_view name, PromiseFactory promise_factory) {
-    party().Spawn(name, std::move(promise_factory), [](Empty) {});
+    Spawn(name, std::move(promise_factory), [](Empty) {});
   }
   // Spawn a promise that returns some status-like type; if the status
@@ -123,18 +165,17 @@ class CallSpineInterface {
         std::is_same<bool,
                      decltype(IsStatusOk(std::declval<ResultType>()))>::value,
         "SpawnGuarded promise must return a status-like object");
-    party().Spawn(
-        name, std::move(promise_factory), [this, whence](ResultType r) {
-          if (!IsStatusOk(r)) {
-            if (grpc_trace_promise_primitives.enabled()) {
-              gpr_log(GPR_INFO, "SpawnGuarded sees failure: %s (source: %s:%d)",
-                      r.ToString().c_str(), whence.file(), whence.line());
-            }
-            auto status = StatusCast<ServerMetadataHandle>(std::move(r));
-            status->Set(GrpcCallWasCancelled(), true);
-            PushServerTrailingMetadata(std::move(status));
-          }
-        });
+    Spawn(name, std::move(promise_factory), [this, whence](ResultType r) {
+      if (!IsStatusOk(r)) {
+        if (grpc_trace_promise_primitives.enabled()) {
+          gpr_log(GPR_INFO, "SpawnGuarded sees failure: %s (source: %s:%d)",
+                  r.ToString().c_str(), whence.file(), whence.line());
+        }
+        auto status = StatusCast<ServerMetadataHandle>(std::move(r));
+        status->Set(GrpcCallWasCancelled(), true);
+        PushServerTrailingMetadata(std::move(status));
+      }
+    });
   }
   // Wrap a promise so that if the call completes that promise is cancelled.
@@ -154,217 +195,6 @@ class CallSpineInterface {
    });
  }
private:
absl::AnyInvocable<void()> on_done_{nullptr};
};
// Implementation of CallSpine atop the v2 Pipe based arrangement.
// This implementation will go away in favor of an implementation atop
// CallFilters by the time v3 lands.
class PipeBasedCallSpine : public CallSpineInterface {
public:
virtual Pipe<ClientMetadataHandle>& client_initial_metadata() = 0;
virtual Pipe<ServerMetadataHandle>& server_initial_metadata() = 0;
virtual Pipe<MessageHandle>& client_to_server_messages() = 0;
virtual Pipe<MessageHandle>& server_to_client_messages() = 0;
virtual Latch<ServerMetadataHandle>& cancel_latch() = 0;
virtual Latch<bool>& was_cancelled_latch() = 0;
Promise<ValueOrFailure<absl::optional<ServerMetadataHandle>>>
PullServerInitialMetadata() final {
DCHECK(GetContext<Activity>() == &party());
return Map(server_initial_metadata().receiver.Next(),
[](NextResult<ServerMetadataHandle> md)
-> ValueOrFailure<absl::optional<ServerMetadataHandle>> {
if (!md.has_value()) {
if (md.cancelled()) return Failure{};
return absl::optional<ServerMetadataHandle>();
}
return absl::optional<ServerMetadataHandle>(std::move(*md));
});
}
Promise<ServerMetadataHandle> PullServerTrailingMetadata() final {
DCHECK(GetContext<Activity>() == &party());
return cancel_latch().Wait();
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullServerToClientMessage() final {
DCHECK(GetContext<Activity>() == &party());
return Map(server_to_client_messages().receiver.Next(), MapNextMessage);
}
Promise<StatusFlag> PushClientToServerMessage(MessageHandle message) final {
DCHECK(GetContext<Activity>() == &party());
return Map(client_to_server_messages().sender.Push(std::move(message)),
[](bool r) { return StatusFlag(r); });
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullClientToServerMessage() final {
DCHECK(GetContext<Activity>() == &party());
return Map(client_to_server_messages().receiver.Next(), MapNextMessage);
}
Promise<StatusFlag> PushServerToClientMessage(MessageHandle message) final {
DCHECK(GetContext<Activity>() == &party());
return Map(server_to_client_messages().sender.Push(std::move(message)),
[](bool r) { return StatusFlag(r); });
}
void FinishSends() final {
DCHECK(GetContext<Activity>() == &party());
client_to_server_messages().sender.Close();
}
void PushServerTrailingMetadata(ServerMetadataHandle metadata) final {
DCHECK(GetContext<Activity>() == &party());
auto& c = cancel_latch();
if (c.is_set()) return;
const bool was_cancelled =
metadata->get(GrpcCallWasCancelled()).value_or(false);
c.Set(std::move(metadata));
CallOnDone();
was_cancelled_latch().Set(was_cancelled);
client_initial_metadata().sender.CloseWithError();
server_initial_metadata().sender.Close();
client_to_server_messages().sender.CloseWithError();
server_to_client_messages().sender.Close();
}
Promise<bool> WasCancelled() final {
DCHECK(GetContext<Activity>() == &party());
return was_cancelled_latch().Wait();
}
Promise<ValueOrFailure<ClientMetadataHandle>> PullClientInitialMetadata()
final {
DCHECK(GetContext<Activity>() == &party());
return Map(client_initial_metadata().receiver.Next(),
[](NextResult<ClientMetadataHandle> md)
-> ValueOrFailure<ClientMetadataHandle> {
if (!md.has_value()) return Failure{};
return std::move(*md);
});
}
Promise<StatusFlag> PushServerInitialMetadata(
absl::optional<ServerMetadataHandle> md) final {
DCHECK(GetContext<Activity>() == &party());
return If(
md.has_value(),
[&md, this]() {
return Map(server_initial_metadata().sender.Push(std::move(*md)),
[](bool ok) { return StatusFlag(ok); });
},
[this]() {
server_initial_metadata().sender.Close();
return []() -> StatusFlag { return Success{}; };
});
}
private:
static ValueOrFailure<absl::optional<MessageHandle>> MapNextMessage(
NextResult<MessageHandle> r) {
if (!r.has_value()) {
if (r.cancelled()) return Failure{};
return absl::optional<MessageHandle>();
}
return absl::optional<MessageHandle>(std::move(*r));
}
};
class CallSpine final : public CallSpineInterface, public Party {
public:
static RefCountedPtr<CallSpine> Create(
ClientMetadataHandle client_initial_metadata,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena) {
auto* arena_ptr = arena.get();
return RefCountedPtr<CallSpine>(arena_ptr->New<CallSpine>(
std::move(client_initial_metadata), event_engine, std::move(arena)));
}
~CallSpine() override {}
CallFilters& call_filters() { return call_filters_; }
Party& party() override { return *this; }
Arena* arena() override { return arena_.get(); }
void IncrementRefCount() override { Party::IncrementRefCount(); }
void Unref() override { Party::Unref(); }
Promise<ValueOrFailure<absl::optional<ServerMetadataHandle>>>
PullServerInitialMetadata() override {
return call_filters().PullServerInitialMetadata();
}
Promise<ServerMetadataHandle> PullServerTrailingMetadata() override {
return call_filters().PullServerTrailingMetadata();
}
Promise<StatusFlag> PushClientToServerMessage(
MessageHandle message) override {
return call_filters().PushClientToServerMessage(std::move(message));
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullClientToServerMessage() override {
return call_filters().PullClientToServerMessage();
}
Promise<StatusFlag> PushServerToClientMessage(
MessageHandle message) override {
return call_filters().PushServerToClientMessage(std::move(message));
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullServerToClientMessage() override {
return call_filters().PullServerToClientMessage();
}
void PushServerTrailingMetadata(ServerMetadataHandle md) override {
call_filters().PushServerTrailingMetadata(std::move(md));
}
void FinishSends() override { call_filters().FinishClientToServerSends(); }
Promise<ValueOrFailure<ClientMetadataHandle>> PullClientInitialMetadata()
override {
return call_filters().PullClientInitialMetadata();
}
Promise<StatusFlag> PushServerInitialMetadata(
absl::optional<ServerMetadataHandle> md) override {
if (md.has_value()) {
return call_filters().PushServerInitialMetadata(std::move(*md));
} else {
call_filters().NoServerInitialMetadata();
return Immediate<StatusFlag>(Success{});
}
}
Promise<bool> WasCancelled() override {
return call_filters().WasCancelled();
}
ClientMetadata& UnprocessedClientInitialMetadata() override {
return *call_filters().unprocessed_client_initial_metadata();
}
grpc_event_engine::experimental::EventEngine* event_engine() const override {
return event_engine_;
}
void V2HackToStartCallWithoutACallFilterStack() override {
CallFilters::StackBuilder empty_stack_builder;
call_filters().SetStack(empty_stack_builder.Build());
}
 private:
  friend class Arena;
  CallSpine(ClientMetadataHandle client_initial_metadata,
@@ -407,11 +237,13 @@ class CallSpine final : public CallSpineInterface, public Party {
   CallFilters call_filters_;
   // Event engine associated with this call
   grpc_event_engine::experimental::EventEngine* const event_engine_;
+  absl::AnyInvocable<void()> on_done_{nullptr};
 };
 class CallInitiator {
  public:
-  explicit CallInitiator(RefCountedPtr<CallSpineInterface> spine)
+  CallInitiator() = default;
+  explicit CallInitiator(RefCountedPtr<CallSpine> spine)
       : spine_(std::move(spine)) {}
   template <typename Promise>
@@ -435,8 +267,9 @@ class CallInitiator {
     return spine_->PullServerTrailingMetadata();
   }
-  void Cancel() {
-    auto status = ServerMetadataFromStatus(absl::CancelledError());
+  void Cancel(absl::Status error = absl::CancelledError()) {
+    CHECK(!error.ok());
+    auto status = ServerMetadataFromStatus(error);
     status->Set(GrpcCallWasCancelled(), true);
     spine_->PushServerTrailingMetadata(std::move(status));
   }
@@ -461,18 +294,22 @@ class CallInitiator {
   template <typename PromiseFactory>
   auto SpawnWaitable(absl::string_view name, PromiseFactory promise_factory) {
-    return spine_->party().SpawnWaitable(name, std::move(promise_factory));
+    return spine_->SpawnWaitable(name, std::move(promise_factory));
   }
   Arena* arena() { return spine_->arena(); }
+  grpc_event_engine::experimental::EventEngine* event_engine() const {
+    return spine_->event_engine();
+  }
  private:
-  RefCountedPtr<CallSpineInterface> spine_;
+  RefCountedPtr<CallSpine> spine_;
 };
 class CallHandler {
  public:
-  explicit CallHandler(RefCountedPtr<CallSpineInterface> spine)
+  explicit CallHandler(RefCountedPtr<CallSpine> spine)
       : spine_(std::move(spine)) {}
   auto PullClientInitialMetadata() {
@@ -521,22 +358,22 @@ class CallHandler {
   template <typename PromiseFactory>
   auto SpawnWaitable(absl::string_view name, PromiseFactory promise_factory) {
-    return spine_->party().SpawnWaitable(name, std::move(promise_factory));
+    return spine_->SpawnWaitable(name, std::move(promise_factory));
   }
   Arena* arena() { return spine_->arena(); }
   grpc_event_engine::experimental::EventEngine* event_engine() const {
-    return DownCast<CallSpine*>(spine_.get())->event_engine();
+    return spine_->event_engine();
   }
  private:
-  RefCountedPtr<CallSpineInterface> spine_;
+  RefCountedPtr<CallSpine> spine_;
 };
 class UnstartedCallHandler {
  public:
-  explicit UnstartedCallHandler(RefCountedPtr<CallSpineInterface> spine)
+  explicit UnstartedCallHandler(RefCountedPtr<CallSpine> spine)
       : spine_(std::move(spine)) {}
   void PushServerTrailingMetadata(ServerMetadataHandle status) {
@@ -569,29 +406,28 @@ class UnstartedCallHandler {
   template <typename PromiseFactory>
   auto SpawnWaitable(absl::string_view name, PromiseFactory promise_factory) {
-    return spine_->party().SpawnWaitable(name, std::move(promise_factory));
+    return spine_->SpawnWaitable(name, std::move(promise_factory));
   }
   ClientMetadata& UnprocessedClientInitialMetadata() {
     return spine_->UnprocessedClientInitialMetadata();
   }
-  CallHandler V2HackToStartCallWithoutACallFilterStack() {
-    spine_->V2HackToStartCallWithoutACallFilterStack();
-    return CallHandler(std::move(spine_));
+  // Helper for the very common situation in tests where we want to start a call
+  // with an empty filter stack.
+  CallHandler StartWithEmptyFilterStack() {
+    return StartCall(CallFilters::StackBuilder().Build());
   }
   CallHandler StartCall(RefCountedPtr<CallFilters::Stack> call_filters) {
-    DownCast<CallSpine*>(spine_.get())
-        ->call_filters()
-        .SetStack(std::move(call_filters));
+    spine_->call_filters().SetStack(std::move(call_filters));
     return CallHandler(std::move(spine_));
   }
   Arena* arena() { return spine_->arena(); }
  private:
-  RefCountedPtr<CallSpineInterface> spine_;
+  RefCountedPtr<CallSpine> spine_;
 };
 struct CallInitiatorAndHandler {

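// Illustrative sketch only - not part of this change: constructing the
// consolidated CallSpine and starting the handler side. The metadata,
// event_engine and arena arguments are placeholders, and SpawnGuarded on
// CallHandler is assumed to mirror the spine helper of the same name.
void ExampleStartHandler(
    ClientMetadataHandle client_initial_metadata,
    grpc_event_engine::experimental::EventEngine* event_engine,
    RefCountedPtr<Arena> arena) {
  RefCountedPtr<CallSpine> spine = CallSpine::Create(
      std::move(client_initial_metadata), event_engine, std::move(arena));
  // Tests (or code with no filters to install) can go straight to a started
  // handler; production code passes a real CallFilters::Stack to StartCall.
  CallHandler handler =
      UnstartedCallHandler(spine).StartWithEmptyFilterStack();
  handler.SpawnGuarded("read-client-initial-metadata", [handler]() mutable {
    return Map(handler.PullClientInitialMetadata(),
               [](ValueOrFailure<ClientMetadataHandle> md) {
                 return StatusFlag(md.ok());
               });
  });
}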
@@ -550,7 +550,7 @@ class GrpcLb final : public LoadBalancingPolicy {
   bool shutting_down_ = false;
   // The channel for communicating with the LB server.
-  OrphanablePtr<Channel> lb_channel_;
+  RefCountedPtr<Channel> lb_channel_;
   StateWatcher* watcher_ = nullptr;
   // Response generator to inject address updates into lb_channel_.
   RefCountedPtr<FakeResolverResponseGenerator> response_generator_;

@@ -456,6 +456,19 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
     absl::Status status_;
   };
+  // A picker that returns PickResult::Drop for all picks.
+  class DropPicker final : public SubchannelPicker {
+   public:
+    explicit DropPicker(absl::Status status) : status_(status) {}
+    PickResult Pick(PickArgs /*args*/) override {
+      return PickResult::Drop(status_);
+    }
+   private:
+    absl::Status status_;
+  };
  protected:
   std::shared_ptr<WorkSerializer> work_serializer() const {
     return work_serializer_;

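// Illustrative sketch only - not part of this change: how a policy might use
// the new DropPicker to fail every pick while reporting TRANSIENT_FAILURE.
// `helper` stands in for the policy's ChannelControlHelper.
void ExampleReportDropState(LoadBalancingPolicy::ChannelControlHelper* helper) {
  absl::Status status = absl::UnavailableError("dropping all calls");
  helper->UpdateState(
      GRPC_CHANNEL_TRANSIENT_FAILURE, status,
      MakeRefCounted<LoadBalancingPolicy::DropPicker>(status));
}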
@@ -687,7 +687,7 @@ class RlsLb final : public LoadBalancingPolicy {
     RefCountedPtr<RlsLb> lb_policy_;
     bool is_shutdown_ = false;
-    OrphanablePtr<Channel> channel_;
+    RefCountedPtr<Channel> channel_;
     RefCountedPtr<channelz::ChannelNode> parent_channelz_node_;
     StateWatcher* watcher_ = nullptr;
     Throttle throttle_ ABSL_GUARDED_BY(&RlsLb::mu_);

@@ -67,6 +67,8 @@ extern void RegisterWeightedRoundRobinLbPolicy(
     CoreConfiguration::Builder* builder);
 extern void RegisterHttpProxyMapper(CoreConfiguration::Builder* builder);
 extern void RegisterConnectedChannel(CoreConfiguration::Builder* builder);
+extern void RegisterLoadBalancedCallDestination(
+    CoreConfiguration::Builder* builder);
 #ifndef GRPC_NO_RLS
 extern void RegisterRlsLbPolicy(CoreConfiguration::Builder* builder);
 #endif  // !GRPC_NO_RLS
@@ -119,6 +121,7 @@ void BuildCoreConfiguration(CoreConfiguration::Builder* builder) {
   RegisterSockaddrResolver(builder);
   RegisterFakeResolver(builder);
   RegisterHttpProxyMapper(builder);
+  RegisterLoadBalancedCallDestination(builder);
 #ifdef GPR_SUPPORT_BINDER_TRANSPORT
   RegisterBinderResolver(builder);
 #endif

@@ -74,11 +74,12 @@
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/surface/api_trace.h"
 #include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/call_utils.h"
 #include "src/core/lib/surface/channel.h"
 #include "src/core/lib/surface/channel_stack_type.h"
 #include "src/core/lib/surface/completion_queue.h"
 #include "src/core/lib/surface/legacy_channel.h"
-#include "src/core/lib/surface/wait_for_cq_end_op.h"
+#include "src/core/lib/surface/server_call.h"
 #include "src/core/lib/transport/connectivity_state.h"
 #include "src/core/lib/transport/error_utils.h"
 #include "src/core/lib/transport/interception_chain.h"
@@ -770,10 +771,6 @@ class Server::TransportConnectivityWatcher
 const grpc_channel_filter Server::kServerTopFilter = {
     Server::CallData::StartTransportStreamOpBatch,
-    nullptr,
-    [](grpc_channel_element*, CallSpineInterface*) {
-      // TODO(ctiller): remove the server filter when call-v3 is finalized
-    },
     grpc_channel_next_op,
     sizeof(Server::CallData),
     Server::CallData::InitCallElement,
@@ -809,7 +806,7 @@ RefCountedPtr<channelz::ServerNode> CreateChannelzNode(
 absl::StatusOr<ClientMetadataHandle> CheckClientMetadata(
     ValueOrFailure<ClientMetadataHandle> md) {
   if (!md.ok()) {
-    return absl::InternalError("Missing metadata");
+    return absl::InternalError("Error reading metadata");
   }
   if (!md.value()->get_pointer(HttpPathMetadata())) {
     return absl::InternalError("Missing :path header");
@@ -986,14 +983,16 @@ grpc_error_handle Server::SetupTransport(
     ++connections_open_;
   } else {
     CHECK(transport->filter_stack_transport() != nullptr);
-    absl::StatusOr<OrphanablePtr<Channel>> channel = LegacyChannel::Create(
+    absl::StatusOr<RefCountedPtr<Channel>> channel = LegacyChannel::Create(
         "", args.SetObject(transport), GRPC_SERVER_CHANNEL);
     if (!channel.ok()) {
       return absl_status_to_grpc_error(channel.status());
     }
+    CHECK(*channel != nullptr);
+    auto* channel_stack = (*channel)->channel_stack();
+    CHECK(channel_stack != nullptr);
     ChannelData* chand = static_cast<ChannelData*>(
-        grpc_channel_stack_element((*channel)->channel_stack(), 0)
-            ->channel_data);
+        grpc_channel_stack_element(channel_stack, 0)->channel_data);
     // Set up CQs.
     size_t cq_idx;
     for (cq_idx = 0; cq_idx < cqs_.size(); cq_idx++) {
@@ -1135,7 +1134,7 @@ std::vector<RefCountedPtr<Channel>> Server::GetChannelsLocked() const {
   std::vector<RefCountedPtr<Channel>> channels;
   channels.reserve(channels_.size());
   for (const ChannelData* chand : channels_) {
-    channels.push_back(chand->channel()->Ref());
+    channels.push_back(chand->channel()->RefAsSubclass<Channel>());
   }
   return channels;
 }
@@ -1342,7 +1341,7 @@ class Server::ChannelData::ConnectivityWatcher
     : public AsyncConnectivityStateWatcherInterface {
  public:
   explicit ConnectivityWatcher(ChannelData* chand)
-      : chand_(chand), channel_(chand_->channel_->Ref()) {}
+      : chand_(chand), channel_(chand_->channel_->RefAsSubclass<Channel>()) {}
  private:
   void OnConnectivityStateChange(grpc_connectivity_state new_state,
@@ -1379,7 +1378,7 @@ Server::ChannelData::~ChannelData() {
 }
 void Server::ChannelData::InitTransport(RefCountedPtr<Server> server,
-                                        OrphanablePtr<Channel> channel,
+                                        RefCountedPtr<Channel> channel,
                                         size_t cq_idx, Transport* transport,
                                         intptr_t channelz_socket_uuid) {
   server_ = std::move(server);
@@ -1451,7 +1450,7 @@ void Server::ChannelData::AcceptStream(void* arg, Transport* /*transport*/,
   auto* chand = static_cast<Server::ChannelData*>(arg);
   // create a call
   grpc_call_create_args args;
-  args.channel = chand->channel_->Ref();
+  args.channel = chand->channel_->RefAsSubclass<Channel>();
   args.server = chand->server_.get();
   args.parent = nullptr;
   args.propagation_mask = 0;

@@ -243,7 +243,7 @@ class Server : public ServerInterface,
     ~ChannelData();
     void InitTransport(RefCountedPtr<Server> server,
-                       OrphanablePtr<Channel> channel, size_t cq_idx,
+                       RefCountedPtr<Channel> channel, size_t cq_idx,
                        Transport* transport, intptr_t channelz_socket_uuid);
     RefCountedPtr<Server> server() const { return server_; }
@@ -254,7 +254,6 @@ class Server : public ServerInterface,
     static grpc_error_handle InitChannelElement(
         grpc_channel_element* elem, grpc_channel_element_args* args);
     static void DestroyChannelElement(grpc_channel_element* elem);
-    void InitCall(RefCountedPtr<CallSpineInterface> call);
    private:
     class ConnectivityWatcher;
@@ -267,7 +266,7 @@ class Server : public ServerInterface,
     static void FinishDestroy(void* arg, grpc_error_handle error);
     RefCountedPtr<Server> server_;
-    OrphanablePtr<Channel> channel_;
+    RefCountedPtr<Channel> channel_;
     // The index into Server::cqs_ of the CQ used as a starting point for
     // where to publish new incoming calls.
     size_t cq_idx_;

@@ -102,7 +102,6 @@ ServerCallTracerFilter::Create(const ChannelArgs& /*args*/,
 }  // namespace
 void RegisterServerCallTracerFilter(CoreConfiguration::Builder* builder) {
-  if (IsChaoticGoodEnabled()) return;
   builder->channel_init()->RegisterFilter<ServerCallTracerFilter>(
       GRPC_SERVER_CHANNEL);
 }

@@ -253,12 +253,12 @@ class GrpcXdsTransportFactory::GrpcXdsTransport::StateWatcher final
 namespace {
-OrphanablePtr<Channel> CreateXdsChannel(
+RefCountedPtr<Channel> CreateXdsChannel(
     const ChannelArgs& args, const GrpcXdsBootstrap::GrpcXdsServer& server) {
   RefCountedPtr<grpc_channel_credentials> channel_creds =
       CoreConfiguration::Get().channel_creds_registry().CreateChannelCreds(
           server.channel_creds_config());
-  return OrphanablePtr<Channel>(Channel::FromC(grpc_channel_create(
+  return RefCountedPtr<Channel>(Channel::FromC(grpc_channel_create(
       server.server_uri().c_str(), channel_creds.get(), args.ToC().get())));
 }

@@ -83,7 +83,7 @@ class GrpcXdsTransportFactory::GrpcXdsTransport final
   class StateWatcher;
   GrpcXdsTransportFactory* factory_;  // Not owned.
-  OrphanablePtr<Channel> channel_;
+  RefCountedPtr<Channel> channel_;
   StateWatcher* watcher_;
 };

@@ -675,22 +675,24 @@ CORE_SOURCE_FILES = [
     'src/core/lib/surface/call.cc',
     'src/core/lib/surface/call_details.cc',
     'src/core/lib/surface/call_log_batch.cc',
+    'src/core/lib/surface/call_utils.cc',
     'src/core/lib/surface/channel.cc',
     'src/core/lib/surface/channel_create.cc',
     'src/core/lib/surface/channel_init.cc',
     'src/core/lib/surface/channel_stack_type.cc',
+    'src/core/lib/surface/client_call.cc',
     'src/core/lib/surface/completion_queue.cc',
     'src/core/lib/surface/completion_queue_factory.cc',
     'src/core/lib/surface/event_string.cc',
+    'src/core/lib/surface/filter_stack_call.cc',
     'src/core/lib/surface/init.cc',
     'src/core/lib/surface/init_internally.cc',
     'src/core/lib/surface/lame_client.cc',
     'src/core/lib/surface/legacy_channel.cc',
     'src/core/lib/surface/metadata_array.cc',
+    'src/core/lib/surface/server_call.cc',
     'src/core/lib/surface/validate_metadata.cc',
     'src/core/lib/surface/version.cc',
-    'src/core/lib/surface/wait_for_cq_end_op.cc',
-    'src/core/lib/transport/batch_builder.cc',
     'src/core/lib/transport/bdp_estimator.cc',
     'src/core/lib/transport/call_arena_allocator.cc',
     'src/core/lib/transport/call_filters.cc',

@@ -0,0 +1,79 @@
# Copyright 2024 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package")
load("//test/core/call/yodel:grpc_yodel_test.bzl", "grpc_yodel_simple_test")
grpc_package(name = "test/core/call")
grpc_yodel_simple_test(
name = "client_call",
srcs = [
"client_call_test.cc",
],
external_deps = ["gtest"],
language = "C++",
deps = [
"batch_builder",
"//:grpc_base",
"//test/core/call/yodel:yodel_test",
],
)
grpc_yodel_simple_test(
name = "server_call",
srcs = [
"server_call_test.cc",
],
external_deps = ["gtest"],
language = "C++",
deps = [
"batch_builder",
"//:grpc_base",
"//test/core/call/yodel:yodel_test",
],
)
grpc_cc_test(
name = "call_utils_test",
srcs = [
"call_utils_test.cc",
],
external_deps = ["gtest"],
language = "C++",
deps = [
"//:grpc_base",
],
)
grpc_cc_library(
name = "batch_builder",
testonly = True,
srcs = [
"batch_builder.cc",
],
hdrs = [
"batch_builder.h",
],
external_deps = [
"absl/strings",
"gtest",
],
visibility = ["//test/core:__subpackages__"],
deps = [
"//:grpc",
"//src/core:slice",
"//test/core/end2end:cq_verifier",
],
)

@@ -0,0 +1,208 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test/core/call/batch_builder.h"
#include <grpc/byte_buffer_reader.h>
#include "src/core/lib/compression/message_compress.h"
namespace grpc_core {
ByteBufferUniquePtr ByteBufferFromSlice(Slice slice) {
return ByteBufferUniquePtr(
grpc_raw_byte_buffer_create(const_cast<grpc_slice*>(&slice.c_slice()), 1),
grpc_byte_buffer_destroy);
}
absl::optional<std::string> FindInMetadataArray(const grpc_metadata_array& md,
absl::string_view key) {
for (size_t i = 0; i < md.count; i++) {
if (key == StringViewFromSlice(md.metadata[i].key)) {
return std::string(StringViewFromSlice(md.metadata[i].value));
}
}
return absl::nullopt;
}
absl::optional<std::string> IncomingMetadata::Get(absl::string_view key) const {
return FindInMetadataArray(*metadata_, key);
}
grpc_op IncomingMetadata::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
op.data.recv_initial_metadata.recv_initial_metadata = metadata_.get();
return op;
}
std::string IncomingMetadata::GetSuccessfulStateString() {
std::string out = "incoming_metadata: {";
for (size_t i = 0; i < metadata_->count; i++) {
absl::StrAppend(&out, StringViewFromSlice(metadata_->metadata[i].key), ":",
StringViewFromSlice(metadata_->metadata[i].value), ",");
}
return out + "}";
}
std::string IncomingMessage::payload() const {
Slice out;
if (payload_->data.raw.compression > GRPC_COMPRESS_NONE) {
grpc_slice_buffer decompressed_buffer;
grpc_slice_buffer_init(&decompressed_buffer);
CHECK(grpc_msg_decompress(payload_->data.raw.compression,
&payload_->data.raw.slice_buffer,
&decompressed_buffer));
grpc_byte_buffer* rbb = grpc_raw_byte_buffer_create(
decompressed_buffer.slices, decompressed_buffer.count);
grpc_byte_buffer_reader reader;
CHECK(grpc_byte_buffer_reader_init(&reader, rbb));
out = Slice(grpc_byte_buffer_reader_readall(&reader));
grpc_byte_buffer_reader_destroy(&reader);
grpc_byte_buffer_destroy(rbb);
grpc_slice_buffer_destroy(&decompressed_buffer);
} else {
grpc_byte_buffer_reader reader;
CHECK(grpc_byte_buffer_reader_init(&reader, payload_));
out = Slice(grpc_byte_buffer_reader_readall(&reader));
grpc_byte_buffer_reader_destroy(&reader);
}
return std::string(out.begin(), out.end());
}
grpc_op IncomingMessage::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message.recv_message = &payload_;
return op;
}
absl::optional<std::string> IncomingStatusOnClient::GetTrailingMetadata(
absl::string_view key) const {
return FindInMetadataArray(data_->trailing_metadata, key);
}
std::string IncomingStatusOnClient::GetSuccessfulStateString() {
std::string out = absl::StrCat(
"status_on_client: status=", data_->status,
" msg=", data_->status_details.as_string_view(), " trailing_metadata={");
for (size_t i = 0; i < data_->trailing_metadata.count; i++) {
absl::StrAppend(
&out, StringViewFromSlice(data_->trailing_metadata.metadata[i].key),
": ", StringViewFromSlice(data_->trailing_metadata.metadata[i].value),
",");
}
return out + "}";
}
std::string IncomingMessage::GetSuccessfulStateString() {
if (payload_ == nullptr) return "message: empty";
return absl::StrCat("message: ", payload().size(), "b uncompressed");
}
grpc_op IncomingStatusOnClient::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op.data.recv_status_on_client.trailing_metadata = &data_->trailing_metadata;
op.data.recv_status_on_client.status = &data_->status;
op.data.recv_status_on_client.status_details =
const_cast<grpc_slice*>(&data_->status_details.c_slice());
op.data.recv_status_on_client.error_string = &data_->error_string;
return op;
}
grpc_op IncomingCloseOnServer::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op.data.recv_close_on_server.cancelled = &cancelled_;
return op;
}
BatchBuilder& BatchBuilder::SendInitialMetadata(
std::initializer_list<std::pair<absl::string_view, absl::string_view>> md,
uint32_t flags, absl::optional<grpc_compression_level> compression_level) {
auto& v = Make<std::vector<grpc_metadata>>();
for (const auto& p : md) {
grpc_metadata m;
m.key = Make<Slice>(Slice::FromCopiedString(p.first)).c_slice();
m.value = Make<Slice>(Slice::FromCopiedString(p.second)).c_slice();
v.push_back(m);
}
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_INITIAL_METADATA;
op.flags = flags;
op.data.send_initial_metadata.count = v.size();
op.data.send_initial_metadata.metadata = v.data();
if (compression_level.has_value()) {
op.data.send_initial_metadata.maybe_compression_level.is_set = 1;
op.data.send_initial_metadata.maybe_compression_level.level =
compression_level.value();
}
ops_.push_back(op);
return *this;
}
BatchBuilder& BatchBuilder::SendMessage(Slice payload, uint32_t flags) {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message =
Make<ByteBufferUniquePtr>(ByteBufferFromSlice(std::move(payload))).get();
op.flags = flags;
ops_.push_back(op);
return *this;
}
BatchBuilder& BatchBuilder::SendCloseFromClient() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops_.push_back(op);
return *this;
}
BatchBuilder& BatchBuilder::SendStatusFromServer(
grpc_status_code status, absl::string_view message,
std::initializer_list<std::pair<absl::string_view, absl::string_view>> md) {
auto& v = Make<std::vector<grpc_metadata>>();
for (const auto& p : md) {
grpc_metadata m;
m.key = Make<Slice>(Slice::FromCopiedString(p.first)).c_slice();
m.value = Make<Slice>(Slice::FromCopiedString(p.second)).c_slice();
v.push_back(m);
}
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op.data.send_status_from_server.trailing_metadata_count = v.size();
op.data.send_status_from_server.trailing_metadata = v.data();
op.data.send_status_from_server.status = status;
op.data.send_status_from_server.status_details = &Make<grpc_slice>(
Make<Slice>(Slice::FromCopiedString(message)).c_slice());
ops_.push_back(op);
return *this;
}
BatchBuilder::~BatchBuilder() {
grpc_call_error err = grpc_call_start_batch(call_, ops_.data(), ops_.size(),
CqVerifier::tag(tag_), nullptr);
EXPECT_EQ(err, GRPC_CALL_OK) << grpc_call_error_to_string(err);
}
} // namespace grpc_core

@ -0,0 +1,261 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_TEST_CORE_CALL_BATCH_BUILDER_H
#define GRPC_TEST_CORE_CALL_BATCH_BUILDER_H
#include "absl/strings/str_cat.h"
#include "gtest/gtest.h"
#include "src/core/lib/slice/slice.h"
#include "test/core/end2end/cq_verifier.h"
namespace grpc_core {
using ByteBufferUniquePtr =
std::unique_ptr<grpc_byte_buffer, void (*)(grpc_byte_buffer*)>;
ByteBufferUniquePtr ByteBufferFromSlice(Slice slice);
absl::optional<std::string> FindInMetadataArray(const grpc_metadata_array& md,
absl::string_view key);
// Receiving container for incoming metadata.
class IncomingMetadata final : public CqVerifier::SuccessfulStateString {
public:
IncomingMetadata() = default;
~IncomingMetadata() {
if (metadata_ != nullptr) grpc_metadata_array_destroy(metadata_.get());
}
// Lookup a metadata value by key.
absl::optional<std::string> Get(absl::string_view key) const;
// Make a GRPC_OP_RECV_INITIAL_METADATA op - intended for the framework, not
// for tests.
grpc_op MakeOp();
std::string GetSuccessfulStateString() override;
private:
std::unique_ptr<grpc_metadata_array> metadata_ =
std::make_unique<grpc_metadata_array>(grpc_metadata_array{0, 0, nullptr});
};
// Receiving container for one incoming message.
class IncomingMessage final : public CqVerifier::SuccessfulStateString {
public:
IncomingMessage() = default;
IncomingMessage(const IncomingMessage&) = delete;
IncomingMessage& operator=(const IncomingMessage&) = delete;
~IncomingMessage() {
if (payload_ != nullptr) grpc_byte_buffer_destroy(payload_);
}
// Get the payload of the message - concatenated together into a string for
// easy verification.
std::string payload() const;
// Check if the message is the end of the stream.
bool is_end_of_stream() const { return payload_ == nullptr; }
// Get the type of the message.
grpc_byte_buffer_type byte_buffer_type() const { return payload_->type; }
// Get the compression algorithm used for the message.
grpc_compression_algorithm compression() const {
return payload_->data.raw.compression;
}
std::string GetSuccessfulStateString() override;
// Make a GRPC_OP_RECV_MESSAGE op - intended for the framework, not for
// tests.
grpc_op MakeOp();
// Accessor for CoreEnd2endTest::IncomingCall - get a pointer to the
// underlying payload.
// Not intended for direct use in tests.
grpc_byte_buffer** raw_payload_ptr() { return &payload_; }
private:
grpc_byte_buffer* payload_ = nullptr;
};
// Receiving container for incoming status on the client from the server.
class IncomingStatusOnClient final : public CqVerifier::SuccessfulStateString {
public:
IncomingStatusOnClient() = default;
IncomingStatusOnClient(const IncomingStatusOnClient&) = delete;
IncomingStatusOnClient& operator=(const IncomingStatusOnClient&) = delete;
IncomingStatusOnClient(IncomingStatusOnClient&& other) noexcept = default;
IncomingStatusOnClient& operator=(IncomingStatusOnClient&& other) noexcept =
default;
~IncomingStatusOnClient() {
if (data_ != nullptr) {
grpc_metadata_array_destroy(&data_->trailing_metadata);
gpr_free(const_cast<char*>(data_->error_string));
}
}
// Get the status code.
grpc_status_code status() const { return data_->status; }
// Get the status details.
std::string message() const {
return std::string(data_->status_details.as_string_view());
}
// Get the error string.
std::string error_string() const {
return data_->error_string == nullptr ? "" : data_->error_string;
}
// Get a trailing metadata value by key.
absl::optional<std::string> GetTrailingMetadata(absl::string_view key) const;
std::string GetSuccessfulStateString() override;
// Make a GRPC_OP_RECV_STATUS_ON_CLIENT op - intended for the framework, not
// for tests.
grpc_op MakeOp();
private:
struct Data {
grpc_metadata_array trailing_metadata{0, 0, nullptr};
grpc_status_code status;
Slice status_details;
const char* error_string = nullptr;
};
std::unique_ptr<Data> data_ = std::make_unique<Data>();
};
// Receiving container for incoming status on the server from the client.
class IncomingCloseOnServer final : public CqVerifier::SuccessfulStateString {
public:
IncomingCloseOnServer() = default;
IncomingCloseOnServer(const IncomingCloseOnServer&) = delete;
IncomingCloseOnServer& operator=(const IncomingCloseOnServer&) = delete;
// Get the cancellation bit.
bool was_cancelled() const { return cancelled_ != 0; }
// Make a GRPC_OP_RECV_CLOSE_ON_SERVER op - intended for the framework, not
// for tests.
grpc_op MakeOp();
std::string GetSuccessfulStateString() override {
return absl::StrCat("close_on_server: cancelled=", cancelled_);
}
private:
int cancelled_;
};
// Builds one batch. Returned from NewBatch (use that to instantiate this!).
// When the BatchBuilder is destroyed, the batch is started with all of the
// ops that were added to it (a usage sketch follows this header).
class BatchBuilder {
public:
BatchBuilder(grpc_call* call, CqVerifier* cq_verifier, int tag)
: call_(call), tag_(tag), cq_verifier_(cq_verifier) {
cq_verifier_->ClearSuccessfulStateStrings(CqVerifier::tag(tag_));
}
~BatchBuilder();
BatchBuilder(const BatchBuilder&) = delete;
BatchBuilder& operator=(const BatchBuilder&) = delete;
BatchBuilder(BatchBuilder&&) noexcept = default;
// Add a GRPC_OP_SEND_INITIAL_METADATA op.
// Optionally specify flags, compression level.
BatchBuilder& SendInitialMetadata(
std::initializer_list<std::pair<absl::string_view, absl::string_view>> md,
uint32_t flags = 0,
absl::optional<grpc_compression_level> compression_level = absl::nullopt);
// Add a GRPC_OP_SEND_MESSAGE op.
BatchBuilder& SendMessage(Slice payload, uint32_t flags = 0);
BatchBuilder& SendMessage(absl::string_view payload, uint32_t flags = 0) {
return SendMessage(Slice::FromCopiedString(payload), flags);
}
// Add a GRPC_OP_SEND_CLOSE_FROM_CLIENT op.
BatchBuilder& SendCloseFromClient();
// Add a GRPC_OP_SEND_STATUS_FROM_SERVER op.
BatchBuilder& SendStatusFromServer(
grpc_status_code status, absl::string_view message,
std::initializer_list<std::pair<absl::string_view, absl::string_view>>
md);
// Add a GRPC_OP_RECV_INITIAL_METADATA op.
BatchBuilder& RecvInitialMetadata(IncomingMetadata& md) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &md);
ops_.emplace_back(md.MakeOp());
return *this;
}
// Add a GRPC_OP_RECV_MESSAGE op.
BatchBuilder& RecvMessage(IncomingMessage& msg) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &msg);
ops_.emplace_back(msg.MakeOp());
return *this;
}
// Add a GRPC_OP_RECV_STATUS_ON_CLIENT op.
BatchBuilder& RecvStatusOnClient(IncomingStatusOnClient& status) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &status);
ops_.emplace_back(status.MakeOp());
return *this;
}
// Add a GRPC_OP_RECV_CLOSE_ON_SERVER op.
BatchBuilder& RecvCloseOnServer(IncomingCloseOnServer& close) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &close);
ops_.emplace_back(close.MakeOp());
return *this;
}
private:
// We need to track little bits of memory up until the batch is executed.
// One Thing is one such block of memory.
// We specialize it with SpecificThing to track a specific type of memory.
// These get placed on things_ and deleted when the batch is executed.
class Thing {
public:
virtual ~Thing() = default;
};
template <typename T>
class SpecificThing final : public Thing {
public:
template <typename... Args>
explicit SpecificThing(Args&&... args) : t_(std::forward<Args>(args)...) {}
SpecificThing() = default;
T& get() { return t_; }
private:
T t_;
};
// Make a thing of type T, and return a reference to it.
template <typename T, typename... Args>
T& Make(Args&&... args) {
things_.emplace_back(new SpecificThing<T>(std::forward<Args>(args)...));
return static_cast<SpecificThing<T>*>(things_.back().get())->get();
}
grpc_call* call_;
const int tag_;
std::vector<grpc_op> ops_;
std::vector<std::unique_ptr<Thing>> things_;
CqVerifier* const cq_verifier_;
};
} // namespace grpc_core
#endif // GRPC_TEST_CORE_CALL_BATCH_BUILDER_H

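For orientation, here is a minimal sketch (not part of this change) of how the BatchBuilder DSL and the Incoming* receiving containers compose inside a test body. It is a fragment, not a complete test: the client call wrapper c and the Expect/Step helpers are assumed to come from a CoreEnd2endTest-style fixture, mirroring the usage exercised by the end2end tests updated later in this change.

// Sketch only - c, Expect and Step are assumed from a CoreEnd2endTest-style
// fixture; the pattern mirrors the tests touched later in this change.
IncomingMetadata server_initial_metadata;
IncomingStatusOnClient server_status;
c.NewBatch(1)
    .SendInitialMetadata({{"key", "value"}})
    .SendCloseFromClient()
    .RecvInitialMetadata(server_initial_metadata)
    .RecvStatusOnClient(server_status);
// The batch is started when the temporary BatchBuilder goes out of scope at
// the end of the statement above; tag 1 is then checked via the CqVerifier.
Expect(1, true);
Step();
// Results are read back from the receiving containers afterwards, e.g.
// server_status.status(), server_status.message(),
// server_initial_metadata.Get("some-key").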
@ -0,0 +1,72 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/call_utils.h"
#include <initializer_list>
#include "gtest/gtest.h"
#include <grpc/grpc.h>
namespace grpc_core {
TEST(CallUtils, AreWriteFlagsValid) {
EXPECT_TRUE(AreWriteFlagsValid(0));
EXPECT_TRUE(AreWriteFlagsValid(GRPC_WRITE_BUFFER_HINT));
EXPECT_TRUE(AreWriteFlagsValid(GRPC_WRITE_NO_COMPRESS));
EXPECT_FALSE(AreWriteFlagsValid(0xffffffff));
}
TEST(CallUtils, AreInitialMetadataFlagsValid) {
EXPECT_TRUE(AreInitialMetadataFlagsValid(0));
EXPECT_TRUE(
AreInitialMetadataFlagsValid(GRPC_INITIAL_METADATA_WAIT_FOR_READY));
EXPECT_TRUE(AreInitialMetadataFlagsValid(GRPC_WRITE_THROUGH));
EXPECT_FALSE(AreInitialMetadataFlagsValid(0xffffffff));
}
namespace {
void do_these_things(std::initializer_list<int>) {}
template <typename... T>
std::vector<grpc_op> TestOps(T... ops) {
std::vector<grpc_op> out;
auto add_op = [&out](grpc_op_type type) {
grpc_op op;
op.op = type;
out.push_back(op);
return 1;
};
do_these_things({add_op(ops)...});
return out;
}
} // namespace
TEST(BatchOpIndex, Basic) {
const auto ops = TestOps(GRPC_OP_SEND_INITIAL_METADATA, GRPC_OP_SEND_MESSAGE,
GRPC_OP_SEND_CLOSE_FROM_CLIENT);
BatchOpIndex idx(ops.data(), ops.size());
EXPECT_EQ(idx.op(GRPC_OP_SEND_INITIAL_METADATA), &ops[0]);
EXPECT_EQ(idx.op(GRPC_OP_SEND_MESSAGE), &ops[1]);
EXPECT_EQ(idx.op(GRPC_OP_SEND_CLOSE_FROM_CLIENT), &ops[2]);
EXPECT_EQ(idx.op(GRPC_OP_SEND_STATUS_FROM_SERVER), nullptr);
}
} // namespace grpc_core
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

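As context for the BatchOpIndex test above: the class gives quick lookup of an op by type within the array passed to grpc_call_start_batch. A hypothetical consumer on the call-implementation side might look like the sketch below; HandleBatch is an illustrative name, not an API introduced by this change.

// Sketch only - uses BatchOpIndex exactly as exercised in the test above.
void HandleBatch(const grpc_op* ops, size_t nops) {
  grpc_core::BatchOpIndex idx(ops, nops);
  if (const grpc_op* op = idx.op(GRPC_OP_SEND_MESSAGE)) {
    // op->data.send_message.send_message holds the outgoing payload.
  }
  if (idx.op(GRPC_OP_SEND_CLOSE_FROM_CLIENT) != nullptr) {
    // The client has finished sending.
  }
}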
@ -0,0 +1,251 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/client_call.h"
#include "absl/status/status.h"
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/transport/metadata.h"
#include "test/core/call/batch_builder.h"
#include "test/core/call/yodel/yodel_test.h"
namespace grpc_core {
namespace {
const absl::string_view kDefaultPath = "/foo/bar";
}
class ClientCallTest : public YodelTest {
protected:
using YodelTest::YodelTest;
class CallOptions {
public:
Slice path() const { return path_.Copy(); }
absl::optional<Slice> authority() const {
return authority_.has_value() ? absl::optional<Slice>(authority_->Copy())
: absl::nullopt;
}
bool registered_method() const { return registered_method_; }
Duration timeout() const { return timeout_; }
grpc_compression_options compression_options() const {
return compression_options_;
}
CallOptions& SetTimeout(Duration timeout) {
timeout_ = timeout;
return *this;
}
private:
Slice path_ = Slice::FromCopiedString(kDefaultPath);
absl::optional<Slice> authority_;
bool registered_method_ = false;
Duration timeout_ = Duration::Infinity();
grpc_compression_options compression_options_ = {
1,
{0, GRPC_COMPRESS_LEVEL_NONE},
{0, GRPC_COMPRESS_NONE},
};
};
grpc_call* InitCall(const CallOptions& options) {
CHECK_EQ(call_, nullptr);
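// MakeClientCall binds the new call to destination_, so the test's
// TestCallDestination receives the server-side handler once the call starts.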
call_ = MakeClientCall(nullptr, 0, cq_, options.path(), options.authority(),
options.registered_method(),
options.timeout() + Timestamp::Now(),
options.compression_options(), event_engine().get(),
SimpleArenaAllocator()->MakeArena(), destination_);
return call_;
}
BatchBuilder NewBatch(int tag) {
return BatchBuilder(call_, cq_verifier_.get(), tag);
}
// Pull in CqVerifier types for ergonomics
using ExpectedResult = CqVerifier::ExpectedResult;
using Maybe = CqVerifier::Maybe;
using PerformAction = CqVerifier::PerformAction;
using MaybePerformAction = CqVerifier::MaybePerformAction;
using AnyStatus = CqVerifier::AnyStatus;
void Expect(int tag, ExpectedResult result, SourceLocation whence = {}) {
expectations_++;
cq_verifier_->Expect(CqVerifier::tag(tag), std::move(result), whence);
}
void TickThroughCqExpectations(
absl::optional<Duration> timeout = absl::nullopt,
SourceLocation whence = {}) {
if (expectations_ == 0) {
cq_verifier_->VerifyEmpty(timeout.value_or(Duration::Seconds(1)), whence);
return;
}
expectations_ = 0;
cq_verifier_->Verify(timeout.value_or(Duration::Seconds(10)), whence);
}
CallHandler& handler() {
CHECK(handler_.has_value());
return *handler_;
}
private:
class TestCallDestination final : public UnstartedCallDestination {
public:
explicit TestCallDestination(ClientCallTest* test) : test_(test) {}
void Orphaned() override {}
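// Capture the (single) started call handler so the test body can play the
// server side of the call.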
void StartCall(UnstartedCallHandler handler) override {
CHECK(!test_->handler_.has_value());
test_->handler_.emplace(handler.StartWithEmptyFilterStack());
}
private:
ClientCallTest* const test_;
};
void InitTest() override {
cq_ = grpc_completion_queue_create_for_next(nullptr);
cq_verifier_ = absl::make_unique<CqVerifier>(
cq_, CqVerifier::FailUsingGprCrash,
[this](
grpc_event_engine::experimental::EventEngine::Duration max_step) {
event_engine()->Tick(max_step);
});
}
void Shutdown() override {
if (call_ != nullptr) {
grpc_call_unref(call_);
}
handler_.reset();
grpc_completion_queue_shutdown(cq_);
auto ev = grpc_completion_queue_next(
cq_, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
CHECK_EQ(ev.type, GRPC_QUEUE_SHUTDOWN);
grpc_completion_queue_destroy(cq_);
}
grpc_completion_queue* cq_ = nullptr;
grpc_call* call_ = nullptr;
RefCountedPtr<TestCallDestination> destination_ =
MakeRefCounted<TestCallDestination>(this);
absl::optional<CallHandler> handler_;
std::unique_ptr<CqVerifier> cq_verifier_;
int expectations_ = 0;
};
#define CLIENT_CALL_TEST(name) YODEL_TEST(ClientCallTest, name)
CLIENT_CALL_TEST(NoOp) { InitCall(CallOptions()); }
CLIENT_CALL_TEST(SendInitialMetadata) {
InitCall(CallOptions());
NewBatch(1).SendInitialMetadata({
{"foo", "bar"},
});
Expect(1, true);
TickThroughCqExpectations();
SpawnTestSeq(
handler(), "pull-initial-metadata",
[this]() { return handler().PullClientInitialMetadata(); },
[](ValueOrFailure<ClientMetadataHandle> md) {
CHECK(md.ok());
CHECK_NE((*md)->get_pointer(HttpPathMetadata()), nullptr);
EXPECT_EQ((*md)->get_pointer(HttpPathMetadata())->as_string_view(),
kDefaultPath);
std::string buffer;
auto r = (*md)->GetStringValue("foo", &buffer);
EXPECT_EQ(r, "bar");
return Immediate(Empty{});
});
WaitForAllPendingWork();
}
CLIENT_CALL_TEST(SendInitialMetadataAndReceiveStatusAfterCancellation) {
InitCall(CallOptions());
IncomingStatusOnClient status;
NewBatch(1).SendInitialMetadata({}).RecvStatusOnClient(status);
SpawnTestSeq(
handler(), "pull-initial-metadata",
[this]() { return handler().PullClientInitialMetadata(); },
[this](ValueOrFailure<ClientMetadataHandle> md) {
CHECK(md.ok());
EXPECT_EQ((*md)->get_pointer(HttpPathMetadata())->as_string_view(),
kDefaultPath);
handler().PushServerTrailingMetadata(
ServerMetadataFromStatus(absl::InternalError("test error")));
return Immediate(Empty{});
});
Expect(1, true);
TickThroughCqExpectations();
EXPECT_EQ(status.status(), GRPC_STATUS_INTERNAL);
EXPECT_EQ(status.message(), "test error");
WaitForAllPendingWork();
}
CLIENT_CALL_TEST(SendInitialMetadataAndReceiveStatusAfterTimeout) {
auto start = Timestamp::Now();
InitCall(CallOptions().SetTimeout(Duration::Seconds(1)));
IncomingStatusOnClient status;
NewBatch(1).SendInitialMetadata({}).RecvStatusOnClient(status);
Expect(1, true);
TickThroughCqExpectations();
EXPECT_EQ(status.status(), GRPC_STATUS_DEADLINE_EXCEEDED);
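// Timestamp::Now() is cached per ExecCtx; invalidate the cache so the
// elapsed-time checks below observe the real post-timeout clock.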
ExecCtx::Get()->InvalidateNow();
auto now = Timestamp::Now();
EXPECT_GE(now - start, Duration::Seconds(1)) << GRPC_DUMP_ARGS(now, start);
EXPECT_LE(now - start, Duration::Seconds(5)) << GRPC_DUMP_ARGS(now, start);
WaitForAllPendingWork();
}
CLIENT_CALL_TEST(CancelBeforeInvoke1) {
grpc_call_cancel(InitCall(CallOptions()), nullptr);
IncomingStatusOnClient status;
NewBatch(1).RecvStatusOnClient(status);
Expect(1, true);
TickThroughCqExpectations();
EXPECT_EQ(status.status(), GRPC_STATUS_CANCELLED);
}
CLIENT_CALL_TEST(CancelBeforeInvoke2) {
grpc_call_cancel(InitCall(CallOptions()), nullptr);
IncomingStatusOnClient status;
NewBatch(1).RecvStatusOnClient(status).SendInitialMetadata({});
Expect(1, true);
TickThroughCqExpectations();
EXPECT_EQ(status.status(), GRPC_STATUS_CANCELLED);
}
CLIENT_CALL_TEST(NegativeDeadline) {
auto start = Timestamp::Now();
InitCall(CallOptions().SetTimeout(Duration::Seconds(-1)));
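// A negative timeout yields an absolute deadline (timeout + now) already in
// the past, so the call should fail immediately with DEADLINE_EXCEEDED.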
IncomingStatusOnClient status;
NewBatch(1).SendInitialMetadata({}).RecvStatusOnClient(status);
Expect(1, true);
TickThroughCqExpectations();
EXPECT_EQ(status.status(), GRPC_STATUS_DEADLINE_EXCEEDED);
auto now = Timestamp::Now();
EXPECT_LE(now - start, Duration::Milliseconds(100))
<< GRPC_DUMP_ARGS(now, start);
WaitForAllPendingWork();
}
} // namespace grpc_core

@ -0,0 +1,138 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/server_call.h"
#include <atomic>
#include "absl/status/status.h"
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include "src/core/channelz/channelz.h"
#include "src/core/lib/promise/promise.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/telemetry/call_tracer.h"
#include "test/core/call/batch_builder.h"
#include "test/core/call/yodel/yodel_test.h"
namespace grpc_core {
namespace {
const absl::string_view kDefaultPath = "/foo/bar";
}
class ServerCallTest : public YodelTest {
protected:
using YodelTest::YodelTest;
grpc_call* InitCall(ClientMetadataHandle client_initial_metadata) {
CHECK_EQ(call_, nullptr);
auto call =
MakeCallPair(std::move(client_initial_metadata), event_engine().get(),
SimpleArenaAllocator()->MakeArena());
call.initiator.SpawnGuarded(
"initial_metadata",
[this, handler = call.handler.StartWithEmptyFilterStack()]() mutable {
return TrySeq(
handler.PullClientInitialMetadata(),
[this,
handler](ClientMetadataHandle client_initial_metadata) mutable {
call_.store(MakeServerCall(std::move(handler),
std::move(client_initial_metadata),
&test_server_, cq_,
&publish_initial_metadata_),
std::memory_order_release);
return absl::OkStatus();
});
});
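// The server call is created inside the promise spawned above; busy-wait
// until it publishes the grpc_call* through the atomic.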
while (true) {
auto* result = call_.load(std::memory_order_acquire);
if (result != nullptr) return result;
}
}
ClientMetadataHandle MakeClientInitialMetadata(
std::initializer_list<std::pair<absl::string_view, absl::string_view>>
md) {
auto client_initial_metadata = Arena::MakePooled<ClientMetadata>();
client_initial_metadata->Set(HttpPathMetadata(),
Slice::FromCopiedString(kDefaultPath));
for (const auto& pair : md) {
client_initial_metadata->Append(
pair.first, Slice::FromCopiedBuffer(pair.second),
[](absl::string_view error, const Slice&) { Crash(error); });
}
return client_initial_metadata;
}
absl::optional<std::string> GetClientInitialMetadata(absl::string_view key) {
CHECK_NE(call_.load(std::memory_order_acquire), nullptr);
return FindInMetadataArray(publish_initial_metadata_, key);
}
private:
class TestServer final : public ServerInterface {
public:
const ChannelArgs& channel_args() const override { return channel_args_; }
channelz::ServerNode* channelz_node() const override { return nullptr; }
ServerCallTracerFactory* server_call_tracer_factory() const override {
return nullptr;
}
grpc_compression_options compression_options() const override {
return {
1,
{0, GRPC_COMPRESS_LEVEL_NONE},
{0, GRPC_COMPRESS_NONE},
};
}
private:
ChannelArgs channel_args_;
};
void InitTest() override {
cq_ = grpc_completion_queue_create_for_next(nullptr);
}
void Shutdown() override {
auto* call = call_.load(std::memory_order_acquire);
if (call != nullptr) {
grpc_call_unref(call);
}
grpc_completion_queue_shutdown(cq_);
auto ev = grpc_completion_queue_next(
cq_, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
CHECK_EQ(ev.type, GRPC_QUEUE_SHUTDOWN);
grpc_completion_queue_destroy(cq_);
}
grpc_completion_queue* cq_{nullptr};
std::atomic<grpc_call*> call_{nullptr};
CallInitiator call_initiator_;
TestServer test_server_;
grpc_metadata_array publish_initial_metadata_{0, 0, nullptr};
};
#define SERVER_CALL_TEST(name) YODEL_TEST(ServerCallTest, name)
SERVER_CALL_TEST(NoOp) { InitCall(MakeClientInitialMetadata({})); }
SERVER_CALL_TEST(InitialMetadataPassedThrough) {
InitCall(MakeClientInitialMetadata({{"foo", "bar"}}));
EXPECT_EQ(GetClientInitialMetadata("foo"), "bar");
}
} // namespace grpc_core

@ -41,8 +41,10 @@ grpc_cc_library(
], ],
visibility = ["//test:__subpackages__"], visibility = ["//test:__subpackages__"],
deps = [ deps = [
"//:config",
"//:debug_location", "//:debug_location",
"//:event_engine_base_hdrs", "//:event_engine_base_hdrs",
"//:exec_ctx",
"//:iomgr_timer", "//:iomgr_timer",
"//:promise", "//:promise",
"//src/core:call_arena_allocator", "//src/core:call_arena_allocator",
@ -51,6 +53,7 @@ grpc_cc_library(
"//src/core:metadata", "//src/core:metadata",
"//src/core:promise_factory", "//src/core:promise_factory",
"//src/core:resource_quota", "//src/core:resource_quota",
"//test/core/event_engine:event_engine_test_utils",
"//test/core/event_engine/fuzzing_event_engine", "//test/core/event_engine/fuzzing_event_engine",
"//test/core/test_util:grpc_test_util", "//test/core/test_util:grpc_test_util",
], ],

@ -22,6 +22,8 @@
#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/timer_manager.h" #include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/resource_quota/resource_quota.h" #include "src/core/lib/resource_quota/resource_quota.h"
#include "test/core/event_engine/event_engine_test_utils.h"
#include "test/core/test_util/build.h"
namespace grpc_core { namespace grpc_core {
@ -144,12 +146,13 @@ void YodelTest::RunTest() {
state_->event_engine = state_->event_engine =
std::make_shared<grpc_event_engine::experimental::FuzzingEventEngine>( std::make_shared<grpc_event_engine::experimental::FuzzingEventEngine>(
[]() { []() {
grpc_timer_manager_set_threading(false); grpc_timer_manager_set_start_threaded(false);
grpc_event_engine::experimental::FuzzingEventEngine::Options grpc_event_engine::experimental::FuzzingEventEngine::Options
options; options;
return options; return options;
}(), }(),
actions_); actions_);
grpc_init();
state_->call_arena_allocator = MakeRefCounted<CallArenaAllocator>( state_->call_arena_allocator = MakeRefCounted<CallArenaAllocator>(
ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator( ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator(
"test-allocator"), "test-allocator"),
@ -168,6 +171,14 @@ void YodelTest::RunTest() {
Shutdown(); Shutdown();
state_->event_engine->TickUntilIdle(); state_->event_engine->TickUntilIdle();
state_->event_engine->UnsetGlobalHooks(); state_->event_engine->UnsetGlobalHooks();
grpc_event_engine::experimental::WaitForSingleOwner(
std::move(state_->event_engine));
grpc_shutdown_blocking();
if (!grpc_wait_until_shutdown(10)) {
LOG(FATAL) << "Timeout in waiting for gRPC shutdown";
}
state_.reset();
AsanAssertNoLeaks();
} }
void YodelTest::TickUntilTrue(absl::FunctionRef<bool()> poll) { void YodelTest::TickUntilTrue(absl::FunctionRef<bool()> poll) {

@ -390,7 +390,6 @@ class YodelTest : public ::testing::Test {
private: private:
class WatchDog; class WatchDog;
struct State { struct State {
grpc::testing::TestGrpcScope grpc_scope;
std::shared_ptr<grpc_event_engine::experimental::FuzzingEventEngine> std::shared_ptr<grpc_event_engine::experimental::FuzzingEventEngine>
event_engine; event_engine;
RefCountedPtr<CallArenaAllocator> call_arena_allocator; RefCountedPtr<CallArenaAllocator> call_arena_allocator;

@ -59,14 +59,13 @@ const grpc_channel_filter* FilterNamed(const char* name) {
auto it = filters->find(name); auto it = filters->find(name);
if (it != filters->end()) return it->second; if (it != filters->end()) return it->second;
return filters return filters
->emplace( ->emplace(name,
name, new grpc_channel_filter{
new grpc_channel_filter{ grpc_call_next_op, grpc_channel_next_op, 0, CallInitFunc,
grpc_call_next_op, nullptr, nullptr, grpc_channel_next_op, 0, grpc_call_stack_ignore_set_pollset_or_pollset_set,
CallInitFunc, grpc_call_stack_ignore_set_pollset_or_pollset_set, CallDestroyFunc, 0, ChannelInitFunc,
CallDestroyFunc, 0, ChannelInitFunc, [](grpc_channel_stack*, grpc_channel_element*) {},
[](grpc_channel_stack*, grpc_channel_element*) {}, ChannelDestroyFunc, grpc_channel_next_get_info, name})
ChannelDestroyFunc, grpc_channel_next_get_info, name})
.first->second; .first->second;
} }

@ -82,8 +82,6 @@ static void free_call(void* arg, grpc_error_handle /*error*/) {
TEST(ChannelStackTest, CreateChannelStack) { TEST(ChannelStackTest, CreateChannelStack) {
const grpc_channel_filter filter = { const grpc_channel_filter filter = {
call_func, call_func,
nullptr,
nullptr,
channel_func, channel_func,
sizeof(int), sizeof(int),
call_init_func, call_init_func,

@ -46,7 +46,7 @@ class ClientChannelTest : public YodelTest {
ClientChannel& InitChannel(const ChannelArgs& args) { ClientChannel& InitChannel(const ChannelArgs& args) {
auto channel = ClientChannel::Create(TestTarget(), CompleteArgs(args)); auto channel = ClientChannel::Create(TestTarget(), CompleteArgs(args));
CHECK_OK(channel); CHECK_OK(channel);
channel_ = OrphanablePtr<ClientChannel>( channel_ = RefCountedPtr<ClientChannel>(
DownCast<ClientChannel*>(channel->release())); DownCast<ClientChannel*>(channel->release()));
return *channel_; return *channel_;
} }
@ -114,8 +114,7 @@ class ClientChannelTest : public YodelTest {
class TestCallDestination final : public UnstartedCallDestination { class TestCallDestination final : public UnstartedCallDestination {
public: public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override { void StartCall(UnstartedCallHandler unstarted_call_handler) override {
handlers_.push( handlers_.push(unstarted_call_handler.StartWithEmptyFilterStack());
unstarted_call_handler.V2HackToStartCallWithoutACallFilterStack());
} }
absl::optional<CallHandler> PopHandler() { absl::optional<CallHandler> PopHandler() {
@ -217,6 +216,7 @@ class ClientChannelTest : public YodelTest {
.SetObject(&client_channel_factory_) .SetObject(&client_channel_factory_)
.SetObject(ResourceQuota::Default()) .SetObject(ResourceQuota::Default())
.SetObject(std::static_pointer_cast<EventEngine>(event_engine())) .SetObject(std::static_pointer_cast<EventEngine>(event_engine()))
.SetIfUnset(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true)
// TODO(ctiller): remove once v3 supports retries? // TODO(ctiller): remove once v3 supports retries?
.SetIfUnset(GRPC_ARG_ENABLE_RETRIES, 0); .SetIfUnset(GRPC_ARG_ENABLE_RETRIES, 0);
} }
@ -233,9 +233,10 @@ class ClientChannelTest : public YodelTest {
ExecCtx exec_ctx; ExecCtx exec_ctx;
channel_.reset(); channel_.reset();
picker_.reset(); picker_.reset();
call_destination_.reset();
} }
OrphanablePtr<ClientChannel> channel_; RefCountedPtr<ClientChannel> channel_;
absl::optional<ClientChannel::PickerObservable> picker_; absl::optional<ClientChannel::PickerObservable> picker_;
TestCallDestinationFactory call_destination_factory_{this}; TestCallDestinationFactory call_destination_factory_{this};
TestClientChannelFactory client_channel_factory_; TestClientChannelFactory client_channel_factory_;
@ -251,26 +252,19 @@ class ClientChannelTest : public YodelTest {
CLIENT_CHANNEL_TEST(NoOp) { InitChannel(ChannelArgs()); } CLIENT_CHANNEL_TEST(NoOp) { InitChannel(ChannelArgs()); }
CLIENT_CHANNEL_TEST(CreateCall) {
auto& channel = InitChannel(ChannelArgs());
auto call_initiator = channel.CreateCall(MakeClientInitialMetadata());
SpawnTestSeq(call_initiator, "cancel", [call_initiator]() mutable {
call_initiator.Cancel();
return Empty{};
});
WaitForAllPendingWork();
}
CLIENT_CHANNEL_TEST(StartCall) { CLIENT_CHANNEL_TEST(StartCall) {
auto& channel = InitChannel(ChannelArgs()); auto& channel = InitChannel(ChannelArgs());
auto call_initiator = channel.CreateCall(MakeClientInitialMetadata()); auto call = MakeCallPair(MakeClientInitialMetadata(), channel.event_engine(),
channel.call_arena_allocator()->MakeArena());
channel.StartCall(std::move(call.handler));
QueueNameResolutionResult( QueueNameResolutionResult(
MakeSuccessfulResolutionResult("ipv4:127.0.0.1:1234")); MakeSuccessfulResolutionResult("ipv4:127.0.0.1:1234"));
auto call_handler = TickUntilCallStarted(); auto call_handler = TickUntilCallStarted();
SpawnTestSeq(call_initiator, "cancel", [call_initiator]() mutable { SpawnTestSeq(call.initiator, "cancel",
call_initiator.Cancel(); [call_initiator = call.initiator]() mutable {
return Empty{}; call_initiator.Cancel();
}); return Empty{};
});
WaitForAllPendingWork(); WaitForAllPendingWork();
} }

@ -75,8 +75,7 @@ class LoadBalancedCallDestinationTest : public YodelTest {
class TestCallDestination final : public UnstartedCallDestination { class TestCallDestination final : public UnstartedCallDestination {
public: public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override { void StartCall(UnstartedCallHandler unstarted_call_handler) override {
handlers_.push( handlers_.push(unstarted_call_handler.StartWithEmptyFilterStack());
unstarted_call_handler.V2HackToStartCallWithoutACallFilterStack());
} }
absl::optional<CallHandler> PopHandler() { absl::optional<CallHandler> PopHandler() {
@ -133,7 +132,7 @@ class LoadBalancedCallDestinationTest : public YodelTest {
subchannel_.reset(); subchannel_.reset();
} }
OrphanablePtr<ClientChannel> channel_; RefCountedPtr<ClientChannel> channel_;
ClientChannel::PickerObservable picker_{nullptr}; ClientChannel::PickerObservable picker_{nullptr};
RefCountedPtr<TestCallDestination> call_destination_ = RefCountedPtr<TestCallDestination> call_destination_ =
MakeRefCounted<TestCallDestination>(); MakeRefCounted<TestCallDestination>();
@ -196,6 +195,44 @@ LOAD_BALANCED_CALL_DESTINATION_TEST(StartCall) {
WaitForAllPendingWork(); WaitForAllPendingWork();
} }
LOAD_BALANCED_CALL_DESTINATION_TEST(StartCallOnDestroyedChannel) {
// Create a call.
auto call = MakeCall(MakeClientInitialMetadata());
// Client side of the call: wait for the final status and expect it to be
// UNAVAILABLE.
SpawnTestSeq(
call.initiator, "initiator",
[this, handler = std::move(call.handler),
initiator = call.initiator]() mutable {
destination_under_test().StartCall(handler);
return initiator.PullServerTrailingMetadata();
},
[](ServerMetadataHandle md) {
EXPECT_EQ(md->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN),
GRPC_STATUS_UNAVAILABLE);
return Empty{};
});
// Set a picker and wait for at least one pick attempt to prove the call has
// made it to the picker.
auto mock_picker = MakeRefCounted<StrictMock<MockPicker>>();
std::atomic<bool> queued_once{false};
EXPECT_CALL(*mock_picker, Pick)
.WillOnce([&queued_once](LoadBalancingPolicy::PickArgs) {
queued_once.store(true, std::memory_order_relaxed);
return LoadBalancingPolicy::PickResult::Queue{};
});
picker().Set(mock_picker);
TickUntil<Empty>([&queued_once]() -> Poll<Empty> {
if (queued_once.load(std::memory_order_relaxed)) return Empty{};
return Pending();
});
// Now set the drop picker (as the client channel does at shutdown), which
// should cause the client side of the call to observe UNAVAILABLE.
picker().Set(MakeRefCounted<LoadBalancingPolicy::DropPicker>(
absl::UnavailableError("Channel destroyed")));
WaitForAllPendingWork();
}
// TODO(roth, ctiller): more tests // TODO(roth, ctiller): more tests
// - tests for the picker returning queue, fail, and drop results. // - tests for the picker returning queue, fail, and drop results.

@ -147,6 +147,7 @@ grpc_cc_library(
"//src/core:no_destruct", "//src/core:no_destruct",
"//src/core:slice", "//src/core:slice",
"//src/core:time", "//src/core:time",
"//test/core/call:batch_builder",
"//test/core/event_engine:event_engine_test_utils", "//test/core/event_engine:event_engine_test_utils",
"//test/core/test_util:grpc_test_util", "//test/core/test_util:grpc_test_util",
], ],

@ -986,29 +986,22 @@ std::vector<CoreTestConfiguration> DefaultConfigs() {
return std::make_unique<InsecureFixtureWithPipeForWakeupFd>(); return std::make_unique<InsecureFixtureWithPipeForWakeupFd>();
}}, }},
#endif #endif
CoreTestConfiguration {
"ChaoticGoodFullStack",
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL |
FEATURE_MASK_DOES_NOT_SUPPORT_RETRY |
FEATURE_MASK_DOES_NOT_SUPPORT_WRITE_BUFFERING,
nullptr,
[](const ChannelArgs& /*client_args*/,
const ChannelArgs& /*server_args*/) {
return std::make_unique<ChaoticGoodFixture>();
}
}
}; };
} }
std::vector<CoreTestConfiguration> ChaoticGoodFixtures() {
return std::vector<CoreTestConfiguration>{
CoreTestConfiguration{"ChaoticGoodFullStack",
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL |
FEATURE_MASK_DOES_NOT_SUPPORT_RETRY |
FEATURE_MASK_DOES_NOT_SUPPORT_WRITE_BUFFERING,
nullptr,
[](const ChannelArgs& /*client_args*/,
const ChannelArgs& /*server_args*/) {
return std::make_unique<ChaoticGoodFixture>();
}}};
}
std::vector<CoreTestConfiguration> AllConfigs() { std::vector<CoreTestConfiguration> AllConfigs() {
std::vector<CoreTestConfiguration> configs; std::vector<CoreTestConfiguration> configs = DefaultConfigs();
if (IsExperimentEnabledInConfiguration(kExperimentIdChaoticGood)) {
configs = ChaoticGoodFixtures();
} else {
configs = DefaultConfigs();
}
std::sort(configs.begin(), configs.end(), std::sort(configs.begin(), configs.end(),
[](const CoreTestConfiguration& a, const CoreTestConfiguration& b) { [](const CoreTestConfiguration& a, const CoreTestConfiguration& b) {
return strcmp(a.name, b.name) < 0; return strcmp(a.name, b.name) < 0;

@ -31,7 +31,6 @@
#include <grpc/compression.h> #include <grpc/compression.h>
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include "src/core/lib/compression/message_compress.h"
#include "src/core/lib/config/core_configuration.h" #include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/event_engine/default_event_engine.h" #include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gprpp/no_destruct.h" #include "src/core/lib/gprpp/no_destruct.h"
@ -62,24 +61,6 @@ Slice RandomBinarySlice(size_t length) {
return Slice::FromCopiedBuffer(output); return Slice::FromCopiedBuffer(output);
} }
ByteBufferUniquePtr ByteBufferFromSlice(Slice slice) {
return ByteBufferUniquePtr(
grpc_raw_byte_buffer_create(const_cast<grpc_slice*>(&slice.c_slice()), 1),
grpc_byte_buffer_destroy);
}
namespace {
absl::optional<std::string> FindInMetadataArray(const grpc_metadata_array& md,
absl::string_view key) {
for (size_t i = 0; i < md.count; i++) {
if (key == StringViewFromSlice(md.metadata[i].key)) {
return std::string(StringViewFromSlice(md.metadata[i].value));
}
}
return absl::nullopt;
}
} // namespace
void CoreEnd2endTest::SetUp() { void CoreEnd2endTest::SetUp() {
CoreConfiguration::Reset(); CoreConfiguration::Reset();
initialized_ = false; initialized_ = false;
@ -126,182 +107,6 @@ void CoreEnd2endTest::TearDown() {
initialized_ = false; initialized_ = false;
} }
absl::optional<std::string> CoreEnd2endTest::IncomingMetadata::Get(
absl::string_view key) const {
return FindInMetadataArray(*metadata_, key);
}
grpc_op CoreEnd2endTest::IncomingMetadata::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
op.data.recv_initial_metadata.recv_initial_metadata = metadata_.get();
return op;
}
std::string CoreEnd2endTest::IncomingMetadata::GetSuccessfulStateString() {
std::string out = "incoming_metadata: {";
for (size_t i = 0; i < metadata_->count; i++) {
absl::StrAppend(&out, StringViewFromSlice(metadata_->metadata[i].key), ":",
StringViewFromSlice(metadata_->metadata[i].value), ",");
}
return out + "}";
}
std::string CoreEnd2endTest::IncomingMessage::payload() const {
Slice out;
if (payload_->data.raw.compression > GRPC_COMPRESS_NONE) {
grpc_slice_buffer decompressed_buffer;
grpc_slice_buffer_init(&decompressed_buffer);
CHECK(grpc_msg_decompress(payload_->data.raw.compression,
&payload_->data.raw.slice_buffer,
&decompressed_buffer));
grpc_byte_buffer* rbb = grpc_raw_byte_buffer_create(
decompressed_buffer.slices, decompressed_buffer.count);
grpc_byte_buffer_reader reader;
CHECK(grpc_byte_buffer_reader_init(&reader, rbb));
out = Slice(grpc_byte_buffer_reader_readall(&reader));
grpc_byte_buffer_reader_destroy(&reader);
grpc_byte_buffer_destroy(rbb);
grpc_slice_buffer_destroy(&decompressed_buffer);
} else {
grpc_byte_buffer_reader reader;
CHECK(grpc_byte_buffer_reader_init(&reader, payload_));
out = Slice(grpc_byte_buffer_reader_readall(&reader));
grpc_byte_buffer_reader_destroy(&reader);
}
return std::string(out.begin(), out.end());
}
grpc_op CoreEnd2endTest::IncomingMessage::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message.recv_message = &payload_;
return op;
}
absl::optional<std::string>
CoreEnd2endTest::IncomingStatusOnClient::GetTrailingMetadata(
absl::string_view key) const {
return FindInMetadataArray(data_->trailing_metadata, key);
}
std::string
CoreEnd2endTest::IncomingStatusOnClient::GetSuccessfulStateString() {
std::string out = absl::StrCat(
"status_on_client: status=", data_->status,
" msg=", data_->status_details.as_string_view(), " trailing_metadata={");
for (size_t i = 0; i < data_->trailing_metadata.count; i++) {
absl::StrAppend(
&out, StringViewFromSlice(data_->trailing_metadata.metadata[i].key),
": ", StringViewFromSlice(data_->trailing_metadata.metadata[i].value),
",");
}
return out + "}";
}
std::string CoreEnd2endTest::IncomingMessage::GetSuccessfulStateString() {
if (payload_ == nullptr) return "message: empty";
return absl::StrCat("message: ", payload().size(), "b uncompressed");
}
grpc_op CoreEnd2endTest::IncomingStatusOnClient::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op.data.recv_status_on_client.trailing_metadata = &data_->trailing_metadata;
op.data.recv_status_on_client.status = &data_->status;
op.data.recv_status_on_client.status_details =
const_cast<grpc_slice*>(&data_->status_details.c_slice());
op.data.recv_status_on_client.error_string = &data_->error_string;
return op;
}
grpc_op CoreEnd2endTest::IncomingCloseOnServer::MakeOp() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op.data.recv_close_on_server.cancelled = &cancelled_;
return op;
}
CoreEnd2endTest::BatchBuilder&
CoreEnd2endTest::BatchBuilder::SendInitialMetadata(
std::initializer_list<std::pair<absl::string_view, absl::string_view>> md,
uint32_t flags, absl::optional<grpc_compression_level> compression_level) {
auto& v = Make<std::vector<grpc_metadata>>();
for (const auto& p : md) {
grpc_metadata m;
m.key = Make<Slice>(Slice::FromCopiedString(p.first)).c_slice();
m.value = Make<Slice>(Slice::FromCopiedString(p.second)).c_slice();
v.push_back(m);
}
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_INITIAL_METADATA;
op.flags = flags;
op.data.send_initial_metadata.count = v.size();
op.data.send_initial_metadata.metadata = v.data();
if (compression_level.has_value()) {
op.data.send_initial_metadata.maybe_compression_level.is_set = 1;
op.data.send_initial_metadata.maybe_compression_level.level =
compression_level.value();
}
ops_.push_back(op);
return *this;
}
CoreEnd2endTest::BatchBuilder& CoreEnd2endTest::BatchBuilder::SendMessage(
Slice payload, uint32_t flags) {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message =
Make<ByteBufferUniquePtr>(ByteBufferFromSlice(std::move(payload))).get();
op.flags = flags;
ops_.push_back(op);
return *this;
}
CoreEnd2endTest::BatchBuilder&
CoreEnd2endTest::BatchBuilder::SendCloseFromClient() {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops_.push_back(op);
return *this;
}
CoreEnd2endTest::BatchBuilder&
CoreEnd2endTest::BatchBuilder::SendStatusFromServer(
grpc_status_code status, absl::string_view message,
std::initializer_list<std::pair<absl::string_view, absl::string_view>> md) {
auto& v = Make<std::vector<grpc_metadata>>();
for (const auto& p : md) {
grpc_metadata m;
m.key = Make<Slice>(Slice::FromCopiedString(p.first)).c_slice();
m.value = Make<Slice>(Slice::FromCopiedString(p.second)).c_slice();
v.push_back(m);
}
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op.data.send_status_from_server.trailing_metadata_count = v.size();
op.data.send_status_from_server.trailing_metadata = v.data();
op.data.send_status_from_server.status = status;
op.data.send_status_from_server.status_details = &Make<grpc_slice>(
Make<Slice>(Slice::FromCopiedString(message)).c_slice());
ops_.push_back(op);
return *this;
}
CoreEnd2endTest::BatchBuilder::~BatchBuilder() {
grpc_call_error err = grpc_call_start_batch(call_, ops_.data(), ops_.size(),
CqVerifier::tag(tag_), nullptr);
EXPECT_EQ(err, GRPC_CALL_OK) << grpc_call_error_to_string(err);
}
CoreEnd2endTest::Call CoreEnd2endTest::ClientCallBuilder::Create() { CoreEnd2endTest::Call CoreEnd2endTest::ClientCallBuilder::Create() {
if (auto* u = absl::get_if<UnregisteredCall>(&call_selector_)) { if (auto* u = absl::get_if<UnregisteredCall>(&call_selector_)) {
absl::optional<Slice> host; absl::optional<Slice> host;
@ -352,8 +157,8 @@ CoreEnd2endTest::IncomingCall::IncomingCall(CoreEnd2endTest& test, void* method,
EXPECT_EQ(grpc_server_request_registered_call( EXPECT_EQ(grpc_server_request_registered_call(
test.server(), method, impl_->call.call_ptr(), test.server(), method, impl_->call.call_ptr(),
&impl_->call_details.deadline, &impl_->request_metadata, &impl_->call_details.deadline, &impl_->request_metadata,
message == nullptr ? nullptr : &message->payload_, test.cq(), message == nullptr ? nullptr : message->raw_payload_ptr(),
test.cq(), CqVerifier::tag(tag)), test.cq(), test.cq(), CqVerifier::tag(tag)),
GRPC_CALL_OK); GRPC_CALL_OK);
} }

@ -1,5 +1,3 @@
//
//
// Copyright 2015 gRPC authors. // Copyright 2015 gRPC authors.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
@ -13,8 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//
//
#ifndef GRPC_TEST_CORE_END2END_END2END_TESTS_H #ifndef GRPC_TEST_CORE_END2END_END2END_TESTS_H
#define GRPC_TEST_CORE_END2END_END2END_TESTS_H #define GRPC_TEST_CORE_END2END_END2END_TESTS_H
@ -61,6 +57,7 @@
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call_test_only.h" #include "src/core/lib/surface/call_test_only.h"
#include "src/core/lib/surface/channel.h" #include "src/core/lib/surface/channel.h"
#include "test/core/call/batch_builder.h"
#include "test/core/end2end/cq_verifier.h" #include "test/core/end2end/cq_verifier.h"
#include "test/core/event_engine/event_engine_test_utils.h" #include "test/core/event_engine/event_engine_test_utils.h"
#include "test/core/test_util/test_config.h" #include "test/core/test_util/test_config.h"
@ -106,9 +103,6 @@ class CoreTestFixture {
Slice RandomSlice(size_t length); Slice RandomSlice(size_t length);
Slice RandomBinarySlice(size_t length); Slice RandomBinarySlice(size_t length);
using ByteBufferUniquePtr =
std::unique_ptr<grpc_byte_buffer, void (*)(grpc_byte_buffer*)>;
ByteBufferUniquePtr ByteBufferFromSlice(Slice slice);
struct CoreTestConfiguration { struct CoreTestConfiguration {
// A descriptive name for this test fixture. // A descriptive name for this test fixture.
@ -252,236 +246,6 @@ class CoreEnd2endTest : public ::testing::Test {
gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_REALTIME); gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_REALTIME);
}; };
// Receiving container for incoming metadata.
class IncomingMetadata final : public CqVerifier::SuccessfulStateString {
public:
IncomingMetadata() = default;
~IncomingMetadata() {
if (metadata_ != nullptr) grpc_metadata_array_destroy(metadata_.get());
}
// Lookup a metadata value by key.
absl::optional<std::string> Get(absl::string_view key) const;
// Make a GRPC_RECV_INITIAL_METADATA op - intended for the framework, not
// for tests.
grpc_op MakeOp();
std::string GetSuccessfulStateString() override;
private:
std::unique_ptr<grpc_metadata_array> metadata_ =
std::make_unique<grpc_metadata_array>(
grpc_metadata_array{0, 0, nullptr});
};
class IncomingCall;
// Receiving container for one incoming message.
class IncomingMessage final : public CqVerifier::SuccessfulStateString {
public:
IncomingMessage() = default;
IncomingMessage(const IncomingMessage&) = delete;
IncomingMessage& operator=(const IncomingMessage&) = delete;
~IncomingMessage() {
if (payload_ != nullptr) grpc_byte_buffer_destroy(payload_);
}
// Get the payload of the message - concatenated together into a string for
// easy verification.
std::string payload() const;
// Check if the message is the end of the stream.
bool is_end_of_stream() const { return payload_ == nullptr; }
// Get the type of the message.
grpc_byte_buffer_type byte_buffer_type() const { return payload_->type; }
// Get the compression algorithm used for the message.
grpc_compression_algorithm compression() const {
return payload_->data.raw.compression;
}
std::string GetSuccessfulStateString() override;
// Make a GRPC_OP_RECV_MESSAGE op - intended for the framework, not for
// tests.
grpc_op MakeOp();
private:
friend class IncomingCall;
grpc_byte_buffer* payload_ = nullptr;
};
// Receiving container for incoming status on the client from the server.
class IncomingStatusOnClient final
: public CqVerifier::SuccessfulStateString {
public:
IncomingStatusOnClient() = default;
IncomingStatusOnClient(const IncomingStatusOnClient&) = delete;
IncomingStatusOnClient& operator=(const IncomingStatusOnClient&) = delete;
IncomingStatusOnClient(IncomingStatusOnClient&& other) noexcept = default;
IncomingStatusOnClient& operator=(IncomingStatusOnClient&& other) noexcept =
default;
~IncomingStatusOnClient() {
if (data_ != nullptr) {
grpc_metadata_array_destroy(&data_->trailing_metadata);
gpr_free(const_cast<char*>(data_->error_string));
}
}
// Get the status code.
grpc_status_code status() const { return data_->status; }
// Get the status details.
std::string message() const {
return std::string(data_->status_details.as_string_view());
}
// Get the error string.
std::string error_string() const {
return data_->error_string == nullptr ? "" : data_->error_string;
}
// Get a trailing metadata value by key.
absl::optional<std::string> GetTrailingMetadata(
absl::string_view key) const;
std::string GetSuccessfulStateString() override;
// Make a GRPC_OP_RECV_STATUS_ON_CLIENT op - intended for the framework, not
// for tests.
grpc_op MakeOp();
private:
struct Data {
grpc_metadata_array trailing_metadata{0, 0, nullptr};
grpc_status_code status;
Slice status_details;
const char* error_string = nullptr;
};
std::unique_ptr<Data> data_ = std::make_unique<Data>();
};
// Receiving container for incoming status on the server from the client.
class IncomingCloseOnServer final : public CqVerifier::SuccessfulStateString {
public:
IncomingCloseOnServer() = default;
IncomingCloseOnServer(const IncomingCloseOnServer&) = delete;
IncomingCloseOnServer& operator=(const IncomingCloseOnServer&) = delete;
// Get the cancellation bit.
bool was_cancelled() const { return cancelled_ != 0; }
// Make a GRPC_OP_RECV_CLOSE_ON_SERVER op - intended for the framework, not
// for tests.
grpc_op MakeOp();
std::string GetSuccessfulStateString() override {
return absl::StrCat("close_on_server: cancelled=", cancelled_);
}
private:
int cancelled_;
};
// Build one batch. Returned from NewBatch (use that to instantiate this!)
// Upon destruction of the BatchBuilder, the batch will be executed with any
// added batches.
class BatchBuilder {
public:
BatchBuilder(grpc_call* call, CoreEnd2endTest* test, int tag)
: call_(call), tag_(tag), cq_verifier_(&test->cq_verifier()) {
cq_verifier_->ClearSuccessfulStateStrings(CqVerifier::tag(tag_));
}
~BatchBuilder();
BatchBuilder(const BatchBuilder&) = delete;
BatchBuilder& operator=(const BatchBuilder&) = delete;
BatchBuilder(BatchBuilder&&) noexcept = default;
// Add a GRPC_OP_SEND_INITIAL_METADATA op.
// Optionally specify flags, compression level.
BatchBuilder& SendInitialMetadata(
std::initializer_list<std::pair<absl::string_view, absl::string_view>>
md,
uint32_t flags = 0,
absl::optional<grpc_compression_level> compression_level =
absl::nullopt);
// Add a GRPC_OP_SEND_MESSAGE op.
BatchBuilder& SendMessage(Slice payload, uint32_t flags = 0);
BatchBuilder& SendMessage(absl::string_view payload, uint32_t flags = 0) {
return SendMessage(Slice::FromCopiedString(payload), flags);
}
// Add a GRPC_OP_SEND_CLOSE_FROM_CLIENT op.
BatchBuilder& SendCloseFromClient();
// Add a GRPC_OP_SEND_STATUS_FROM_SERVER op.
BatchBuilder& SendStatusFromServer(
grpc_status_code status, absl::string_view message,
std::initializer_list<std::pair<absl::string_view, absl::string_view>>
md);
// Add a GRPC_OP_RECV_INITIAL_METADATA op.
BatchBuilder& RecvInitialMetadata(IncomingMetadata& md) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &md);
ops_.emplace_back(md.MakeOp());
return *this;
}
// Add a GRPC_OP_RECV_MESSAGE op.
BatchBuilder& RecvMessage(IncomingMessage& msg) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &msg);
ops_.emplace_back(msg.MakeOp());
return *this;
}
// Add a GRPC_OP_RECV_STATUS_ON_CLIENT op.
BatchBuilder& RecvStatusOnClient(IncomingStatusOnClient& status) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &status);
ops_.emplace_back(status.MakeOp());
return *this;
}
// Add a GRPC_OP_RECV_CLOSE_ON_SERVER op.
BatchBuilder& RecvCloseOnServer(IncomingCloseOnServer& close) {
cq_verifier_->AddSuccessfulStateString(CqVerifier::tag(tag_), &close);
ops_.emplace_back(close.MakeOp());
return *this;
}
private:
// We need to track little bits of memory up until the batch is executed.
// One Thing is one such block of memory.
// We specialize it with SpecificThing to track a specific type of memory.
// These get placed on things_ and deleted when the batch is executed.
class Thing {
public:
virtual ~Thing() = default;
};
template <typename T>
class SpecificThing final : public Thing {
public:
template <typename... Args>
explicit SpecificThing(Args&&... args)
: t_(std::forward<Args>(args)...) {}
SpecificThing() = default;
T& get() { return t_; }
private:
T t_;
};
// Make a thing of type T, and return a reference to it.
template <typename T, typename... Args>
T& Make(Args&&... args) {
things_.emplace_back(new SpecificThing<T>(std::forward<Args>(args)...));
return static_cast<SpecificThing<T>*>(things_.back().get())->get();
}
grpc_call* call_;
const int tag_;
std::vector<grpc_op> ops_;
std::vector<std::unique_ptr<Thing>> things_;
CqVerifier* const cq_verifier_;
};
// Wrapper around a grpc_call. // Wrapper around a grpc_call.
// Instantiated by ClientCallBuilder via NewClientCall for client calls. // Instantiated by ClientCallBuilder via NewClientCall for client calls.
// Wrapped by IncomingCall for server calls. // Wrapped by IncomingCall for server calls.
@ -498,7 +262,9 @@ class CoreEnd2endTest : public ::testing::Test {
} }
// Construct a batch with a tag - upon destruction of the BatchBuilder the // Construct a batch with a tag - upon destruction of the BatchBuilder the
// operation will occur. // operation will occur.
BatchBuilder NewBatch(int tag) { return BatchBuilder(call_, test_, tag); } BatchBuilder NewBatch(int tag) {
return BatchBuilder(call_, &test_->cq_verifier(), tag);
}
// Cancel the call // Cancel the call
void Cancel() { grpc_call_cancel(call_, nullptr); } void Cancel() { grpc_call_cancel(call_, nullptr); }
void CancelWithStatus(grpc_status_code status, const char* message) { void CancelWithStatus(grpc_status_code status, const char* message) {
@ -636,7 +402,6 @@ class CoreEnd2endTest : public ::testing::Test {
} }
// Pull in CqVerifier types for ergonomics // Pull in CqVerifier types for ergonomics
// TODO(ctiller): evaluate just dropping CqVerifier and folding it in here.
using ExpectedResult = CqVerifier::ExpectedResult; using ExpectedResult = CqVerifier::ExpectedResult;
using Maybe = CqVerifier::Maybe; using Maybe = CqVerifier::Maybe;
using PerformAction = CqVerifier::PerformAction; using PerformAction = CqVerifier::PerformAction;
@ -913,7 +678,7 @@ class CoreEnd2endTestRegistry {
if (g_is_fuzzing_core_e2e_tests) GTEST_SKIP() << "Skipping test for fuzzing" if (g_is_fuzzing_core_e2e_tests) GTEST_SKIP() << "Skipping test for fuzzing"
#define SKIP_IF_CHAOTIC_GOOD() \ #define SKIP_IF_CHAOTIC_GOOD() \
if (IsChaoticGoodEnabled()) { \ if (absl::StrContains(GetParam()->name, "ChaoticGood")) { \
GTEST_SKIP() << "Disabled for initial chaotic good testing"; \ GTEST_SKIP() << "Disabled for initial chaotic good testing"; \
} }

@@ -43,8 +43,8 @@ CORE_END2END_TEST(RetryHttp2Test, BadPing) {
           .Set(GRPC_ARG_HTTP2_MAX_PING_STRIKES, MAX_PING_STRIKES)
           .Set(GRPC_ARG_HTTP2_BDP_PROBE, 0));
   auto c = NewClientCall("/foo").Timeout(Duration::Seconds(10)).Create();
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
+  IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendCloseFromClient()
@@ -67,7 +67,7 @@ CORE_END2END_TEST(RetryHttp2Test, BadPing) {
     }
     Step();
   }
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingCloseOnServer client_close;
   s.NewBatch(102)
       .SendInitialMetadata({})
       .SendStatusFromServer(GRPC_STATUS_UNIMPLEMENTED, "xyz", {})
@@ -101,8 +101,8 @@ CORE_END2END_TEST(RetryHttp2Test, PingsWithoutData) {
           .Set(GRPC_ARG_HTTP2_MAX_PING_STRIKES, MAX_PING_STRIKES)
           .Set(GRPC_ARG_HTTP2_BDP_PROBE, 0));
   auto c = NewClientCall("/foo").Timeout(Duration::Seconds(10)).Create();
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
+  IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendCloseFromClient()
@@ -122,7 +122,7 @@ CORE_END2END_TEST(RetryHttp2Test, PingsWithoutData) {
     }
     Step();
   }
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingCloseOnServer client_close;
   s.NewBatch(102)
       .SendInitialMetadata({})
       .SendStatusFromServer(GRPC_STATUS_UNIMPLEMENTED, "xyz", {})

@@ -48,9 +48,9 @@ static void BinaryMetadata(CoreEnd2endTest& test, bool server_true_binary,
   auto status_string = RandomBinarySlice(256);
   auto c = test.NewClientCall("/foo").Timeout(Duration::Minutes(1)).Create();
-  CoreEnd2endTest::IncomingMetadata server_initial_md;
-  CoreEnd2endTest::IncomingMessage server_message;
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_md;
+  IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
   c.NewBatch(1)
       .SendInitialMetadata({
           {"key1-bin", key1_payload.as_string_view()},
@@ -64,7 +64,7 @@ static void BinaryMetadata(CoreEnd2endTest& test, bool server_true_binary,
   auto s = test.RequestCall(101);
   test.Expect(101, true);
   test.Step();
-  CoreEnd2endTest::IncomingMessage client_message;
+  IncomingMessage client_message;
   s.NewBatch(102)
       .SendInitialMetadata({
           {"key3-bin", key3_payload.as_string_view()},
@@ -73,7 +73,7 @@ static void BinaryMetadata(CoreEnd2endTest& test, bool server_true_binary,
       .RecvMessage(client_message);
   test.Expect(102, true);
   test.Step();
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingCloseOnServer client_close;
   s.NewBatch(103)
       .RecvCloseOnServer(client_close)
       .SendMessage(response_payload.Ref())

@@ -73,9 +73,9 @@ void TestRequestResponseWithPayloadAndCallCreds(CoreEnd2endTest& test,
   }
   EXPECT_NE(creds, nullptr);
   c.SetCredentials(creds);
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
-  CoreEnd2endTest::IncomingMessage server_message;
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
+  IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendMessage("hello world")
@@ -90,11 +90,11 @@ void TestRequestResponseWithPayloadAndCallCreds(CoreEnd2endTest& test,
   PrintAuthContext(true, c.GetAuthContext().get());
   // Cannot set creds on the server call object.
   EXPECT_NE(grpc_call_set_credentials(s.c_call(), nullptr), GRPC_CALL_OK);
-  CoreEnd2endTest::IncomingMessage client_message;
+  IncomingMessage client_message;
   s.NewBatch(102).SendInitialMetadata({}).RecvMessage(client_message);
   test.Expect(102, true);
   test.Step();
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingCloseOnServer client_close;
   s.NewBatch(103)
       .RecvCloseOnServer(client_close)
       .SendMessage("hello you")
@@ -138,9 +138,9 @@ void TestRequestResponseWithPayloadAndOverriddenCallCreds(
         overridden_fake_md_value);
   }
   c.SetCredentials(creds);
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
-  CoreEnd2endTest::IncomingMessage server_message;
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
+  IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendMessage("hello world")
@@ -155,11 +155,11 @@ void TestRequestResponseWithPayloadAndOverriddenCallCreds(
   PrintAuthContext(true, c.GetAuthContext().get());
   // Cannot set creds on the server call object.
   EXPECT_NE(grpc_call_set_credentials(s.c_call(), nullptr), GRPC_CALL_OK);
-  CoreEnd2endTest::IncomingMessage client_message;
+  IncomingMessage client_message;
   s.NewBatch(102).SendInitialMetadata({}).RecvMessage(client_message);
   test.Expect(102, true);
   test.Step();
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingCloseOnServer client_close;
   s.NewBatch(103)
       .RecvCloseOnServer(client_close)
       .SendMessage("hello you")
@@ -197,9 +197,9 @@ void TestRequestResponseWithPayloadAndDeletedCallCreds(
   EXPECT_NE(creds, nullptr);
   c.SetCredentials(creds);
   c.SetCredentials(nullptr);
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
-  CoreEnd2endTest::IncomingMessage server_message;
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
+  IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendMessage("hello world")
@@ -214,11 +214,11 @@ void TestRequestResponseWithPayloadAndDeletedCallCreds(
   PrintAuthContext(true, c.GetAuthContext().get());
   // Cannot set creds on the server call object.
   EXPECT_NE(grpc_call_set_credentials(s.c_call(), nullptr), GRPC_CALL_OK);
-  CoreEnd2endTest::IncomingMessage client_message;
+  IncomingMessage client_message;
   s.NewBatch(102).SendInitialMetadata({}).RecvMessage(client_message);
   test.Expect(102, true);
   test.Step();
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingCloseOnServer client_close;
   s.NewBatch(103)
       .RecvCloseOnServer(client_close)
       .SendMessage("hello you")
@@ -248,9 +248,9 @@ CORE_END2END_TEST(PerCallCredsOnInsecureTest,
       grpc_md_only_test_credentials_create(fake_md_key, fake_md_value);
   EXPECT_NE(creds, nullptr);
   c.SetCredentials(creds);
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
-  CoreEnd2endTest::IncomingMessage server_message;
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
+  IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendMessage("hello world")
@@ -313,9 +313,9 @@ CORE_END2END_TEST(PerCallCredsOnInsecureTest, FailToSendCallCreds) {
   creds = grpc_google_iam_credentials_create(iam_token, iam_selector, nullptr);
   EXPECT_NE(creds, nullptr);
   c.SetCredentials(creds);
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
-  CoreEnd2endTest::IncomingMessage server_message;
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
+  IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendMessage("hello world")

@@ -40,8 +40,8 @@ CORE_END2END_TEST(CoreClientChannelTest, CallHostOverride) {
                .Host("foo.test.google.fr:1234")
                .Create();
   EXPECT_NE(c.GetPeer(), absl::nullopt);
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
+  IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
   c.NewBatch(1)
       .SendInitialMetadata({})
       .SendCloseFromClient()
@@ -52,7 +52,7 @@ CORE_END2END_TEST(CoreClientChannelTest, CallHostOverride) {
   Step();
   EXPECT_NE(s.GetPeer(), absl::nullopt);
   EXPECT_NE(c.GetPeer(), absl::nullopt);
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingCloseOnServer client_close;
   s.NewBatch(102)
       .SendInitialMetadata({})
       .SendStatusFromServer(GRPC_STATUS_UNIMPLEMENTED, "xyz", {})

@@ -38,9 +38,9 @@ void CancelAfterAccept(CoreEnd2endTest& test,
                        std::unique_ptr<CancellationMode> cancellation_mode,
                        Duration timeout) {
   auto c = test.NewClientCall("/service/method").Timeout(timeout).Create();
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
-  CoreEnd2endTest::IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
+  IncomingMessage server_message;
   c.NewBatch(1)
       .RecvStatusOnClient(server_status)
       .SendInitialMetadata({})
@@ -50,8 +50,8 @@ void CancelAfterAccept(CoreEnd2endTest& test,
   auto s = test.RequestCall(2);
   test.Expect(2, true);
   test.Step();
-  CoreEnd2endTest::IncomingMessage client_message;
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingMessage client_message;
+  IncomingCloseOnServer client_close;
   s.NewBatch(3)
       .RecvMessage(client_message)
       .SendInitialMetadata({})

@@ -36,9 +36,9 @@ void CancelAfterClientDone(
   auto c = test.NewClientCall("/service/method")
                .Timeout(Duration::Seconds(5))
                .Create();
-  CoreEnd2endTest::IncomingStatusOnClient server_status;
-  CoreEnd2endTest::IncomingMetadata server_initial_metadata;
-  CoreEnd2endTest::IncomingMessage server_message;
+  IncomingStatusOnClient server_status;
+  IncomingMetadata server_initial_metadata;
+  IncomingMessage server_message;
   c.NewBatch(1)
       .RecvStatusOnClient(server_status)
       .SendInitialMetadata({})
@@ -49,8 +49,8 @@ void CancelAfterClientDone(
   auto s = test.RequestCall(2);
   test.Expect(2, true);
   test.Step();
-  CoreEnd2endTest::IncomingMessage client_message;
-  CoreEnd2endTest::IncomingCloseOnServer client_close;
+  IncomingMessage client_message;
+  IncomingCloseOnServer client_close;
   s.NewBatch(3)
       .RecvMessage(client_message)
       .SendInitialMetadata({})
