Merge branch 'master' into grpc_verbosity_flag_support

pull/36798/head
tanvi-jagtap 8 months ago
commit ab0d600849
Changed files (lines changed per file in parentheses):
  1. BUILD (31)
  2. CMakeLists.txt (571)
  3. Makefile (6)
  4. Package.swift (12)
  5. bazel/experiments.bzl (10)
  6. build_autogenerated.yaml (1096)
  7. config.m4 (6)
  8. config.w32 (6)
  9. examples/cpp/csm/observability/BUILD (50)
  10. examples/cpp/csm/observability/Dockerfile.client (37)
  11. examples/cpp/csm/observability/Dockerfile.server (37)
  12. examples/cpp/csm/observability/README.md (35)
  13. examples/cpp/csm/observability/csm_greeter_client.cc (79)
  14. examples/cpp/csm/observability/csm_greeter_server.cc (67)
  15. examples/cpp/otel/BUILD (2)
  16. gRPC-C++.podspec (14)
  17. gRPC-Core.podspec (18)
  18. grpc.gemspec (12)
  19. include/grpc/support/port_platform.h (3)
  20. include/grpcpp/client_context.h (4)
  21. include/grpcpp/completion_queue.h (8)
  22. include/grpcpp/impl/call_op_set.h (18)
  23. include/grpcpp/impl/interceptor_common.h (94)
  24. include/grpcpp/impl/proto_utils.h (6)
  25. include/grpcpp/impl/rpc_service_method.h (18)
  26. include/grpcpp/impl/server_callback_handlers.h (18)
  27. include/grpcpp/impl/service_type.h (14)
  28. include/grpcpp/impl/sync.h (4)
  29. include/grpcpp/server_interface.h (6)
  30. include/grpcpp/support/async_stream.h (90)
  31. include/grpcpp/support/async_unary_call.h (20)
  32. include/grpcpp/support/callback_common.h (16)
  33. include/grpcpp/support/client_callback.h (34)
  34. include/grpcpp/support/client_interceptor.h (4)
  35. include/grpcpp/support/method_handler.h (4)
  36. include/grpcpp/support/proto_buffer_reader.h (10)
  37. include/grpcpp/support/proto_buffer_writer.h (10)
  38. include/grpcpp/support/server_interceptor.h (4)
  39. include/grpcpp/support/sync_stream.h (22)
  40. package.xml (12)
  41. src/core/client_channel/client_channel.cc (97)
  42. src/core/client_channel/client_channel.h (6)
  43. src/core/client_channel/client_channel_filter.cc (297)
  44. src/core/client_channel/client_channel_filter.h (41)
  45. src/core/client_channel/client_channel_plugin.cc (15)
  46. src/core/client_channel/load_balanced_call_destination.cc (20)
  47. src/core/client_channel/retry_filter.cc (2)
  48. src/core/client_channel/subchannel.cc (37)
  49. src/core/client_channel/subchannel.h (2)
  50. src/core/ext/filters/channel_idle/legacy_channel_idle_filter.cc (16)
  51. src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc (9)
  52. src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc (24)
  53. src/core/ext/transport/chaotic_good/server_transport.cc (44)
  54. src/core/ext/transport/chttp2/client/chttp2_connector.cc (2)
  55. src/core/ext/transport/inproc/inproc_transport.cc (10)
  56. src/core/lib/channel/channel_stack.cc (32)
  57. src/core/lib/channel/channel_stack.h (25)
  58. src/core/lib/channel/channel_stack_builder.h (5)
  59. src/core/lib/channel/channel_stack_builder_impl.cc (141)
  60. src/core/lib/channel/channel_stack_builder_impl.h (2)
  61. src/core/lib/channel/connected_channel.cc (710)
  62. src/core/lib/channel/promise_based_filter.cc (7)
  63. src/core/lib/channel/promise_based_filter.h (476)
  64. src/core/lib/event_engine/shim.cc (2)
  65. src/core/lib/event_engine/thready_event_engine/thready_event_engine.cc (27)
  66. src/core/lib/event_engine/thready_event_engine/thready_event_engine.h (7)
  67. src/core/lib/experiments/experiments.cc (99)
  68. src/core/lib/experiments/experiments.h (24)
  69. src/core/lib/experiments/experiments.yaml (23)
  70. src/core/lib/gprpp/dual_ref_counted.h (2)
  71. src/core/lib/gprpp/dump_args.h (10)
  72. src/core/lib/gprpp/ref_counted.h (9)
  73. src/core/lib/gprpp/single_set_ptr.h (8)
  74. src/core/lib/promise/cancel_callback.h (14)
  75. src/core/lib/promise/detail/basic_seq.h (3)
  76. src/core/lib/promise/party.h (5)
  77. src/core/lib/surface/call.cc (3264)
  78. src/core/lib/surface/call.h (105)
  79. src/core/lib/surface/call_utils.cc (286)
  80. src/core/lib/surface/call_utils.h (457)
  81. src/core/lib/surface/channel.h (17)
  82. src/core/lib/surface/channel_create.cc (6)
  83. src/core/lib/surface/channel_create.h (5)
  84. src/core/lib/surface/channel_init.cc (4)
  85. src/core/lib/surface/client_call.cc (423)
  86. src/core/lib/surface/client_call.h (180)
  87. src/core/lib/surface/filter_stack_call.cc (1164)
  88. src/core/lib/surface/filter_stack_call.h (372)
  89. src/core/lib/surface/legacy_channel.cc (30)
  90. src/core/lib/surface/legacy_channel.h (12)
  91. src/core/lib/surface/server_call.cc (224)
  92. src/core/lib/surface/server_call.h (167)
  93. src/core/lib/surface/wait_for_cq_end_op.cc (75)
  94. src/core/lib/surface/wait_for_cq_end_op.h (72)
  95. src/core/lib/transport/batch_builder.cc (171)
  96. src/core/lib/transport/batch_builder.h (474)
  97. src/core/lib/transport/call_filters.cc (4)
  98. src/core/lib/transport/call_spine.h (384)
  99. src/core/load_balancing/grpclb/grpclb.cc (2)
  100. src/core/load_balancing/lb_policy.h (13)
Some files were not shown because too many files have changed in this diff.

31
BUILD

@ -910,6 +910,8 @@ grpc_cc_library(
external_deps = [
"absl/log:check",
"absl/log:log",
"absl/log:absl_check",
"absl/log:absl_log",
"absl/strings:cord",
"absl/synchronization",
"protobuf_headers",
@ -941,6 +943,8 @@ grpc_cc_library(
external_deps = [
"absl/log:check",
"absl/log:log",
"absl/log:absl_check",
"absl/log:absl_log",
"absl/strings:cord",
],
language = "c++",
@ -1258,6 +1262,8 @@ grpc_cc_library(
external_deps = [
"absl/log:check",
"absl/log:log",
"absl/log:absl_check",
"absl/log:absl_log",
"absl/strings",
"absl/synchronization",
],
@ -1800,6 +1806,7 @@ grpc_cc_library(
"stats",
"//src/core:arena",
"//src/core:call_arena_allocator",
"//src/core:call_destination",
"//src/core:channel_args",
"//src/core:channel_stack_type",
"//src/core:compression",
@ -1992,15 +1999,17 @@ grpc_cc_library(
"//src/core:lib/surface/call.cc",
"//src/core:lib/surface/call_details.cc",
"//src/core:lib/surface/call_log_batch.cc",
"//src/core:lib/surface/call_utils.cc",
"//src/core:lib/surface/client_call.cc",
"//src/core:lib/surface/completion_queue.cc",
"//src/core:lib/surface/completion_queue_factory.cc",
"//src/core:lib/surface/event_string.cc",
"//src/core:lib/surface/filter_stack_call.cc",
"//src/core:lib/surface/lame_client.cc",
"//src/core:lib/surface/metadata_array.cc",
"//src/core:lib/surface/server_call.cc",
"//src/core:lib/surface/validate_metadata.cc",
"//src/core:lib/surface/version.cc",
"//src/core:lib/surface/wait_for_cq_end_op.cc",
"//src/core:lib/transport/batch_builder.cc",
"//src/core:lib/transport/transport.cc",
"//src/core:lib/transport/transport_op_string.cc",
],
@ -2013,14 +2022,16 @@ grpc_cc_library(
"//src/core:lib/compression/message_compress.h",
"//src/core:lib/surface/call.h",
"//src/core:lib/surface/call_test_only.h",
"//src/core:lib/surface/call_utils.h",
"//src/core:lib/surface/client_call.h",
"//src/core:lib/surface/completion_queue.h",
"//src/core:lib/surface/completion_queue_factory.h",
"//src/core:lib/surface/event_string.h",
"//src/core:lib/surface/filter_stack_call.h",
"//src/core:lib/surface/init.h",
"//src/core:lib/surface/lame_client.h",
"//src/core:lib/surface/server_call.h",
"//src/core:lib/surface/validate_metadata.h",
"//src/core:lib/surface/wait_for_cq_end_op.h",
"//src/core:lib/transport/batch_builder.h",
"//src/core:lib/transport/transport.h",
],
defines = select({
@ -2033,8 +2044,8 @@ grpc_cc_library(
"absl/container:inlined_vector",
"absl/functional:any_invocable",
"absl/functional:function_ref",
"absl/log",
"absl/log:check",
"absl/log:log",
"absl/meta:type_traits",
"absl/status",
"absl/status:statusor",
@ -2065,6 +2076,7 @@ grpc_cc_library(
"debug_location",
"exec_ctx",
"gpr",
"grpc_core_credentials_header",
"grpc_public_hdrs",
"grpc_trace",
"iomgr",
@ -2125,6 +2137,7 @@ grpc_cc_library(
"//src/core:ref_counted",
"//src/core:seq",
"//src/core:server_interface",
"//src/core:single_set_ptr",
"//src/core:slice",
"//src/core:slice_buffer",
"//src/core:slice_cast",
@ -2471,6 +2484,8 @@ grpc_cc_library(
"absl/functional:any_invocable",
"absl/log:check",
"absl/log:log",
"absl/log:absl_check",
"absl/log:absl_log",
"absl/status",
"absl/status:statusor",
"absl/strings",
@ -2559,6 +2574,8 @@ grpc_cc_library(
"absl/status:statusor",
"absl/strings",
"absl/synchronization",
"absl/log:absl_check",
"absl/log:absl_log",
"absl/types:optional",
"absl/memory",
"@com_google_protobuf//upb:base",
@ -3713,8 +3730,8 @@ grpc_cc_library(
"absl/container:flat_hash_set",
"absl/container:inlined_vector",
"absl/functional:any_invocable",
"absl/log",
"absl/log:check",
"absl/log:log",
"absl/status",
"absl/status:statusor",
"absl/strings",
@ -3751,7 +3768,6 @@ grpc_cc_library(
"stats",
"uri_parser",
"work_serializer",
"//src/core:activity",
"//src/core:arena",
"//src/core:arena_promise",
"//src/core:backend_metric_parser",
@ -3811,7 +3827,6 @@ grpc_cc_library(
"//src/core:slice_buffer",
"//src/core:slice_refcount",
"//src/core:stats_data",
"//src/core:status_flag",
"//src/core:status_helper",
"//src/core:subchannel_connector",
"//src/core:subchannel_interface",

571
CMakeLists.txt generated

File diff suppressed because it is too large.

6
Makefile generated

@ -1326,22 +1326,24 @@ LIBGRPC_SRC = \
src/core/lib/surface/call.cc \
src/core/lib/surface/call_details.cc \
src/core/lib/surface/call_log_batch.cc \
src/core/lib/surface/call_utils.cc \
src/core/lib/surface/channel.cc \
src/core/lib/surface/channel_create.cc \
src/core/lib/surface/channel_init.cc \
src/core/lib/surface/channel_stack_type.cc \
src/core/lib/surface/client_call.cc \
src/core/lib/surface/completion_queue.cc \
src/core/lib/surface/completion_queue_factory.cc \
src/core/lib/surface/event_string.cc \
src/core/lib/surface/filter_stack_call.cc \
src/core/lib/surface/init.cc \
src/core/lib/surface/init_internally.cc \
src/core/lib/surface/lame_client.cc \
src/core/lib/surface/legacy_channel.cc \
src/core/lib/surface/metadata_array.cc \
src/core/lib/surface/server_call.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
src/core/lib/surface/wait_for_cq_end_op.cc \
src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/call_arena_allocator.cc \
src/core/lib/transport/call_filters.cc \

12
Package.swift generated

@ -1656,6 +1656,8 @@ let package = Package(
"src/core/lib/surface/call_log_batch.cc",
"src/core/lib/surface/call_test_only.h",
"src/core/lib/surface/call_trace.h",
"src/core/lib/surface/call_utils.cc",
"src/core/lib/surface/call_utils.h",
"src/core/lib/surface/channel.cc",
"src/core/lib/surface/channel.h",
"src/core/lib/surface/channel_create.cc",
@ -1664,12 +1666,16 @@ let package = Package(
"src/core/lib/surface/channel_init.h",
"src/core/lib/surface/channel_stack_type.cc",
"src/core/lib/surface/channel_stack_type.h",
"src/core/lib/surface/client_call.cc",
"src/core/lib/surface/client_call.h",
"src/core/lib/surface/completion_queue.cc",
"src/core/lib/surface/completion_queue.h",
"src/core/lib/surface/completion_queue_factory.cc",
"src/core/lib/surface/completion_queue_factory.h",
"src/core/lib/surface/event_string.cc",
"src/core/lib/surface/event_string.h",
"src/core/lib/surface/filter_stack_call.cc",
"src/core/lib/surface/filter_stack_call.h",
"src/core/lib/surface/init.cc",
"src/core/lib/surface/init.h",
"src/core/lib/surface/init_internally.cc",
@ -1679,13 +1685,11 @@ let package = Package(
"src/core/lib/surface/legacy_channel.cc",
"src/core/lib/surface/legacy_channel.h",
"src/core/lib/surface/metadata_array.cc",
"src/core/lib/surface/server_call.cc",
"src/core/lib/surface/server_call.h",
"src/core/lib/surface/validate_metadata.cc",
"src/core/lib/surface/validate_metadata.h",
"src/core/lib/surface/version.cc",
"src/core/lib/surface/wait_for_cq_end_op.cc",
"src/core/lib/surface/wait_for_cq_end_op.h",
"src/core/lib/transport/batch_builder.cc",
"src/core/lib/transport/batch_builder.h",
"src/core/lib/transport/bdp_estimator.cc",
"src/core/lib/transport/bdp_estimator.h",
"src/core/lib/transport/call_arena_allocator.cc",

10
bazel/experiments.bzl

@ -32,9 +32,7 @@ EXPERIMENT_ENABLES = {
"multiping": "multiping",
"peer_state_based_framing": "peer_state_based_framing",
"pick_first_new": "pick_first_new",
"promise_based_client_call": "event_engine_client,event_engine_listener,promise_based_client_call",
"chaotic_good": "chaotic_good,event_engine_client,event_engine_listener,promise_based_client_call",
"promise_based_inproc_transport": "event_engine_client,event_engine_listener,promise_based_client_call,promise_based_inproc_transport",
"promise_based_inproc_transport": "promise_based_inproc_transport",
"rstpit": "rstpit",
"schedule_cancellation_over_write": "schedule_cancellation_over_write",
"server_privacy": "server_privacy",
@ -44,7 +42,6 @@ EXPERIMENT_ENABLES = {
"unconstrained_max_quota_buffer_size": "unconstrained_max_quota_buffer_size",
"work_serializer_clears_time_cache": "work_serializer_clears_time_cache",
"work_serializer_dispatch": "event_engine_client,work_serializer_dispatch",
"call_v3": "call_v3,event_engine_client,event_engine_listener,work_serializer_dispatch",
}
EXPERIMENT_POLLERS = [
@ -141,9 +138,7 @@ EXPERIMENTS = {
},
"off": {
"core_end2end_test": [
"chaotic_good",
"event_engine_client",
"promise_based_client_call",
],
"endpoint_test": [
"tcp_frame_size_tuning",
@ -159,9 +154,6 @@ EXPERIMENTS = {
"tcp_frame_size_tuning",
"tcp_rcv_lowat",
],
"lame_client_test": [
"promise_based_client_call",
],
"resource_quota_test": [
"free_large_allocator",
"unconstrained_max_quota_buffer_size",

1096
build_autogenerated.yaml

File diff suppressed because it is too large.

6
config.m4 generated

@ -701,22 +701,24 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/surface/call.cc \
src/core/lib/surface/call_details.cc \
src/core/lib/surface/call_log_batch.cc \
src/core/lib/surface/call_utils.cc \
src/core/lib/surface/channel.cc \
src/core/lib/surface/channel_create.cc \
src/core/lib/surface/channel_init.cc \
src/core/lib/surface/channel_stack_type.cc \
src/core/lib/surface/client_call.cc \
src/core/lib/surface/completion_queue.cc \
src/core/lib/surface/completion_queue_factory.cc \
src/core/lib/surface/event_string.cc \
src/core/lib/surface/filter_stack_call.cc \
src/core/lib/surface/init.cc \
src/core/lib/surface/init_internally.cc \
src/core/lib/surface/lame_client.cc \
src/core/lib/surface/legacy_channel.cc \
src/core/lib/surface/metadata_array.cc \
src/core/lib/surface/server_call.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
src/core/lib/surface/wait_for_cq_end_op.cc \
src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/call_arena_allocator.cc \
src/core/lib/transport/call_filters.cc \

6
config.w32 generated

@ -666,22 +666,24 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\surface\\call.cc " +
"src\\core\\lib\\surface\\call_details.cc " +
"src\\core\\lib\\surface\\call_log_batch.cc " +
"src\\core\\lib\\surface\\call_utils.cc " +
"src\\core\\lib\\surface\\channel.cc " +
"src\\core\\lib\\surface\\channel_create.cc " +
"src\\core\\lib\\surface\\channel_init.cc " +
"src\\core\\lib\\surface\\channel_stack_type.cc " +
"src\\core\\lib\\surface\\client_call.cc " +
"src\\core\\lib\\surface\\completion_queue.cc " +
"src\\core\\lib\\surface\\completion_queue_factory.cc " +
"src\\core\\lib\\surface\\event_string.cc " +
"src\\core\\lib\\surface\\filter_stack_call.cc " +
"src\\core\\lib\\surface\\init.cc " +
"src\\core\\lib\\surface\\init_internally.cc " +
"src\\core\\lib\\surface\\lame_client.cc " +
"src\\core\\lib\\surface\\legacy_channel.cc " +
"src\\core\\lib\\surface\\metadata_array.cc " +
"src\\core\\lib\\surface\\server_call.cc " +
"src\\core\\lib\\surface\\validate_metadata.cc " +
"src\\core\\lib\\surface\\version.cc " +
"src\\core\\lib\\surface\\wait_for_cq_end_op.cc " +
"src\\core\\lib\\transport\\batch_builder.cc " +
"src\\core\\lib\\transport\\bdp_estimator.cc " +
"src\\core\\lib\\transport\\call_arena_allocator.cc " +
"src\\core\\lib\\transport\\call_filters.cc " +

50
examples/cpp/csm/observability/BUILD

@ -0,0 +1,50 @@
# Copyright 2023 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
licenses(["notice"])
cc_binary(
name = "csm_greeter_client",
srcs = ["csm_greeter_client.cc"],
defines = ["BAZEL_BUILD"],
deps = [
"//:grpc++",
"//:grpcpp_csm_observability",
"//examples/cpp/otel:util",
"//examples/protos:helloworld_cc_grpc",
"@com_google_absl//absl/flags:flag",
"@com_google_absl//absl/flags:parse",
"@io_opentelemetry_cpp//exporters/prometheus:prometheus_exporter",
"@io_opentelemetry_cpp//sdk/src/metrics",
],
)
cc_binary(
name = "csm_greeter_server",
srcs = ["csm_greeter_server.cc"],
defines = ["BAZEL_BUILD"],
deps = [
"//:grpc++",
"//:grpc++_reflection",
"//:grpcpp_admin",
"//:grpcpp_csm_observability",
"//examples/cpp/otel:util",
"//examples/protos:helloworld_cc_grpc",
"@com_google_absl//absl/flags:flag",
"@com_google_absl//absl/flags:parse",
"@com_google_absl//absl/log",
"@io_opentelemetry_cpp//exporters/prometheus:prometheus_exporter",
"@io_opentelemetry_cpp//sdk/src/metrics",
],
)

37
examples/cpp/csm/observability/Dockerfile.client

@ -0,0 +1,37 @@
# Copyright 2023 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.9-slim-bookworm
RUN apt-get update -y && apt-get upgrade -y && apt-get install -y build-essential clang curl
WORKDIR /workdir
RUN ln -s /usr/bin/python3 /usr/bin/python
RUN mkdir /artifacts
COPY . .
RUN tools/bazel build //examples/cpp/csm/observability:csm_greeter_client
RUN cp -rL /workdir/bazel-bin/examples/cpp/csm/observability/csm_greeter_client /artifacts/
FROM python:3.9-slim-bookworm
RUN apt-get update \
&& apt-get -y upgrade \
&& apt-get -y autoremove \
&& apt-get install -y curl
COPY --from=0 /artifacts ./
ENTRYPOINT ["/csm_greeter_client"]

37
examples/cpp/csm/observability/Dockerfile.server

@ -0,0 +1,37 @@
# Copyright 2023 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.9-slim-bookworm
RUN apt-get update -y && apt-get upgrade -y && apt-get install -y build-essential clang curl
WORKDIR /workdir
RUN ln -s /usr/bin/python3 /usr/bin/python
RUN mkdir /artifacts
COPY . .
RUN tools/bazel build //examples/cpp/csm/observability:csm_greeter_server
RUN cp -rL /workdir/bazel-bin/examples/cpp/csm/observability/csm_greeter_server /artifacts/
FROM python:3.9-slim-bookworm
RUN apt-get update \
&& apt-get -y upgrade \
&& apt-get -y autoremove \
&& apt-get install -y curl
COPY --from=0 /artifacts ./
ENTRYPOINT ["/csm_greeter_server"]

35
examples/cpp/csm/observability/README.md

@ -0,0 +1,35 @@
# gRPC C++ CSM Hello World Example
This CSM example builds on the [Hello World Example](https://github.com/grpc/grpc/tree/master/examples/cpp/helloworld) and changes the gRPC client and server to test CSM observability.
## Configuration
The client takes the following command-line arguments -
* target - By default, the client tries to connect to the xDS target "xds:///helloworld:50051", and gRPC uses xDS to resolve this target and connect to the server backend. This can be overridden to change the target.
* prometheus_endpoint - Endpoint used for the Prometheus exporter. Default value is localhost:9464.
The server takes the following command-line arguments -
* port - Port on which the Hello World service is run. Defaults to 50051.
* prometheus_endpoint - Endpoint used for the Prometheus exporter. Default value is localhost:9464.
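For reference, a minimal sketch of invoking the binaries with these flags, assuming they were built with Bazel into the default `bazel-bin` output directory (adjust the paths for your build setup):
```
bazel-bin/examples/cpp/csm/observability/csm_greeter_server --port=50051
bazel-bin/examples/cpp/csm/observability/csm_greeter_client --target="xds:///helloworld:50051" --prometheus_endpoint="localhost:9464"
```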
## Building
From the gRPC workspace folder:
Client:
```
docker build -f examples/cpp/csm/observability/Dockerfile.client .
```
Server:
```
docker build -f examples/cpp/csm/observability/Dockerfile.server .
```
To push to a registry, add a tag to the image either by adding a `-t` flag to the `docker build` command above or run:
```
docker image tag ${sha from build command above} ${tag}
```
And then push the tagged image using `docker push`.
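For example, using the image ID printed by `docker build` and a hypothetical registry path (substitute your own repository and tag):
```
docker image tag ${sha from build command above} my-registry.example.com/csm-greeter-client:latest
docker push my-registry.example.com/csm-greeter-client:latest
```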

79
examples/cpp/csm/observability/csm_greeter_client.cc

@ -0,0 +1,79 @@
/*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <sys/types.h>
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <memory>
#include <string>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/types/optional.h"
#include "examples/cpp/otel/util.h"
#include "opentelemetry/exporters/prometheus/exporter_factory.h"
#include "opentelemetry/exporters/prometheus/exporter_options.h"
#include "opentelemetry/sdk/metrics/meter_provider.h"
#include <grpcpp/ext/csm_observability.h>
#include <grpcpp/grpcpp.h>
#include <grpcpp/support/string_ref.h>
ABSL_FLAG(std::string, target, "xds:///helloworld:50051", "Target string");
ABSL_FLAG(std::string, prometheus_endpoint, "localhost:9464",
"Prometheus exporter endpoint");
namespace {
absl::StatusOr<grpc::CsmObservability> InitializeObservability() {
opentelemetry::exporter::metrics::PrometheusExporterOptions opts;
// The default was "localhost:9464", which causes connection issues across GKE pods
opts.url = "0.0.0.0:9464";
auto prometheus_exporter =
opentelemetry::exporter::metrics::PrometheusExporterFactory::Create(opts);
auto meter_provider =
std::make_shared<opentelemetry::sdk::metrics::MeterProvider>();
// The default histogram boundaries are not granular enough for RPCs. Override
// the "grpc.client.attempt.duration" view as recommended by
// https://github.com/grpc/proposal/blob/master/A66-otel-stats.md.
AddLatencyView(meter_provider.get(), "grpc.client.attempt.duration", "s");
meter_provider->AddMetricReader(std::move(prometheus_exporter));
return grpc::CsmObservabilityBuilder()
.SetMeterProvider(std::move(meter_provider))
.BuildAndRegister();
}
} // namespace
int main(int argc, char** argv) {
absl::ParseCommandLine(argc, argv);
// Setup CSM observability
auto observability = InitializeObservability();
if (!observability.ok()) {
std::cerr << "CsmObservability::Init() failed: "
<< observability.status().ToString() << std::endl;
return static_cast<int>(observability.status().code());
}
// Continuously send RPCs every second.
RunClient(absl::GetFlag(FLAGS_target));
return 0;
}

67
examples/cpp/csm/observability/csm_greeter_server.cc

@ -0,0 +1,67 @@
/*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <iostream>
#include <memory>
#include <string>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "examples/cpp/otel/util.h"
#include "opentelemetry/exporters/prometheus/exporter_factory.h"
#include "opentelemetry/exporters/prometheus/exporter_options.h"
#include "opentelemetry/sdk/metrics/meter_provider.h"
#include <grpcpp/ext/admin_services.h>
#include <grpcpp/ext/csm_observability.h>
#include <grpcpp/ext/proto_server_reflection_plugin.h>
#include <grpcpp/grpcpp.h>
#include <grpcpp/health_check_service_interface.h>
#include <grpcpp/xds_server_builder.h>
ABSL_FLAG(int32_t, port, 50051, "Server port for service.");
ABSL_FLAG(std::string, prometheus_endpoint, "localhost:9464",
"Prometheus exporter endpoint");
int main(int argc, char** argv) {
absl::ParseCommandLine(argc, argv);
opentelemetry::exporter::metrics::PrometheusExporterOptions opts;
// The default was "localhost:9464", which causes connection issues across GKE pods
opts.url = "0.0.0.0:9464";
auto prometheus_exporter =
opentelemetry::exporter::metrics::PrometheusExporterFactory::Create(opts);
auto meter_provider =
std::make_shared<opentelemetry::sdk::metrics::MeterProvider>();
// The default histogram boundaries are not granular enough for RPCs. Override
// the "grpc.server.call.duration" view as recommended by
// https://github.com/grpc/proposal/blob/master/A66-otel-stats.md.
AddLatencyView(meter_provider.get(), "grpc.server.call.duration", "s");
meter_provider->AddMetricReader(std::move(prometheus_exporter));
auto observability = grpc::CsmObservabilityBuilder()
.SetMeterProvider(std::move(meter_provider))
.BuildAndRegister();
if (!observability.ok()) {
std::cerr << "CsmObservability::Init() failed: "
<< observability.status().ToString() << std::endl;
return static_cast<int>(observability.status().code());
}
RunServer(absl::GetFlag(FLAGS_port));
return 0;
}

2
examples/cpp/otel/BUILD

@ -15,7 +15,7 @@
licenses(["notice"])
package(
default_visibility = ["//examples/cpp/otel:__subpackages__"],
default_visibility = ["//examples/cpp:__subpackages__"],
)
cc_library(

14
gRPC-C++.podspec generated

@ -244,6 +244,8 @@ Pod::Spec.new do |s|
ss.dependency 'abseil/functional/bind_front', abseil_version
ss.dependency 'abseil/functional/function_ref', abseil_version
ss.dependency 'abseil/hash/hash', abseil_version
ss.dependency 'abseil/log/absl_check', abseil_version
ss.dependency 'abseil/log/absl_log', abseil_version
ss.dependency 'abseil/log/check', abseil_version
ss.dependency 'abseil/log/globals', abseil_version
ss.dependency 'abseil/log/log', abseil_version
@ -1176,20 +1178,22 @@ Pod::Spec.new do |s|
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.h',
'src/core/lib/surface/channel_create.h',
'src/core/lib/surface/channel_init.h',
'src/core/lib/surface/channel_stack_type.h',
'src/core/lib/surface/client_call.h',
'src/core/lib/surface/completion_queue.h',
'src/core/lib/surface/completion_queue_factory.h',
'src/core/lib/surface/event_string.h',
'src/core/lib/surface/filter_stack_call.h',
'src/core/lib/surface/init.h',
'src/core/lib/surface/init_internally.h',
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/legacy_channel.h',
'src/core/lib/surface/server_call.h',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/surface/wait_for_cq_end_op.h',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/call_arena_allocator.h',
'src/core/lib/transport/call_destination.h',
@ -2450,20 +2454,22 @@ Pod::Spec.new do |s|
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.h',
'src/core/lib/surface/channel_create.h',
'src/core/lib/surface/channel_init.h',
'src/core/lib/surface/channel_stack_type.h',
'src/core/lib/surface/client_call.h',
'src/core/lib/surface/completion_queue.h',
'src/core/lib/surface/completion_queue_factory.h',
'src/core/lib/surface/event_string.h',
'src/core/lib/surface/filter_stack_call.h',
'src/core/lib/surface/init.h',
'src/core/lib/surface/init_internally.h',
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/legacy_channel.h',
'src/core/lib/surface/server_call.h',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/surface/wait_for_cq_end_op.h',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/call_arena_allocator.h',
'src/core/lib/transport/call_destination.h',

18
gRPC-Core.podspec generated

@ -1771,6 +1771,8 @@ Pod::Spec.new do |s|
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.cc',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel.h',
'src/core/lib/surface/channel_create.cc',
@ -1779,12 +1781,16 @@ Pod::Spec.new do |s|
'src/core/lib/surface/channel_init.h',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/channel_stack_type.h',
'src/core/lib/surface/client_call.cc',
'src/core/lib/surface/client_call.h',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue.h',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/completion_queue_factory.h',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/event_string.h',
'src/core/lib/surface/filter_stack_call.cc',
'src/core/lib/surface/filter_stack_call.h',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init.h',
'src/core/lib/surface/init_internally.cc',
@ -1794,13 +1800,11 @@ Pod::Spec.new do |s|
'src/core/lib/surface/legacy_channel.cc',
'src/core/lib/surface/legacy_channel.h',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server_call.cc',
'src/core/lib/surface/server_call.h',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/surface/version.cc',
'src/core/lib/surface/wait_for_cq_end_op.cc',
'src/core/lib/surface/wait_for_cq_end_op.h',
'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/call_arena_allocator.cc',
@ -3232,20 +3236,22 @@ Pod::Spec.new do |s|
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.h',
'src/core/lib/surface/channel_create.h',
'src/core/lib/surface/channel_init.h',
'src/core/lib/surface/channel_stack_type.h',
'src/core/lib/surface/client_call.h',
'src/core/lib/surface/completion_queue.h',
'src/core/lib/surface/completion_queue_factory.h',
'src/core/lib/surface/event_string.h',
'src/core/lib/surface/filter_stack_call.h',
'src/core/lib/surface/init.h',
'src/core/lib/surface/init_internally.h',
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/legacy_channel.h',
'src/core/lib/surface/server_call.h',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/surface/wait_for_cq_end_op.h',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/call_arena_allocator.h',
'src/core/lib/transport/call_destination.h',

12
grpc.gemspec generated

@ -1658,6 +1658,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/call_log_batch.cc )
s.files += %w( src/core/lib/surface/call_test_only.h )
s.files += %w( src/core/lib/surface/call_trace.h )
s.files += %w( src/core/lib/surface/call_utils.cc )
s.files += %w( src/core/lib/surface/call_utils.h )
s.files += %w( src/core/lib/surface/channel.cc )
s.files += %w( src/core/lib/surface/channel.h )
s.files += %w( src/core/lib/surface/channel_create.cc )
@ -1666,12 +1668,16 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/channel_init.h )
s.files += %w( src/core/lib/surface/channel_stack_type.cc )
s.files += %w( src/core/lib/surface/channel_stack_type.h )
s.files += %w( src/core/lib/surface/client_call.cc )
s.files += %w( src/core/lib/surface/client_call.h )
s.files += %w( src/core/lib/surface/completion_queue.cc )
s.files += %w( src/core/lib/surface/completion_queue.h )
s.files += %w( src/core/lib/surface/completion_queue_factory.cc )
s.files += %w( src/core/lib/surface/completion_queue_factory.h )
s.files += %w( src/core/lib/surface/event_string.cc )
s.files += %w( src/core/lib/surface/event_string.h )
s.files += %w( src/core/lib/surface/filter_stack_call.cc )
s.files += %w( src/core/lib/surface/filter_stack_call.h )
s.files += %w( src/core/lib/surface/init.cc )
s.files += %w( src/core/lib/surface/init.h )
s.files += %w( src/core/lib/surface/init_internally.cc )
@ -1681,13 +1687,11 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/legacy_channel.cc )
s.files += %w( src/core/lib/surface/legacy_channel.h )
s.files += %w( src/core/lib/surface/metadata_array.cc )
s.files += %w( src/core/lib/surface/server_call.cc )
s.files += %w( src/core/lib/surface/server_call.h )
s.files += %w( src/core/lib/surface/validate_metadata.cc )
s.files += %w( src/core/lib/surface/validate_metadata.h )
s.files += %w( src/core/lib/surface/version.cc )
s.files += %w( src/core/lib/surface/wait_for_cq_end_op.cc )
s.files += %w( src/core/lib/surface/wait_for_cq_end_op.h )
s.files += %w( src/core/lib/transport/batch_builder.cc )
s.files += %w( src/core/lib/transport/batch_builder.h )
s.files += %w( src/core/lib/transport/bdp_estimator.cc )
s.files += %w( src/core/lib/transport/bdp_estimator.h )
s.files += %w( src/core/lib/transport/call_arena_allocator.cc )

3
include/grpc/support/port_platform.h

@ -272,6 +272,9 @@
#define GPR_PLATFORM_STRING "ios"
#define GPR_CPU_IPHONE 1
#define GRPC_CFSTREAM 1
#ifndef GRPC_IOS_EVENT_ENGINE_CLIENT
#define GRPC_IOS_EVENT_ENGINE_CLIENT 1
#endif /* GRPC_IOS_EVENT_ENGINE_CLIENT */
/* the c-ares resolver isn't safe to enable on iOS */
#define GRPC_ARES 0
#else /* TARGET_OS_IPHONE */

4
include/grpcpp/client_context.h

@ -38,7 +38,7 @@
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/impl/compression_types.h>
#include <grpc/impl/propagation_bits.h>
@ -248,7 +248,7 @@ class ClientContext {
/// \return A multimap of initial metadata key-value pairs from the server.
const std::multimap<grpc::string_ref, grpc::string_ref>&
GetServerInitialMetadata() const {
CHECK(initial_metadata_received_);
ABSL_CHECK(initial_metadata_received_);
return *recv_initial_metadata_.map();
}

8
include/grpcpp/completion_queue.h

@ -34,7 +34,7 @@
#include <list>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/grpc.h>
#include <grpc/support/atm.h>
@ -325,7 +325,7 @@ class CompletionQueue : private grpc::internal::GrpcLibrary {
bool ok = ev.success != 0;
void* ignored = tag;
if (tag->FinalizeResult(&ignored, &ok)) {
CHECK(ignored == tag);
ABSL_CHECK(ignored == tag);
return ok;
}
}
@ -346,7 +346,7 @@ class CompletionQueue : private grpc::internal::GrpcLibrary {
bool ok = ev.success != 0;
void* ignored = tag;
// the tag must be swallowed if using TryPluck
CHECK(!tag->FinalizeResult(&ignored, &ok));
ABSL_CHECK(!tag->FinalizeResult(&ignored, &ok));
}
/// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if
@ -363,7 +363,7 @@ class CompletionQueue : private grpc::internal::GrpcLibrary {
bool ok = ev.success != 0;
void* ignored = tag;
CHECK(!tag->FinalizeResult(&ignored, &ok));
ABSL_CHECK(!tag->FinalizeResult(&ignored, &ok));
}
/// Manage state of avalanching operations : completion queue tags that

18
include/grpcpp/impl/call_op_set.h

@ -23,8 +23,8 @@
#include <map>
#include <memory>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include <grpc/grpc.h>
#include <grpc/impl/compression_types.h>
@ -318,7 +318,7 @@ class CallOpSendMessage {
return;
}
if (msg_ != nullptr) {
CHECK(serializer_(msg_).ok());
ABSL_CHECK(serializer_(msg_).ok());
}
serializer_ = nullptr;
grpc_op* op = &ops[(*nops)++];
@ -799,7 +799,7 @@ class CallOpClientRecvStatus {
if (recv_status_ == nullptr || hijacked_) return;
if (static_cast<StatusCode>(status_code_) == StatusCode::OK) {
*recv_status_ = Status();
DCHECK_EQ(debug_error_string_, nullptr);
ABSL_DCHECK_EQ(debug_error_string_, nullptr);
} else {
*recv_status_ =
Status(static_cast<StatusCode>(status_code_),
@ -976,9 +976,9 @@ class CallOpSet : public CallOpSetInterface,
// A failure here indicates an API misuse; for example, doing a Write
// while another Write is already pending on the same RPC or invoking
// WritesDone multiple times
LOG(ERROR) << "API misuse of type " << grpc_call_error_to_string(err)
<< " observed";
CHECK(false);
ABSL_LOG(ERROR) << "API misuse of type " << grpc_call_error_to_string(err)
<< " observed";
ABSL_CHECK(false);
}
}
@ -988,8 +988,8 @@ class CallOpSet : public CallOpSetInterface,
done_intercepting_ = true;
// The following call_start_batch is internally-generated so no need for an
// explanatory log on failure.
CHECK(grpc_call_start_batch(call_.call(), nullptr, 0, core_cq_tag(),
nullptr) == GRPC_CALL_OK);
ABSL_CHECK(grpc_call_start_batch(call_.call(), nullptr, 0, core_cq_tag(),
nullptr) == GRPC_CALL_OK);
}
private:

94
include/grpcpp/impl/interceptor_common.h

@ -22,7 +22,7 @@
#include <array>
#include <functional>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/impl/grpc_types.h>
#include <grpc/support/log.h>
@ -58,15 +58,16 @@ class InterceptorBatchMethodsImpl
if (call_->client_rpc_info() != nullptr) {
return ProceedClient();
}
CHECK_NE(call_->server_rpc_info(), nullptr);
ABSL_CHECK_NE(call_->server_rpc_info(), nullptr);
ProceedServer();
}
void Hijack() override {
// Only the client can hijack when sending down initial metadata
CHECK(!reverse_ && ops_ != nullptr && call_->client_rpc_info() != nullptr);
ABSL_CHECK(!reverse_ && ops_ != nullptr &&
call_->client_rpc_info() != nullptr);
// It is illegal to call Hijack twice
CHECK(!ran_hijacking_interceptor_);
ABSL_CHECK(!ran_hijacking_interceptor_);
auto* rpc_info = call_->client_rpc_info();
rpc_info->hijacked_ = true;
rpc_info->hijacked_interceptor_ = current_interceptor_index_;
@ -81,21 +82,21 @@ class InterceptorBatchMethodsImpl
}
ByteBuffer* GetSerializedSendMessage() override {
CHECK_NE(orig_send_message_, nullptr);
ABSL_CHECK_NE(orig_send_message_, nullptr);
if (*orig_send_message_ != nullptr) {
CHECK(serializer_(*orig_send_message_).ok());
ABSL_CHECK(serializer_(*orig_send_message_).ok());
*orig_send_message_ = nullptr;
}
return send_message_;
}
const void* GetSendMessage() override {
CHECK_NE(orig_send_message_, nullptr);
ABSL_CHECK_NE(orig_send_message_, nullptr);
return *orig_send_message_;
}
void ModifySendMessage(const void* message) override {
CHECK_NE(orig_send_message_, nullptr);
ABSL_CHECK_NE(orig_send_message_, nullptr);
*orig_send_message_ = message;
}
@ -130,7 +131,7 @@ class InterceptorBatchMethodsImpl
Status* GetRecvStatus() override { return recv_status_; }
void FailHijackedSendMessage() override {
CHECK(hooks_[static_cast<size_t>(
ABSL_CHECK(hooks_[static_cast<size_t>(
experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)]);
*fail_send_message_ = true;
}
@ -193,7 +194,7 @@ class InterceptorBatchMethodsImpl
}
void FailHijackedRecvMessage() override {
CHECK(hooks_[static_cast<size_t>(
ABSL_CHECK(hooks_[static_cast<size_t>(
experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)]);
*hijacked_recv_message_failed_ = true;
}
@ -237,7 +238,7 @@ class InterceptorBatchMethodsImpl
// ContinueFinalizeOpsAfterInterception will be called. Note that neither of
// them is invoked if there were no interceptors registered.
bool RunInterceptors() {
CHECK(ops_);
ABSL_CHECK(ops_);
auto* client_rpc_info = call_->client_rpc_info();
if (client_rpc_info != nullptr) {
if (client_rpc_info->interceptors_.empty()) {
@ -262,8 +263,8 @@ class InterceptorBatchMethodsImpl
// SyncRequest.
bool RunInterceptors(std::function<void(void)> f) {
// This is used only by the server for initial call request
CHECK_EQ(reverse_, true);
CHECK_EQ(call_->client_rpc_info(), nullptr);
ABSL_CHECK_EQ(reverse_, true);
ABSL_CHECK_EQ(call_->client_rpc_info(), nullptr);
auto* server_rpc_info = call_->server_rpc_info();
if (server_rpc_info == nullptr || server_rpc_info->interceptors_.empty()) {
return true;
@ -357,7 +358,7 @@ class InterceptorBatchMethodsImpl
return ops_->ContinueFinalizeResultAfterInterception();
}
}
CHECK(callback_);
ABSL_CHECK(callback_);
callback_();
}
@ -423,98 +424,103 @@ class CancelInterceptorBatchMethods
void Hijack() override {
// Only the client can hijack when sending down initial metadata
CHECK(false) << "It is illegal to call Hijack on a method which has a "
"Cancel notification";
ABSL_CHECK(false) << "It is illegal to call Hijack on a method which has a "
"Cancel notification";
}
ByteBuffer* GetSerializedSendMessage() override {
CHECK(false) << "It is illegal to call GetSendMessage on a method which "
"has a Cancel notification";
ABSL_CHECK(false)
<< "It is illegal to call GetSendMessage on a method which "
"has a Cancel notification";
return nullptr;
}
bool GetSendMessageStatus() override {
CHECK(false)
ABSL_CHECK(false)
<< "It is illegal to call GetSendMessageStatus on a method which "
"has a Cancel notification";
return false;
}
const void* GetSendMessage() override {
CHECK(false)
ABSL_CHECK(false)
<< "It is illegal to call GetOriginalSendMessage on a method which "
"has a Cancel notification";
return nullptr;
}
void ModifySendMessage(const void* /*message*/) override {
CHECK(false) << "It is illegal to call ModifySendMessage on a method which "
"has a Cancel notification";
ABSL_CHECK(false)
<< "It is illegal to call ModifySendMessage on a method which "
"has a Cancel notification";
}
std::multimap<std::string, std::string>* GetSendInitialMetadata() override {
CHECK(false) << "It is illegal to call GetSendInitialMetadata on a "
"method which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call GetSendInitialMetadata on a "
"method which has a Cancel notification";
return nullptr;
}
Status GetSendStatus() override {
CHECK(false) << "It is illegal to call GetSendStatus on a method which "
"has a Cancel notification";
ABSL_CHECK(false)
<< "It is illegal to call GetSendStatus on a method which "
"has a Cancel notification";
return Status();
}
void ModifySendStatus(const Status& /*status*/) override {
CHECK(false) << "It is illegal to call ModifySendStatus on a method "
"which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call ModifySendStatus on a method "
"which has a Cancel notification";
}
std::multimap<std::string, std::string>* GetSendTrailingMetadata() override {
CHECK(false) << "It is illegal to call GetSendTrailingMetadata on a "
"method which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call GetSendTrailingMetadata on a "
"method which has a Cancel notification";
return nullptr;
}
void* GetRecvMessage() override {
CHECK(false) << "It is illegal to call GetRecvMessage on a method which "
"has a Cancel notification";
ABSL_CHECK(false)
<< "It is illegal to call GetRecvMessage on a method which "
"has a Cancel notification";
return nullptr;
}
std::multimap<grpc::string_ref, grpc::string_ref>* GetRecvInitialMetadata()
override {
CHECK(false) << "It is illegal to call GetRecvInitialMetadata on a "
"method which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call GetRecvInitialMetadata on a "
"method which has a Cancel notification";
return nullptr;
}
Status* GetRecvStatus() override {
CHECK(false) << "It is illegal to call GetRecvStatus on a method which "
"has a Cancel notification";
ABSL_CHECK(false)
<< "It is illegal to call GetRecvStatus on a method which "
"has a Cancel notification";
return nullptr;
}
std::multimap<grpc::string_ref, grpc::string_ref>* GetRecvTrailingMetadata()
override {
CHECK(false) << "It is illegal to call GetRecvTrailingMetadata on a "
"method which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call GetRecvTrailingMetadata on a "
"method which has a Cancel notification";
return nullptr;
}
std::unique_ptr<ChannelInterface> GetInterceptedChannel() override {
CHECK(false) << "It is illegal to call GetInterceptedChannel on a "
"method which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call GetInterceptedChannel on a "
"method which has a Cancel notification";
return std::unique_ptr<ChannelInterface>(nullptr);
}
void FailHijackedRecvMessage() override {
CHECK(false) << "It is illegal to call FailHijackedRecvMessage on a "
"method which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call FailHijackedRecvMessage on a "
"method which has a Cancel notification";
}
void FailHijackedSendMessage() override {
CHECK(false) << "It is illegal to call FailHijackedSendMessage on a "
"method which has a Cancel notification";
ABSL_CHECK(false) << "It is illegal to call FailHijackedSendMessage on a "
"method which has a Cancel notification";
}
};
} // namespace internal

6
include/grpcpp/impl/proto_utils.h

@ -21,7 +21,7 @@
#include <type_traits>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/byte_buffer_reader.h>
#include <grpc/impl/grpc_types.h>
@ -53,8 +53,8 @@ Status GenericSerialize(const grpc::protobuf::MessageLite& msg, ByteBuffer* bb,
if (static_cast<size_t>(byte_size) <= GRPC_SLICE_INLINED_SIZE) {
Slice slice(byte_size);
// We serialize directly into the allocated slices memory
CHECK(slice.end() == msg.SerializeWithCachedSizesToArray(
const_cast<uint8_t*>(slice.begin())));
ABSL_CHECK(slice.end() == msg.SerializeWithCachedSizesToArray(
const_cast<uint8_t*>(slice.begin())));
ByteBuffer tmp(&slice, 1);
bb->Swap(&tmp);

18
include/grpcpp/impl/rpc_service_method.h

@ -25,8 +25,8 @@
#include <memory>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include <grpcpp/impl/rpc_method.h>
#include <grpcpp/support/byte_buffer.h>
@ -77,7 +77,7 @@ class MethodHandler {
// retained by the handler. Returns nullptr if deserialization failed.
virtual void* Deserialize(grpc_call* /*call*/, grpc_byte_buffer* req,
Status* /*status*/, void** /*handler_data*/) {
CHECK_EQ(req, nullptr);
ABSL_CHECK_EQ(req, nullptr);
return nullptr;
}
};
@ -116,12 +116,12 @@ class RpcServiceMethod : public RpcMethod {
// this is not an error condition, as it allows users to declare a server
// like WithRawMethod_foo<AsyncService>. However since it
// overwrites behavior, it should be logged.
LOG(INFO) << "You are marking method " << name() << " as '"
<< TypeToString(api_type_)
<< "', even though it was previously marked '"
<< TypeToString(type)
<< "'. This behavior will overwrite the original behavior. If "
"you expected this then ignore this message.";
ABSL_LOG(INFO)
<< "You are marking method " << name() << " as '"
<< TypeToString(api_type_)
<< "', even though it was previously marked '" << TypeToString(type)
<< "'. This behavior will overwrite the original behavior. If "
"you expected this then ignore this message.";
}
api_type_ = type;
}

18
include/grpcpp/impl/server_callback_handlers.h

@ -18,7 +18,7 @@
#ifndef GRPCPP_IMPL_SERVER_CALLBACK_HANDLERS_H
#define GRPCPP_IMPL_SERVER_CALLBACK_HANDLERS_H
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
@ -149,7 +149,7 @@ class CallbackUnaryHandler : public grpc::internal::MethodHandler {
}
void SendInitialMetadata() override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
this->Ref();
// The callback for this function should not be marked inline because it
// is directly invoking a user-controlled reaction
@ -337,7 +337,7 @@ class CallbackClientStreamingHandler : public grpc::internal::MethodHandler {
}
void SendInitialMetadata() override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
this->Ref();
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
@ -541,7 +541,7 @@ class CallbackServerStreamingHandler : public grpc::internal::MethodHandler {
}
void SendInitialMetadata() override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
this->Ref();
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
@ -579,7 +579,7 @@ class CallbackServerStreamingHandler : public grpc::internal::MethodHandler {
ctx_->sent_initial_metadata_ = true;
}
// TODO(vjpai): don't assert
CHECK(write_ops_.SendMessagePtr(resp, options).ok());
ABSL_CHECK(write_ops_.SendMessagePtr(resp, options).ok());
call_.PerformOps(&write_ops_);
}
@ -587,7 +587,7 @@ class CallbackServerStreamingHandler : public grpc::internal::MethodHandler {
grpc::Status s) override {
// This combines the write into the finish callback
// TODO(vjpai): don't assert
CHECK(finish_ops_.SendMessagePtr(resp, options).ok());
ABSL_CHECK(finish_ops_.SendMessagePtr(resp, options).ok());
Finish(std::move(s));
}
@ -753,7 +753,7 @@ class CallbackBidiHandler : public grpc::internal::MethodHandler {
}
void SendInitialMetadata() override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
this->Ref();
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
@ -791,14 +791,14 @@ class CallbackBidiHandler : public grpc::internal::MethodHandler {
ctx_->sent_initial_metadata_ = true;
}
// TODO(vjpai): don't assert
CHECK(write_ops_.SendMessagePtr(resp, options).ok());
ABSL_CHECK(write_ops_.SendMessagePtr(resp, options).ok());
call_.PerformOps(&write_ops_);
}
void WriteAndFinish(const ResponseType* resp, grpc::WriteOptions options,
grpc::Status s) override {
// TODO(vjpai): don't assert
CHECK(finish_ops_.SendMessagePtr(resp, options).ok());
ABSL_CHECK(finish_ops_.SendMessagePtr(resp, options).ok());
Finish(std::move(s));
}

14
include/grpcpp/impl/service_type.h

@ -19,7 +19,7 @@
#ifndef GRPCPP_IMPL_SERVICE_TYPE_H
#define GRPCPP_IMPL_SERVICE_TYPE_H
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/support/log.h>
#include <grpcpp/impl/rpc_service_method.h>
@ -152,7 +152,7 @@ class Service {
// This does not have to be a hard error, however no one has approached us
// with a use case yet. Please file an issue if you believe you have one.
size_t idx = static_cast<size_t>(index);
CHECK_NE(methods_[idx].get(), nullptr)
ABSL_CHECK_NE(methods_[idx].get(), nullptr)
<< "Cannot mark the method as 'async' because it has already been "
"marked as 'generic'.";
methods_[idx]->SetServerApiType(internal::RpcServiceMethod::ApiType::ASYNC);
@ -162,7 +162,7 @@ class Service {
// This does not have to be a hard error, however no one has approached us
// with a use case yet. Please file an issue if you believe you have one.
size_t idx = static_cast<size_t>(index);
CHECK_NE(methods_[idx].get(), nullptr)
ABSL_CHECK_NE(methods_[idx].get(), nullptr)
<< "Cannot mark the method as 'raw' because it has already "
"been marked as 'generic'.";
methods_[idx]->SetServerApiType(internal::RpcServiceMethod::ApiType::RAW);
@ -172,7 +172,7 @@ class Service {
// This does not have to be a hard error, however no one has approached us
// with a use case yet. Please file an issue if you believe you have one.
size_t idx = static_cast<size_t>(index);
CHECK_NE(methods_[idx]->handler(), nullptr)
ABSL_CHECK_NE(methods_[idx]->handler(), nullptr)
<< "Cannot mark the method as 'generic' because it has already been "
"marked as 'async' or 'raw'.";
methods_[idx].reset();
@ -182,7 +182,7 @@ class Service {
// This does not have to be a hard error, however no one has approached us
// with a use case yet. Please file an issue if you believe you have one.
size_t idx = static_cast<size_t>(index);
CHECK(methods_[idx] && methods_[idx]->handler())
ABSL_CHECK(methods_[idx] && methods_[idx]->handler())
<< "Cannot mark an async or generic method Streamed";
methods_[idx]->SetHandler(streamed_method);
@ -197,7 +197,7 @@ class Service {
// This does not have to be a hard error, however no one has approached us
// with a use case yet. Please file an issue if you believe you have one.
size_t idx = static_cast<size_t>(index);
CHECK_NE(methods_[idx].get(), nullptr)
ABSL_CHECK_NE(methods_[idx].get(), nullptr)
<< "Cannot mark the method as 'callback' because it has already been "
"marked as 'generic'.";
methods_[idx]->SetHandler(handler);
@ -209,7 +209,7 @@ class Service {
// This does not have to be a hard error, however no one has approached us
// with a use case yet. Please file an issue if you believe you have one.
size_t idx = static_cast<size_t>(index);
CHECK_NE(methods_[idx].get(), nullptr)
ABSL_CHECK_NE(methods_[idx].get(), nullptr)
<< "Cannot mark the method as 'raw callback' because it has already "
"been marked as 'generic'.";
methods_[idx]->SetHandler(handler);

4
include/grpcpp/impl/sync.h

@ -27,7 +27,7 @@
#include <mutex>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include "absl/synchronization/mutex.h"
#include <grpc/support/log.h>
@ -106,7 +106,7 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
void Release() ABSL_UNLOCK_FUNCTION() {
DCHECK(!released_);
ABSL_DCHECK(!released_);
released_ = true;
mu_->Unlock();
}

6
include/grpcpp/server_interface.h

@ -19,7 +19,7 @@
#ifndef GRPCPP_SERVER_INTERFACE_H
#define GRPCPP_SERVER_INTERFACE_H
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/grpc.h>
#include <grpc/impl/grpc_types.h>
@ -320,7 +320,7 @@ class ServerInterface : public internal::CallHook {
grpc::CompletionQueue* call_cq,
grpc::ServerCompletionQueue* notification_cq, void* tag,
Message* message) {
CHECK(method);
ABSL_CHECK(method);
new PayloadAsyncRequest<Message>(method, this, context, stream, call_cq,
notification_cq, tag, message);
}
@ -331,7 +331,7 @@ class ServerInterface : public internal::CallHook {
grpc::CompletionQueue* call_cq,
grpc::ServerCompletionQueue* notification_cq,
void* tag) {
CHECK(method);
ABSL_CHECK(method);
new NoPayloadAsyncRequest(method, this, context, stream, call_cq,
notification_cq, tag);
}

90
include/grpcpp/support/async_stream.h

@ -19,7 +19,7 @@
#ifndef GRPCPP_SUPPORT_ASYNC_STREAM_H
#define GRPCPP_SUPPORT_ASYNC_STREAM_H
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/grpc.h>
#include <grpc/support/log.h>
@ -202,7 +202,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientAsyncReader));
ABSL_CHECK_EQ(size, sizeof(ClientAsyncReader));
}
// This operator should never be called as the memory should be freed as part
@ -210,10 +210,10 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall(void* tag) override {
CHECK(!started_);
ABSL_CHECK(!started_);
started_ = true;
StartCallInternal(tag);
}
@ -227,8 +227,8 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
/// calling code can access the received metadata through the
/// \a ClientContext.
void ReadInitialMetadata(void* tag) override {
CHECK(started_);
CHECK(!context_->initial_metadata_received_);
ABSL_CHECK(started_);
ABSL_CHECK(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
meta_ops_.RecvInitialMetadata(context_);
@ -236,7 +236,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
}
void Read(R* msg, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
@ -251,7 +251,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata received from the server.
void Finish(grpc::Status* status, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@ -267,12 +267,12 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
const W& request, bool start, void* tag)
: context_(context), call_(call), started_(start) {
// TODO(ctiller): don't assert
CHECK(init_ops_.SendMessage(request).ok());
ABSL_CHECK(init_ops_.SendMessage(request).ok());
init_ops_.ClientSendClose();
if (start) {
StartCallInternal(tag);
} else {
CHECK(tag == nullptr);
ABSL_CHECK(tag == nullptr);
}
}
@ -350,7 +350,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientAsyncWriter));
ABSL_CHECK_EQ(size, sizeof(ClientAsyncWriter));
}
// This operator should never be called as the memory should be freed as part
@ -358,10 +358,10 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall(void* tag) override {
CHECK(!started_);
ABSL_CHECK(!started_);
started_ = true;
StartCallInternal(tag);
}
@ -374,8 +374,8 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// associated with this call is updated, and the calling code can access
/// the received metadata through the \a ClientContext.
void ReadInitialMetadata(void* tag) override {
CHECK(started_);
CHECK(!context_->initial_metadata_received_);
ABSL_CHECK(started_);
ABSL_CHECK(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
meta_ops_.RecvInitialMetadata(context_);
@ -383,27 +383,27 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
void Write(const W& msg, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
CHECK(write_ops_.SendMessage(msg).ok());
ABSL_CHECK(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void Write(const W& msg, grpc::WriteOptions options, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
write_ops_.set_output_tag(tag);
if (options.is_last_message()) {
options.set_buffer_hint();
write_ops_.ClientSendClose();
}
// TODO(ctiller): don't assert
CHECK(write_ops_.SendMessage(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessage(msg, options).ok());
call_.PerformOps(&write_ops_);
}
void WritesDone(void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
write_ops_.set_output_tag(tag);
write_ops_.ClientSendClose();
call_.PerformOps(&write_ops_);
@ -417,7 +417,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// - attempts to fill in the \a response parameter passed to this class's
/// constructor with the server's response message.
void Finish(grpc::Status* status, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@ -437,7 +437,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
if (start) {
StartCallInternal(tag);
} else {
CHECK(tag == nullptr);
ABSL_CHECK(tag == nullptr);
}
}
@ -517,7 +517,7 @@ class ClientAsyncReaderWriter final
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientAsyncReaderWriter));
ABSL_CHECK_EQ(size, sizeof(ClientAsyncReaderWriter));
}
// This operator should never be called as the memory should be freed as part
@ -525,10 +525,10 @@ class ClientAsyncReaderWriter final
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall(void* tag) override {
CHECK(!started_);
ABSL_CHECK(!started_);
started_ = true;
StartCallInternal(tag);
}
@ -541,8 +541,8 @@ class ClientAsyncReaderWriter final
/// is updated with it, and then the receiving initial metadata can
/// be accessed through this \a ClientContext.
void ReadInitialMetadata(void* tag) override {
CHECK(started_);
CHECK(!context_->initial_metadata_received_);
ABSL_CHECK(started_);
ABSL_CHECK(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
meta_ops_.RecvInitialMetadata(context_);
@ -550,7 +550,7 @@ class ClientAsyncReaderWriter final
}
void Read(R* msg, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
@ -560,27 +560,27 @@ class ClientAsyncReaderWriter final
}
void Write(const W& msg, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
CHECK(write_ops_.SendMessage(msg).ok());
ABSL_CHECK(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void Write(const W& msg, grpc::WriteOptions options, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
write_ops_.set_output_tag(tag);
if (options.is_last_message()) {
options.set_buffer_hint();
write_ops_.ClientSendClose();
}
// TODO(ctiller): don't assert
CHECK(write_ops_.SendMessage(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessage(msg, options).ok());
call_.PerformOps(&write_ops_);
}
void WritesDone(void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
write_ops_.set_output_tag(tag);
write_ops_.ClientSendClose();
call_.PerformOps(&write_ops_);
@ -591,7 +591,7 @@ class ClientAsyncReaderWriter final
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata sent from the server.
void Finish(grpc::Status* status, void* tag) override {
CHECK(started_);
ABSL_CHECK(started_);
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@ -608,7 +608,7 @@ class ClientAsyncReaderWriter final
if (start) {
StartCallInternal(tag);
} else {
CHECK(tag == nullptr);
ABSL_CHECK(tag == nullptr);
}
}
@ -708,7 +708,7 @@ class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
/// - The initial metadata that will be sent to the client from this op will
/// be taken from the \a ServerContext associated with the call.
void SendInitialMetadata(void* tag) override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
@ -767,7 +767,7 @@ class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
/// gRPC doesn't take ownership or a reference to \a status, so it is safe to
/// deallocate once FinishWithError returns.
void FinishWithError(const grpc::Status& status, void* tag) override {
CHECK(!status.ok());
ABSL_CHECK(!status.ok());
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
@ -857,7 +857,7 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
///
/// \param[in] tag Tag identifying this request.
void SendInitialMetadata(void* tag) override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
@ -873,7 +873,7 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
write_ops_.set_output_tag(tag);
EnsureInitialMetadataSent(&write_ops_);
// TODO(ctiller): don't assert
CHECK(write_ops_.SendMessage(msg).ok());
ABSL_CHECK(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
@ -885,7 +885,7 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
EnsureInitialMetadataSent(&write_ops_);
// TODO(ctiller): don't assert
CHECK(write_ops_.SendMessage(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessage(msg, options).ok());
call_.PerformOps(&write_ops_);
}
@ -904,7 +904,7 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
write_ops_.set_output_tag(tag);
EnsureInitialMetadataSent(&write_ops_);
options.set_buffer_hint();
CHECK(write_ops_.SendMessage(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessage(msg, options).ok());
write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
call_.PerformOps(&write_ops_);
}
@ -1023,7 +1023,7 @@ class ServerAsyncReaderWriter final
///
/// \param[in] tag Tag identifying this request.
void SendInitialMetadata(void* tag) override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
@ -1045,7 +1045,7 @@ class ServerAsyncReaderWriter final
write_ops_.set_output_tag(tag);
EnsureInitialMetadataSent(&write_ops_);
// TODO(ctiller): don't assert
CHECK(write_ops_.SendMessage(msg).ok());
ABSL_CHECK(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
@ -1055,7 +1055,7 @@ class ServerAsyncReaderWriter final
options.set_buffer_hint();
}
EnsureInitialMetadataSent(&write_ops_);
CHECK(write_ops_.SendMessage(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessage(msg, options).ok());
call_.PerformOps(&write_ops_);
}
@ -1075,7 +1075,7 @@ class ServerAsyncReaderWriter final
write_ops_.set_output_tag(tag);
EnsureInitialMetadataSent(&write_ops_);
options.set_buffer_hint();
CHECK(write_ops_.SendMessage(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessage(msg, options).ok());
write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
call_.PerformOps(&write_ops_);
}
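
Several of the classes above repeat the same pair of operator delete overloads, with a comment noting that instances only ever live on the call arena. The sketch below (a toy CallObject with stack storage standing in for grpc_call_arena_alloc; not code from this PR) shows that idiom: construction through placement new, a sized delete that only sanity-checks the size, and a placement-form delete that exists solely so compilers do not warn about a missing match for placement new.

#include <cassert>
#include <cstddef>
#include <new>

class CallObject {
 public:
  // Instances are only created with placement new into arena-owned storage,
  // so nothing is freed here; the sized form just checks the object size.
  static void operator delete(void* /*ptr*/, std::size_t size) {
    assert(size == sizeof(CallObject));
  }
  // Matches the placement new below so compilers do not complain about a
  // mismatched new/delete pair; it should never actually run.
  static void operator delete(void* /*ptr*/, void* /*place*/) { assert(false); }

  int value = 42;
};

int main() {
  alignas(CallObject) unsigned char storage[sizeof(CallObject)];
  CallObject* obj = new (storage) CallObject();  // no heap allocation
  int v = obj->value;
  obj->~CallObject();  // destroy explicitly; the storage outlives the object
  return v == 42 ? 0 : 1;
}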

@ -19,7 +19,7 @@
#ifndef GRPCPP_SUPPORT_ASYNC_UNARY_CALL_H
#define GRPCPP_SUPPORT_ASYNC_UNARY_CALL_H
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/grpc.h>
#include <grpc/support/log.h>
@ -132,7 +132,7 @@ class ClientAsyncResponseReaderHelper {
new (grpc_call_arena_alloc(call, sizeof(SingleBufType))) SingleBufType;
*single_buf_ptr = single_buf;
// TODO(ctiller): don't assert
CHECK(single_buf->SendMessage(request).ok());
ABSL_CHECK(single_buf->SendMessage(request).ok());
single_buf->ClientSendClose();
// The purpose of the following functions is to type-erase the actual
@ -222,7 +222,7 @@ class ClientAsyncResponseReader final
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientAsyncResponseReader));
ABSL_CHECK_EQ(size, sizeof(ClientAsyncResponseReader));
}
// This operator should never be called as the memory should be freed as part
@ -230,10 +230,10 @@ class ClientAsyncResponseReader final
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall() override {
DCHECK(!started_);
ABSL_DCHECK(!started_);
started_ = true;
internal::ClientAsyncResponseReaderHelper::StartCall(context_, single_buf_);
}
@ -245,8 +245,8 @@ class ClientAsyncResponseReader final
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata sent from the server.
void ReadInitialMetadata(void* tag) override {
DCHECK(started_);
DCHECK(!context_->initial_metadata_received_);
ABSL_DCHECK(started_);
ABSL_DCHECK(!context_->initial_metadata_received_);
read_initial_metadata_(context_, &call_, single_buf_, tag);
initial_metadata_read_ = true;
}
@ -257,7 +257,7 @@ class ClientAsyncResponseReader final
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata sent from the server.
void Finish(R* msg, grpc::Status* status, void* tag) override {
DCHECK(started_);
ABSL_DCHECK(started_);
finish_(context_, &call_, initial_metadata_read_, single_buf_, &finish_buf_,
static_cast<void*>(msg), status, tag);
}
@ -306,7 +306,7 @@ class ServerAsyncResponseWriter final
///
/// \param[in] tag Tag identifying this request.
void SendInitialMetadata(void* tag) override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
meta_buf_.set_output_tag(tag);
meta_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
@ -375,7 +375,7 @@ class ServerAsyncResponseWriter final
/// deallocate them once the Finish operation is complete (i.e. a result
/// arrives in the completion queue).
void FinishWithError(const grpc::Status& status, void* tag) {
CHECK(!status.ok());
ABSL_CHECK(!status.ok());
finish_buf_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,

@ -21,7 +21,7 @@
#include <functional>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/grpc.h>
#include <grpc/impl/grpc_types.h>
@ -73,7 +73,7 @@ class CallbackWithStatusTag : public grpc_completion_queue_functor {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(CallbackWithStatusTag));
ABSL_CHECK_EQ(size, sizeof(CallbackWithStatusTag));
}
// This operator should never be called as the memory should be freed as part
@ -81,7 +81,7 @@ class CallbackWithStatusTag : public grpc_completion_queue_functor {
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
CallbackWithStatusTag(grpc_call* call, std::function<void(Status)> f,
CompletionQueueTag* ops)
@ -120,7 +120,7 @@ class CallbackWithStatusTag : public grpc_completion_queue_functor {
// The tag was swallowed
return;
}
CHECK(ignored == ops_);
ABSL_CHECK(ignored == ops_);
// Last use of func_ or status_, so ok to move them out
auto func = std::move(func_);
@ -139,7 +139,7 @@ class CallbackWithSuccessTag : public grpc_completion_queue_functor {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(CallbackWithSuccessTag));
ABSL_CHECK_EQ(size, sizeof(CallbackWithSuccessTag));
}
// This operator should never be called as the memory should be freed as part
@ -147,7 +147,7 @@ class CallbackWithSuccessTag : public grpc_completion_queue_functor {
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
CallbackWithSuccessTag() : call_(nullptr) {}
@ -164,7 +164,7 @@ class CallbackWithSuccessTag : public grpc_completion_queue_functor {
// callbacks.
void Set(grpc_call* call, std::function<void(bool)> f,
CompletionQueueTag* ops, bool can_inline) {
CHECK_EQ(call_, nullptr);
ABSL_CHECK_EQ(call_, nullptr);
grpc_call_ref(call);
call_ = call;
func_ = std::move(f);
@ -210,7 +210,7 @@ class CallbackWithSuccessTag : public grpc_completion_queue_functor {
#endif
bool do_callback = ops_->FinalizeResult(&ignored, &ok);
#ifndef NDEBUG
DCHECK(ignored == ops);
ABSL_DCHECK(ignored == ops);
#endif
if (do_callback) {

@ -22,7 +22,7 @@
#include <atomic>
#include <functional>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
@ -72,7 +72,7 @@ class CallbackUnaryCallImpl {
const InputMessage* request, OutputMessage* result,
std::function<void(grpc::Status)> on_completion) {
grpc::CompletionQueue* cq = channel->CallbackCQ();
CHECK_NE(cq, nullptr);
ABSL_CHECK_NE(cq, nullptr);
grpc::internal::Call call(channel->CreateCall(method, context, cq));
using FullCallOpSet = grpc::internal::CallOpSet<
@ -306,7 +306,7 @@ class ClientBidiReactor : public internal::ClientReactor {
/// The argument to AddMultipleHolds must be positive.
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) {
DCHECK_GT(holds, 0);
ABSL_DCHECK_GT(holds, 0);
stream_->AddHold(holds);
}
void RemoveHold() { stream_->RemoveHold(); }
@ -370,7 +370,7 @@ class ClientReadReactor : public internal::ClientReactor {
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) {
DCHECK_GT(holds, 0);
ABSL_DCHECK_GT(holds, 0);
reader_->AddHold(holds);
}
void RemoveHold() { reader_->RemoveHold(); }
@ -402,7 +402,7 @@ class ClientWriteReactor : public internal::ClientReactor {
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) {
DCHECK_GT(holds, 0);
ABSL_DCHECK_GT(holds, 0);
writer_->AddHold(holds);
}
void RemoveHold() { writer_->RemoveHold(); }
@ -463,7 +463,7 @@ class ClientCallbackReaderWriterImpl
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientCallbackReaderWriterImpl));
ABSL_CHECK_EQ(size, sizeof(ClientCallbackReaderWriterImpl));
}
// This operator should never be called as the memory should be freed as part
@ -471,7 +471,7 @@ class ClientCallbackReaderWriterImpl
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall() ABSL_LOCKS_EXCLUDED(start_mu_) override {
// This call initiates two batches, plus any backlog, each with a callback
@ -529,7 +529,7 @@ class ClientCallbackReaderWriterImpl
write_ops_.ClientSendClose();
}
// TODO(vjpai): don't assert
CHECK(write_ops_.SendMessagePtr(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessagePtr(msg, options).ok());
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(corked_write_needed_)) {
write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
@ -721,7 +721,7 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientCallbackReaderImpl));
ABSL_CHECK_EQ(size, sizeof(ClientCallbackReaderImpl));
}
// This operator should never be called as the memory should be freed as part
@ -729,7 +729,7 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall() override {
// This call initiates two batches, plus any backlog, each with a callback
@ -806,7 +806,7 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
: context_(context), call_(call), reactor_(reactor) {
this->BindReactor(reactor);
// TODO(vjpai): don't assert
CHECK(start_ops_.SendMessagePtr(request).ok());
ABSL_CHECK(start_ops_.SendMessagePtr(request).ok());
start_ops_.ClientSendClose();
}
@ -882,7 +882,7 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientCallbackWriterImpl));
ABSL_CHECK_EQ(size, sizeof(ClientCallbackWriterImpl));
}
// This operator should never be called as the memory should be freed as part
@ -890,7 +890,7 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall() ABSL_LOCKS_EXCLUDED(start_mu_) override {
// This call initiates two batches, plus any backlog, each with a callback
@ -931,7 +931,7 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
write_ops_.ClientSendClose();
}
// TODO(vjpai): don't assert
CHECK(write_ops_.SendMessagePtr(msg, options).ok());
ABSL_CHECK(write_ops_.SendMessagePtr(msg, options).ok());
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(corked_write_needed_)) {
@ -1112,7 +1112,7 @@ class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* /*ptr*/, std::size_t size) {
CHECK_EQ(size, sizeof(ClientCallbackUnaryImpl));
ABSL_CHECK_EQ(size, sizeof(ClientCallbackUnaryImpl));
}
// This operator should never be called as the memory should be freed as part
@ -1120,7 +1120,7 @@ class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { CHECK(false); }
static void operator delete(void*, void*) { ABSL_CHECK(false); }
void StartCall() override {
// This call initiates two batches, each with a callback
@ -1159,7 +1159,7 @@ class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
: context_(context), call_(call), reactor_(reactor) {
this->BindReactor(reactor);
// TODO(vjpai): don't assert
CHECK(start_ops_.SendMessagePtr(request).ok());
ABSL_CHECK(start_ops_.SendMessagePtr(request).ok());
start_ops_.ClientSendClose();
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();

@ -22,7 +22,7 @@
#include <memory>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/support/log.h>
#include <grpcpp/impl/rpc_method.h>
@ -141,7 +141,7 @@ class ClientRpcInfo {
// Runs interceptor at pos \a pos.
void RunInterceptor(
experimental::InterceptorBatchMethods* interceptor_methods, size_t pos) {
CHECK_LT(pos, interceptors_.size());
ABSL_CHECK_LT(pos, interceptors_.size());
interceptors_[pos]->Intercept(interceptor_methods);
}

@ -19,7 +19,7 @@
#ifndef GRPCPP_SUPPORT_METHOD_HANDLER_H
#define GRPCPP_SUPPORT_METHOD_HANDLER_H
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/byte_buffer.h>
#include <grpc/support/log.h>
@ -59,7 +59,7 @@ template <class Callable>
template <class ResponseType>
void UnaryRunHandlerHelper(const MethodHandler::HandlerParameter& param,
ResponseType* rsp, grpc::Status& status) {
CHECK(!param.server_context->sent_initial_metadata_);
ABSL_CHECK(!param.server_context->sent_initial_metadata_);
grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
grpc::internal::CallOpSendMessage,
grpc::internal::CallOpServerSendStatus>

@ -21,7 +21,7 @@
#include <type_traits>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include "absl/strings/cord.h"
#include <grpc/byte_buffer.h>
@ -76,7 +76,7 @@ class ProtoBufferReader : public grpc::protobuf::io::ZeroCopyInputStream {
if (backup_count_ > 0) {
*data = GRPC_SLICE_START_PTR(*slice_) + GRPC_SLICE_LENGTH(*slice_) -
backup_count_;
CHECK_LE(backup_count_, INT_MAX);
ABSL_CHECK_LE(backup_count_, INT_MAX);
*size = static_cast<int>(backup_count_);
backup_count_ = 0;
return true;
@ -87,7 +87,7 @@ class ProtoBufferReader : public grpc::protobuf::io::ZeroCopyInputStream {
}
*data = GRPC_SLICE_START_PTR(*slice_);
// On win x64, int is only 32bit
CHECK_LE(GRPC_SLICE_LENGTH(*slice_), static_cast<size_t>(INT_MAX));
ABSL_CHECK_LE(GRPC_SLICE_LENGTH(*slice_), static_cast<size_t>(INT_MAX));
byte_count_ += * size = static_cast<int>(GRPC_SLICE_LENGTH(*slice_));
return true;
}
@ -99,7 +99,7 @@ class ProtoBufferReader : public grpc::protobuf::io::ZeroCopyInputStream {
/// bytes that have already been returned by the last call of Next.
/// So do the backup and have that ready for a later Next.
void BackUp(int count) override {
CHECK_LE(count, static_cast<int>(GRPC_SLICE_LENGTH(*slice_)));
ABSL_CHECK_LE(count, static_cast<int>(GRPC_SLICE_LENGTH(*slice_)));
backup_count_ = count;
}
@ -175,7 +175,7 @@ class ProtoBufferReader : public grpc::protobuf::io::ZeroCopyInputStream {
return true;
}
}
CHECK_EQ(count, 0);
ABSL_CHECK_EQ(count, 0);
return true;
}
#endif // GRPC_PROTOBUF_CORD_SUPPORT_ENABLED
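
ProtoBufferReader above implements protobuf's ZeroCopyInputStream over gRPC slices, and the Next/BackUp pair it overrides follows the standard contract. That contract is sketched below with protobuf's own ArrayInputStream (plain protobuf, not gRPC code, assuming libprotobuf is available): Next() hands out an internal buffer, and BackUp(count) returns the unread tail so the following Next() yields it again.

#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
#include <iostream>
#include <string>

int main() {
  const char data[] = "hello world";
  google::protobuf::io::ArrayInputStream stream(data, sizeof(data) - 1);

  const void* buf = nullptr;
  int size = 0;
  stream.Next(&buf, &size);  // yields all 11 bytes as one block
  stream.BackUp(size - 5);   // keep "hello", hand the rest back to the stream

  std::cout << "consumed=" << stream.ByteCount() << "\n";  // prints consumed=5
  stream.Next(&buf, &size);  // re-yields the backed-up " world"
  std::cout << std::string(static_cast<const char*>(buf), size) << "\n";
  return 0;
}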

@ -21,7 +21,7 @@
#include <type_traits>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include "absl/strings/cord.h"
#include <grpc/byte_buffer.h>
@ -65,7 +65,7 @@ class ProtoBufferWriter : public grpc::protobuf::io::ZeroCopyOutputStream {
total_size_(total_size),
byte_count_(0),
have_backup_(false) {
CHECK(!byte_buffer->Valid());
ABSL_CHECK(!byte_buffer->Valid());
/// Create an empty raw byte buffer and look at its underlying slice buffer
grpc_byte_buffer* bp = grpc_raw_byte_buffer_create(nullptr, 0);
byte_buffer->set_buffer(bp);
@ -82,7 +82,7 @@ class ProtoBufferWriter : public grpc::protobuf::io::ZeroCopyOutputStream {
/// safe for the caller to write from data[0, size - 1].
bool Next(void** data, int* size) override {
// Protobuf should not ask for more memory than total_size_.
CHECK_LT(byte_count_, total_size_);
ABSL_CHECK_LT(byte_count_, total_size_);
// 1. Use the remaining backup slice if we have one
// 2. Otherwise allocate a slice, up to the remaining length needed
// or our maximum allocation size
@ -107,7 +107,7 @@ class ProtoBufferWriter : public grpc::protobuf::io::ZeroCopyOutputStream {
}
*data = GRPC_SLICE_START_PTR(slice_);
// On win x64, int is only 32bit
CHECK(GRPC_SLICE_LENGTH(slice_) <= static_cast<size_t>(INT_MAX));
ABSL_CHECK(GRPC_SLICE_LENGTH(slice_) <= static_cast<size_t>(INT_MAX));
byte_count_ += * size = static_cast<int>(GRPC_SLICE_LENGTH(slice_));
// Using grpc_slice_buffer_add could modify slice_ and merge it with the
// previous slice. Therefore, use grpc_slice_buffer_add_indexed method to
@ -132,7 +132,7 @@ class ProtoBufferWriter : public grpc::protobuf::io::ZeroCopyOutputStream {
/// 2. Split it into the needed (if any) and unneeded part
/// 3. Add the needed part back to the slice buffer
/// 4. Mark that we still have the remaining part (for later use/unref)
CHECK_LE(count, static_cast<int>(GRPC_SLICE_LENGTH(slice_)));
ABSL_CHECK_LE(count, static_cast<int>(GRPC_SLICE_LENGTH(slice_)));
grpc_slice_buffer_pop(slice_buffer_);
if (static_cast<size_t>(count) == GRPC_SLICE_LENGTH(slice_)) {
backup_slice_ = slice_;

@ -22,7 +22,7 @@
#include <atomic>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/support/log.h>
#include <grpcpp/impl/rpc_method.h>
@ -102,7 +102,7 @@ class ServerRpcInfo {
// Runs interceptor at pos \a pos.
void RunInterceptor(
experimental::InterceptorBatchMethods* interceptor_methods, size_t pos) {
CHECK_LT(pos, interceptors_.size());
ABSL_CHECK_LT(pos, interceptors_.size());
interceptors_[pos]->Intercept(interceptor_methods);
}

@ -19,7 +19,7 @@
#ifndef GRPCPP_SUPPORT_SYNC_STREAM_H
#define GRPCPP_SUPPORT_SYNC_STREAM_H
#include "absl/log/check.h"
#include "absl/log/absl_check.h"
#include <grpc/support/log.h>
#include <grpcpp/client_context.h>
@ -186,7 +186,7 @@ class ClientReader final : public ClientReaderInterface<R> {
/// the server will be accessible through the \a ClientContext used to
/// construct this object.
void WaitForInitialMetadata() override {
CHECK(!context_->initial_metadata_received_);
ABSL_CHECK(!context_->initial_metadata_received_);
grpc::internal::CallOpSet<grpc::internal::CallOpRecvInitialMetadata> ops;
ops.RecvInitialMetadata(context_);
@ -232,7 +232,7 @@ class ClientReader final : public ClientReaderInterface<R> {
grpc::Status status;
ops.ClientRecvStatus(context_, &status);
call_.PerformOps(&ops);
CHECK(cq_.Pluck(&ops));
ABSL_CHECK(cq_.Pluck(&ops));
return status;
}
@ -261,7 +261,7 @@ class ClientReader final : public ClientReaderInterface<R> {
ops.SendInitialMetadata(&context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
CHECK(ops.SendMessagePtr(&request).ok());
ABSL_CHECK(ops.SendMessagePtr(&request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
@ -308,7 +308,7 @@ class ClientWriter : public ClientWriterInterface<W> {
/// Once complete, the initial metadata read from the server will be
/// accessible through the \a ClientContext used to construct this object.
void WaitForInitialMetadata() {
CHECK(!context_->initial_metadata_received_);
ABSL_CHECK(!context_->initial_metadata_received_);
grpc::internal::CallOpSet<grpc::internal::CallOpRecvInitialMetadata> ops;
ops.RecvInitialMetadata(context_);
@ -366,7 +366,7 @@ class ClientWriter : public ClientWriterInterface<W> {
}
finish_ops_.ClientRecvStatus(context_, &status);
call_.PerformOps(&finish_ops_);
CHECK(cq_.Pluck(&finish_ops_));
ABSL_CHECK(cq_.Pluck(&finish_ops_));
return status;
}
@ -457,7 +457,7 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// Once complete, the initial metadata read from the server will be
/// accessible through the \a ClientContext used to construct this object.
void WaitForInitialMetadata() override {
CHECK(!context_->initial_metadata_received_);
ABSL_CHECK(!context_->initial_metadata_received_);
grpc::internal::CallOpSet<grpc::internal::CallOpRecvInitialMetadata> ops;
ops.RecvInitialMetadata(context_);
@ -538,7 +538,7 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
grpc::Status status;
ops.ClientRecvStatus(context_, &status);
call_.PerformOps(&ops);
CHECK(cq_.Pluck(&ops));
ABSL_CHECK(cq_.Pluck(&ops));
return status;
}
@ -585,7 +585,7 @@ class ServerReader final : public ServerReaderInterface<R> {
/// for semantics. Note that initial metadata will be affected by the
/// \a ServerContext associated with this call.
void SendInitialMetadata() override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(&ctx_->initial_metadata_,
@ -642,7 +642,7 @@ class ServerWriter final : public ServerWriterInterface<W> {
/// Note that initial metadata will be affected by the
/// \a ServerContext associated with this call.
void SendInitialMetadata() override {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(&ctx_->initial_metadata_,
@ -715,7 +715,7 @@ class ServerReaderWriterBody final {
: call_(call), ctx_(ctx) {}
void SendInitialMetadata() {
CHECK(!ctx_->sent_initial_metadata_);
ABSL_CHECK(!ctx_->sent_initial_metadata_);
grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(&ctx_->initial_metadata_,
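
The sync_stream.h changes above are confined to assertions inside the synchronous streaming wrappers; the public calling pattern is unchanged. For orientation, here is a usage sketch of the server-streaming reader whose internals appear above, written against the route_guide example service. The RouteGuide, ListFeatures, Rectangle, and Feature names stand in for any generated stub and messages and are not part of this PR.

#include <memory>
#include <grpcpp/grpcpp.h>
#include "route_guide.grpc.pb.h"  // hypothetical generated code

void ListAllFeatures(routeguide::RouteGuide::Stub* stub,
                     const routeguide::Rectangle& rect) {
  grpc::ClientContext context;
  std::unique_ptr<grpc::ClientReader<routeguide::Feature>> reader(
      stub->ListFeatures(&context, rect));
  reader->WaitForInitialMetadata();  // optional; must precede the first Read
  routeguide::Feature feature;
  while (reader->Read(&feature)) {
    // consume feature
  }
  grpc::Status status = reader->Finish();  // blocks for the server's status
  if (!status.ok()) {
    // handle the error
  }
}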

12 package.xml (generated)

@ -1640,6 +1640,8 @@
<file baseinstalldir="/" name="src/core/lib/surface/call_log_batch.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_test_only.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_utils.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/channel.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/channel.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/channel_create.cc" role="src" />
@ -1648,12 +1650,16 @@
<file baseinstalldir="/" name="src/core/lib/surface/channel_init.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/channel_stack_type.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/channel_stack_type.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/client_call.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/client_call.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/completion_queue.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/completion_queue.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/completion_queue_factory.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/completion_queue_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/event_string.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/event_string.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/filter_stack_call.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/filter_stack_call.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/init.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/init.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/init_internally.cc" role="src" />
@ -1663,13 +1669,11 @@
<file baseinstalldir="/" name="src/core/lib/surface/legacy_channel.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/legacy_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/metadata_array.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/server_call.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/server_call.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/version.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/wait_for_cq_end_op.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/wait_for_cq_end_op.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/batch_builder.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/batch_builder.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/call_arena_allocator.cc" role="src" />

@ -48,64 +48,47 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/client_channel/backup_poller.h"
#include "src/core/client_channel/client_channel_internal.h"
#include "src/core/client_channel/client_channel_service_config.h"
#include "src/core/client_channel/config_selector.h"
#include "src/core/client_channel/dynamic_filters.h"
#include "src/core/client_channel/global_subchannel_pool.h"
#include "src/core/client_channel/local_subchannel_pool.h"
#include "src/core/client_channel/retry_filter.h"
#include "src/core/client_channel/subchannel.h"
#include "src/core/client_channel/subchannel_interface_internal.h"
#include "src/core/ext/filters/channel_idle/legacy_channel_idle_filter.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/promise_based_filter.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/unique_type_name.h"
#include "src/core/lib/gprpp/work_serializer.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/resolved_address.h"
#include "src/core/lib/promise/cancel_callback.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/exec_ctx_wakeup_scheduler.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/loop.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/promise.h"
#include "src/core/lib/promise/sleep.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/client_call.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/call_spine.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/load_balancing/backend_metric_parser.h"
#include "src/core/load_balancing/child_policy_handler.h"
#include "src/core/load_balancing/lb_policy.h"
#include "src/core/load_balancing/lb_policy_registry.h"
#include "src/core/load_balancing/subchannel_interface.h"
#include "src/core/resolver/endpoint_addresses.h"
#include "src/core/resolver/resolver_registry.h"
#include "src/core/service_config/service_config_call_data.h"
#include "src/core/service_config/service_config_impl.h"
#include "src/core/telemetry/metrics.h"
#include "src/core/util/json/json.h"
@ -129,7 +112,8 @@ extern TraceFlag grpc_client_channel_lb_call_trace;
class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
public:
explicit ResolverResultHandler(RefCountedPtr<ClientChannel> client_channel)
explicit ResolverResultHandler(
WeakRefCountedPtr<ClientChannel> client_channel)
: client_channel_(std::move(client_channel)) {}
~ResolverResultHandler() override {
@ -145,7 +129,7 @@ class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
}
private:
RefCountedPtr<ClientChannel> client_channel_;
WeakRefCountedPtr<ClientChannel> client_channel_;
};
//
@ -163,7 +147,7 @@ class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
class ClientChannel::SubchannelWrapper
: public SubchannelInterfaceWithCallDestination {
public:
SubchannelWrapper(RefCountedPtr<ClientChannel> client_channel,
SubchannelWrapper(WeakRefCountedPtr<ClientChannel> client_channel,
RefCountedPtr<Subchannel> subchannel);
~SubchannelWrapper() override;
@ -210,7 +194,7 @@ class ClientChannel::SubchannelWrapper
}
};
RefCountedPtr<ClientChannel> client_channel_;
WeakRefCountedPtr<ClientChannel> client_channel_;
RefCountedPtr<Subchannel> subchannel_;
// Maps from the address of the watcher passed to us by the LB policy
// to the address of the WrapperWatcher that we passed to the underlying
@ -333,7 +317,7 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
};
ClientChannel::SubchannelWrapper::SubchannelWrapper(
RefCountedPtr<ClientChannel> client_channel,
WeakRefCountedPtr<ClientChannel> client_channel,
RefCountedPtr<Subchannel> subchannel)
: SubchannelInterfaceWithCallDestination(
GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)
@ -450,7 +434,7 @@ class ClientChannel::ClientChannelControlHelper
: public LoadBalancingPolicy::ChannelControlHelper {
public:
explicit ClientChannelControlHelper(
RefCountedPtr<ClientChannel> client_channel)
WeakRefCountedPtr<ClientChannel> client_channel)
: client_channel_(std::move(client_channel)) {}
~ClientChannelControlHelper() override {
@ -552,7 +536,7 @@ class ClientChannel::ClientChannelControlHelper
return channelz::ChannelTrace::Error;
}
RefCountedPtr<ClientChannel> client_channel_;
WeakRefCountedPtr<ClientChannel> client_channel_;
};
//
@ -571,9 +555,8 @@ RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
} // namespace
absl::StatusOr<OrphanablePtr<Channel>> ClientChannel::Create(
absl::StatusOr<RefCountedPtr<Channel>> ClientChannel::Create(
std::string target, ChannelArgs channel_args) {
gpr_log(GPR_ERROR, "ARGS: %s", channel_args.ToString().c_str());
// Get URI to resolve, using proxy mapper if needed.
if (target.empty()) {
return absl::InternalError("target URI is empty in client channel");
@ -617,7 +600,7 @@ absl::StatusOr<OrphanablePtr<Channel>> ClientChannel::Create(
"Missing event engine in args for client channel");
}
// Success. Construct channel.
return MakeOrphanable<ClientChannel>(
return MakeRefCounted<ClientChannel>(
std::move(target), std::move(channel_args), std::move(uri_to_resolve),
std::move(*default_service_config), client_channel_factory,
call_destination_factory);
@ -684,11 +667,14 @@ ClientChannel::~ClientChannel() {
}
}
void ClientChannel::Orphan() {
void ClientChannel::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: shutting down", this);
}
auto self = RefAsSubclass<ClientChannel>();
// Weird capture then copy needed to satisfy thread safety analysis,
// otherwise it seems to fail to recognize the correct lock is taken in the
// lambda.
auto self = WeakRefAsSubclass<ClientChannel>();
work_serializer_->Run(
[self]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(*self->work_serializer_) {
self->DestroyResolverAndLbPolicyLocked();
@ -698,7 +684,6 @@ void ClientChannel::Orphan() {
// timer from being reset by other threads.
idle_state_.IncreaseCallCount();
idle_activity_.Reset();
Unref();
}
grpc_connectivity_state ClientChannel::CheckConnectivityState(
@ -710,7 +695,7 @@ grpc_connectivity_state ClientChannel::CheckConnectivityState(
grpc_connectivity_state state =
ABSL_TS_UNCHECKED_READ(state_tracker_).state();
if (state == GRPC_CHANNEL_IDLE && try_to_connect) {
auto self = RefAsSubclass<ClientChannel>(); // Held by callback.
auto self = WeakRefAsSubclass<ClientChannel>(); // Held by callback.
work_serializer_->Run(
[self]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(*self->work_serializer_) {
self->TryToConnectLocked();
@ -801,34 +786,26 @@ void ClientChannel::Ping(grpc_completion_queue*, void*) {
Crash("not implemented");
}
grpc_call* ClientChannel::CreateCall(grpc_call*, uint32_t,
grpc_completion_queue*, grpc_pollset_set*,
Slice, absl::optional<Slice>, Timestamp,
bool) {
// TODO(ctiller): code to convert from C-core batch API to v3 call, then
// invoke CreateCall(client_initial_metadata, arena)
// TODO(ctiller): make sure call holds a ref to ClientChannel for its entire
// lifetime
Crash("not implemented");
return nullptr;
grpc_call* ClientChannel::CreateCall(
grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, grpc_pollset_set* /*pollset_set_alternative*/,
Slice path, absl::optional<Slice> authority, Timestamp deadline, bool) {
return MakeClientCall(parent_call, propagation_mask, cq, std::move(path),
std::move(authority), false, deadline,
compression_options(), event_engine_.get(),
call_arena_allocator()->MakeArena(), Ref());
}
CallInitiator ClientChannel::CreateCall(
ClientMetadataHandle client_initial_metadata) {
void ClientChannel::StartCall(UnstartedCallHandler unstarted_handler) {
// Increment call count.
if (idle_timeout_ != Duration::Zero()) idle_state_.IncreaseCallCount();
// Exit IDLE if needed.
CheckConnectivityState(/*try_to_connect=*/true);
// Create an initiator/unstarted-handler pair.
auto call =
MakeCallPair(std::move(client_initial_metadata), event_engine_.get(),
call_arena_allocator()->MakeArena());
// Spawn a promise to wait for the resolver result.
// This will eventually start the call.
call.initiator.SpawnGuardedUntilCallCompletes(
unstarted_handler.SpawnGuardedUntilCallCompletes(
"wait-for-name-resolution",
[self = RefAsSubclass<ClientChannel>(),
unstarted_handler = std::move(call.handler)]() mutable {
[self = RefAsSubclass<ClientChannel>(), unstarted_handler]() mutable {
const bool wait_for_ready =
unstarted_handler.UnprocessedClientInitialMetadata()
.GetOrCreatePointer(WaitForReady())
@ -878,8 +855,6 @@ CallInitiator ClientChannel::CreateCall(
return absl::OkStatus();
});
});
// Return the initiator.
return std::move(call.initiator);
}
void ClientChannel::CreateResolverLocked() {
@ -889,7 +864,8 @@ void ClientChannel::CreateResolverLocked() {
}
resolver_ = CoreConfiguration::Get().resolver_registry().CreateResolver(
uri_to_resolve_, channel_args_, nullptr, work_serializer_,
std::make_unique<ResolverResultHandler>(RefAsSubclass<ClientChannel>()));
std::make_unique<ResolverResultHandler>(
WeakRefAsSubclass<ClientChannel>()));
// Since the validity of the args was checked when the channel was created,
// CreateResolver() must return a non-null result.
CHECK(resolver_ != nullptr);
@ -919,7 +895,8 @@ void ClientChannel::DestroyResolverAndLbPolicyLocked() {
lb_policy_.get());
}
lb_policy_.reset();
picker_.Set(nullptr);
picker_.Set(MakeRefCounted<LoadBalancingPolicy::DropPicker>(
absl::UnavailableError("Channel shutdown")));
}
}
}
@ -1165,8 +1142,8 @@ absl::Status ClientChannel::CreateOrUpdateLbPolicyLocked(
update_args.config = std::move(lb_policy_config);
update_args.resolution_note = std::move(result.resolution_note);
// Remove the config selector from channel args so that we're not holding
// unnecessary refs that cause it to be destroyed somewhere other than in the
// WorkSerializer.
// unnecessary refs that cause it to be destroyed somewhere other than in
// the WorkSerializer.
update_args.args = result.args.Remove(GRPC_ARG_CONFIG_SELECTOR);
// Add health check service name to channel args.
if (health_check_service_name.has_value()) {
@ -1200,7 +1177,7 @@ OrphanablePtr<LoadBalancingPolicy> ClientChannel::CreateLbPolicyLocked(
lb_policy_args.work_serializer = work_serializer_;
lb_policy_args.channel_control_helper =
std::make_unique<ClientChannelControlHelper>(
RefAsSubclass<ClientChannel>());
WeakRefAsSubclass<ClientChannel>());
lb_policy_args.args = args;
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
@ -1305,7 +1282,7 @@ void ClientChannel::StartIdleTimer() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: idle timer started", this);
}
auto self = RefAsSubclass<ClientChannel>();
auto self = WeakRefAsSubclass<ClientChannel>();
auto promise = Loop([self]() {
return TrySeq(Sleep(Timestamp::Now() + self->idle_timeout_),
[self]() -> Poll<LoopCtl<absl::Status>> {
@ -1359,7 +1336,7 @@ absl::Status ClientChannel::ApplyServiceConfigToCall(
return MaybeRewriteIllegalStatusCode(call_config_status, "ConfigSelector");
}
// Apply our own method params to the call.
auto* method_params = static_cast<ClientChannelMethodParsedConfig*>(
auto* method_params = DownCast<ClientChannelMethodParsedConfig*>(
service_config_call_data->GetMethodParsedConfig(
service_config_parser_index_));
if (method_params != nullptr) {
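
A recurring change in the client_channel.cc hunks above is that the resolver result handler, the subchannel wrapper, and the LB control helper now hold WeakRefCountedPtr<ClientChannel> instead of RefCountedPtr<ClientChannel>, with shutdown moving from Orphan() to Orphaned(). The sketch below uses std::shared_ptr and std::weak_ptr as a stand-in (it is not gRPC's RefCounted machinery) to show the cycle that weak back-references avoid: if the channel owns its helper and the helper held a strong reference back, neither side could ever reach a zero refcount.

#include <iostream>
#include <memory>

struct Channel;

struct ResolverHandler {
  std::weak_ptr<Channel> channel;  // weak back-reference: no ownership cycle
  void OnResult() {
    if (auto ch = channel.lock()) {
      // the channel is still alive; safe to use it here
    }
  }
};

struct Channel {
  std::shared_ptr<ResolverHandler> handler;
  ~Channel() { std::cout << "channel destroyed\n"; }
};

int main() {
  auto channel = std::make_shared<Channel>();
  channel->handler = std::make_shared<ResolverHandler>();
  channel->handler->channel = channel;  // weak, so the channel can still die
  channel.reset();                      // prints "channel destroyed"
  return 0;
}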

@ -57,7 +57,7 @@ class ClientChannel : public Channel {
~CallDestinationFactory() = default;
};
static absl::StatusOr<OrphanablePtr<Channel>> Create(
static absl::StatusOr<RefCountedPtr<Channel>> Create(
std::string target, ChannelArgs channel_args);
// Do not instantiate directly -- use Create() instead.
@ -69,7 +69,7 @@ class ClientChannel : public Channel {
~ClientChannel() override;
void Orphan() override;
void Orphaned() override;
grpc_call* CreateCall(grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq,
@ -77,7 +77,7 @@ class ClientChannel : public Channel {
Slice path, absl::optional<Slice> authority,
Timestamp deadline, bool registered_method) override;
CallInitiator CreateCall(ClientMetadataHandle client_initial_metadata);
void StartCall(UnstartedCallHandler unstarted_handler) override;
grpc_event_engine::experimental::EventEngine* event_engine() const override {
return event_engine_.get();

@ -304,119 +304,12 @@ class ClientChannelFilter::FilterBasedCallData final
grpc_error_handle cancel_error_;
};
class ClientChannelFilter::PromiseBasedCallData final
: public ClientChannelFilter::CallData {
public:
explicit PromiseBasedCallData(ClientChannelFilter* chand) : chand_(chand) {}
~PromiseBasedCallData() override {
if (was_queued_ && client_initial_metadata_ != nullptr) {
MutexLock lock(&chand_->resolution_mu_);
RemoveCallFromResolverQueuedCallsLocked();
chand_->resolver_queued_calls_.erase(this);
}
}
ArenaPromise<absl::StatusOr<CallArgs>> MakeNameResolutionPromise(
CallArgs call_args) {
pollent_ = NowOrNever(call_args.polling_entity->WaitAndCopy()).value();
client_initial_metadata_ = std::move(call_args.client_initial_metadata);
// If we're still in IDLE, we need to start resolving.
if (GPR_UNLIKELY(chand_->CheckConnectivityState(false) ==
GRPC_CHANNEL_IDLE)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: %striggering exit idle", chand_,
this, GetContext<Activity>()->DebugTag().c_str());
}
// Bounce into the control plane work serializer to start resolving.
GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ExitIdle");
chand_->work_serializer_->Run(
[chand = chand_]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*chand_->work_serializer_) {
chand->CheckConnectivityState(/*try_to_connect=*/true);
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_, "ExitIdle");
},
DEBUG_LOCATION);
}
return [this, call_args = std::move(
call_args)]() mutable -> Poll<absl::StatusOr<CallArgs>> {
auto result = CheckResolution(was_queued_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: %sCheckResolution returns %s",
chand_, this, GetContext<Activity>()->DebugTag().c_str(),
result.has_value() ? result->ToString().c_str() : "Pending");
}
if (!result.has_value()) return Pending{};
if (!result->ok()) return *result;
call_args.client_initial_metadata = std::move(client_initial_metadata_);
return std::move(call_args);
};
}
private:
ClientChannelFilter* chand() const override { return chand_; }
Arena* arena() const override { return GetContext<Arena>(); }
grpc_polling_entity* pollent() override { return &pollent_; }
grpc_metadata_batch* send_initial_metadata() override {
return client_initial_metadata_.get();
}
void OnAddToQueueLocked() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannelFilter::resolution_mu_) {
waker_ = GetContext<Activity>()->MakeNonOwningWaker();
was_queued_ = true;
}
void RetryCheckResolutionLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
&ClientChannelFilter::resolution_mu_) override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: RetryCheckResolutionLocked(): %s",
chand_, this, waker_.ActivityDebugTag().c_str());
}
waker_.WakeupAsync();
}
void ResetDeadline(Duration timeout) override {
Call* call = GetContext<Call>();
CallContext* call_context = GetContext<CallContext>();
const Timestamp per_method_deadline =
Timestamp::FromCycleCounterRoundUp(call_context->call_start_time()) +
timeout;
call->UpdateDeadline(per_method_deadline);
}
ClientChannelFilter* chand_;
grpc_polling_entity pollent_;
ClientMetadataHandle client_initial_metadata_;
bool was_queued_ = false;
Waker waker_ ABSL_GUARDED_BY(&ClientChannelFilter::resolution_mu_);
};
//
// Filter vtable
//
const grpc_channel_filter ClientChannelFilter::kFilterVtableWithPromises = {
const grpc_channel_filter ClientChannelFilter::kFilter = {
ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch,
ClientChannelFilter::MakeCallPromise,
/* init_call: */ nullptr,
ClientChannelFilter::StartTransportOp,
sizeof(ClientChannelFilter::FilterBasedCallData),
ClientChannelFilter::FilterBasedCallData::Init,
ClientChannelFilter::FilterBasedCallData::SetPollent,
ClientChannelFilter::FilterBasedCallData::Destroy,
sizeof(ClientChannelFilter),
ClientChannelFilter::Init,
grpc_channel_stack_no_post_init,
ClientChannelFilter::Destroy,
ClientChannelFilter::GetChannelInfo,
"client-channel",
};
const grpc_channel_filter ClientChannelFilter::kFilterVtableWithoutPromises = {
ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch,
nullptr,
/* init_call: */ nullptr,
ClientChannelFilter::StartTransportOp,
sizeof(ClientChannelFilter::FilterBasedCallData),
ClientChannelFilter::FilterBasedCallData::Init,
@ -466,19 +359,6 @@ class DynamicTerminationFilter final {
static void GetChannelInfo(grpc_channel_element* /*elem*/,
const grpc_channel_info* /*info*/) {}
static ArenaPromise<ServerMetadataHandle> MakeCallPromise(
grpc_channel_element* elem, CallArgs call_args, NextPromiseFactory) {
auto* chand = static_cast<DynamicTerminationFilter*>(elem->channel_data);
return chand->chand_->CreateLoadBalancedCallPromise(
std::move(call_args),
[]() {
auto* service_config_call_data =
GetServiceConfigCallData(GetContext<Arena>());
service_config_call_data->Commit();
},
/*is_transparent_retry=*/false);
}
private:
explicit DynamicTerminationFilter(const ChannelArgs& args)
: chand_(args.GetObject<ClientChannelFilter>()) {}
@ -559,8 +439,6 @@ class DynamicTerminationFilter::CallData final {
const grpc_channel_filter DynamicTerminationFilter::kFilterVtable = {
DynamicTerminationFilter::CallData::StartTransportStreamOpBatch,
DynamicTerminationFilter::MakeCallPromise,
/* init_call: */ nullptr,
DynamicTerminationFilter::StartTransportOp,
sizeof(DynamicTerminationFilter::CallData),
DynamicTerminationFilter::CallData::Init,
@ -1190,8 +1068,7 @@ class ClientChannelFilter::ClientChannelControlHelper final
grpc_error_handle ClientChannelFilter::Init(grpc_channel_element* elem,
grpc_channel_element_args* args) {
CHECK(args->is_last);
CHECK(elem->filter == &kFilterVtableWithPromises ||
elem->filter == &kFilterVtableWithoutPromises);
CHECK(elem->filter == &kFilter);
grpc_error_handle error;
new (elem->channel_data) ClientChannelFilter(args, &error);
return error;
@ -1308,21 +1185,6 @@ ClientChannelFilter::~ClientChannelFilter() {
grpc_pollset_set_destroy(interested_parties_);
}
ArenaPromise<ServerMetadataHandle> ClientChannelFilter::MakeCallPromise(
grpc_channel_element* elem, CallArgs call_args, NextPromiseFactory) {
auto* chand = static_cast<ClientChannelFilter*>(elem->channel_data);
// TODO(roth): Is this the right lifetime story for calld?
auto* calld = GetContext<Arena>()->ManagedNew<PromiseBasedCallData>(chand);
return TrySeq(
// Name resolution.
calld->MakeNameResolutionPromise(std::move(call_args)),
// Dynamic filter stack.
[calld](CallArgs call_args) mutable {
return calld->dynamic_filters()->channel_stack()->MakeClientCallPromise(
std::move(call_args));
});
}
OrphanablePtr<ClientChannelFilter::FilterBasedLoadBalancedCall>
ClientChannelFilter::CreateLoadBalancedCall(
const grpc_call_element_args& args, grpc_polling_entity* pollent,
@ -1335,17 +1197,6 @@ ClientChannelFilter::CreateLoadBalancedCall(
std::move(on_commit), is_transparent_retry));
}
ArenaPromise<ServerMetadataHandle>
ClientChannelFilter::CreateLoadBalancedCallPromise(
CallArgs call_args, absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry) {
OrphanablePtr<PromiseBasedLoadBalancedCall> lb_call(
GetContext<Arena>()->New<PromiseBasedLoadBalancedCall>(
this, std::move(on_commit), is_transparent_retry));
auto* call_ptr = lb_call.get();
return call_ptr->MakeCallPromise(std::move(call_args), std::move(lb_call));
}
void ClientChannelFilter::ReprocessQueuedResolverCalls() {
for (CallData* calld : resolver_queued_calls_) {
calld->RemoveCallFromResolverQueuedCallsLocked();
@ -3443,7 +3294,7 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::CreateSubchannelCall() {
connected_subchannel()->Ref(), pollent_, path->Ref(), /*start_time=*/0,
arena()->GetContext<Call>()->deadline(),
// TODO(roth): When we implement hedging support, we will probably
// need to use a separate arena for each subchannel call.
// need to use a separate call arena for each subchannel call.
arena(), call_combiner_};
grpc_error_handle error;
subchannel_call_ = SubchannelCall::Create(std::move(call_args), &error);
@ -3463,146 +3314,4 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::CreateSubchannelCall() {
}
}
//
// ClientChannelFilter::PromiseBasedLoadBalancedCall
//
ClientChannelFilter::PromiseBasedLoadBalancedCall::PromiseBasedLoadBalancedCall(
ClientChannelFilter* chand, absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry)
: LoadBalancedCall(chand, GetContext<Arena>(), std::move(on_commit),
is_transparent_retry) {}
ArenaPromise<ServerMetadataHandle>
ClientChannelFilter::PromiseBasedLoadBalancedCall::MakeCallPromise(
CallArgs call_args, OrphanablePtr<PromiseBasedLoadBalancedCall> lb_call) {
pollent_ = NowOrNever(call_args.polling_entity->WaitAndCopy()).value();
// Record ops in tracer.
if (call_attempt_tracer() != nullptr) {
call_attempt_tracer()->RecordSendInitialMetadata(
call_args.client_initial_metadata.get());
// TODO(ctiller): Find a way to do this without registering a no-op mapper.
call_args.client_to_server_messages->InterceptAndMapWithHalfClose(
[](MessageHandle message) { return message; }, // No-op.
[this]() {
// TODO(roth): Change CallTracer API to not pass metadata
// batch to this method, since the batch is always empty.
grpc_metadata_batch metadata;
call_attempt_tracer()->RecordSendTrailingMetadata(&metadata);
});
}
// Extract peer name from server initial metadata.
call_args.server_initial_metadata->InterceptAndMap(
[self = lb_call->RefAsSubclass<PromiseBasedLoadBalancedCall>()](
ServerMetadataHandle metadata) {
if (self->call_attempt_tracer() != nullptr) {
self->call_attempt_tracer()->RecordReceivedInitialMetadata(
metadata.get());
}
Slice* peer_string = metadata->get_pointer(PeerString());
if (peer_string != nullptr) self->peer_string_ = peer_string->Ref();
return metadata;
});
client_initial_metadata_ = std::move(call_args.client_initial_metadata);
return OnCancel(
Map(TrySeq(
// LB pick.
[this]() -> Poll<absl::Status> {
auto result = PickSubchannel(was_queued_);
if (GRPC_TRACE_FLAG_ENABLED(
grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: %sPickSubchannel() returns %s",
chand(), this,
GetContext<Activity>()->DebugTag().c_str(),
result.has_value() ? result->ToString().c_str()
: "Pending");
}
if (result == absl::nullopt) return Pending{};
return std::move(*result);
},
[this, call_args = std::move(call_args)]() mutable
-> ArenaPromise<ServerMetadataHandle> {
call_args.client_initial_metadata =
std::move(client_initial_metadata_);
return connected_subchannel()->MakeCallPromise(
std::move(call_args));
}),
// Record call completion.
[this](ServerMetadataHandle metadata) {
if (call_attempt_tracer() != nullptr ||
lb_subchannel_call_tracker() != nullptr) {
absl::Status status;
grpc_status_code code = metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN);
if (code != GRPC_STATUS_OK) {
absl::string_view message;
if (const auto* grpc_message =
metadata->get_pointer(GrpcMessageMetadata())) {
message = grpc_message->as_string_view();
}
status =
absl::Status(static_cast<absl::StatusCode>(code), message);
}
RecordCallCompletion(status, metadata.get(),
&GetContext<CallContext>()
->call_stats()
->transport_stream_stats,
peer_string_.as_string_view());
}
RecordLatency();
return metadata;
}),
[lb_call = std::move(lb_call)]() {
// If the waker is pending, then we need to remove ourselves from
// the list of queued LB calls.
if (!lb_call->waker_.is_unwakeable()) {
MutexLock lock(&lb_call->chand()->lb_mu_);
lb_call->Commit();
// Remove pick from list of queued picks.
lb_call->RemoveCallFromLbQueuedCallsLocked();
// Remove from queued picks list.
lb_call->chand()->lb_queued_calls_.erase(lb_call.get());
}
// TODO(ctiller): We don't have access to the call's actual status
// here, so we just assume CANCELLED. We could change this to use
// CallFinalization instead of OnCancel() so that we can get the
// actual status. But we should also have access to the trailing
// metadata, which we don't have in either case. Ultimately, we
// need a better story for code that must run at the end of a call
// in both the cancellation and non-cancellation cases and that needs
// access to server trailing metadata and the call's real status.
if (lb_call->call_attempt_tracer() != nullptr) {
lb_call->call_attempt_tracer()->RecordCancel(
absl::CancelledError("call cancelled"));
}
if (lb_call->call_attempt_tracer() != nullptr ||
lb_call->lb_subchannel_call_tracker() != nullptr) {
// If we were cancelled without recording call completion, then
// record call completion here, as best we can. We assume status
// CANCELLED in this case.
lb_call->RecordCallCompletion(absl::CancelledError("call cancelled"),
nullptr, nullptr, "");
}
});
}
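The cancellation arm above only has to undo a queued pick when the waker is still armed: in that case it takes lb_mu_, commits, and erases itself from the queued-picks set. A small self-contained analogue of that remove-from-queue-only-if-still-queued cleanup, with a plain bool standing in for the waker check and std::set for the queue (the names here are illustrative, not gRPC types):

#include <mutex>
#include <set>

struct PendingPick {
  bool queued = false;  // Stand-in for "waker_ is still wakeable".
};

struct ChannelState {
  std::mutex lb_mu;
  std::set<PendingPick*> lb_queued_picks;
};

// Cancellation path: skip the lock entirely when the pick never queued,
// otherwise erase it under the same mutex that guards the queue.
void CleanupOnCancel(ChannelState& chand, PendingPick& pick) {
  if (!pick.queued) return;
  std::lock_guard<std::mutex> lock(chand.lb_mu);
  chand.lb_queued_picks.erase(&pick);
  pick.queued = false;
}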
grpc_metadata_batch*
ClientChannelFilter::PromiseBasedLoadBalancedCall::send_initial_metadata()
const {
return client_initial_metadata_.get();
}
void ClientChannelFilter::PromiseBasedLoadBalancedCall::OnAddToQueueLocked() {
waker_ = GetContext<Activity>()->MakeNonOwningWaker();
was_queued_ = true;
}
void ClientChannelFilter::PromiseBasedLoadBalancedCall::RetryPickLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: RetryPickLocked()", chand(), this);
}
waker_.WakeupAsync();
}
} // namespace grpc_core

@ -57,8 +57,6 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/transport/connectivity_state.h"
@ -99,12 +97,10 @@ namespace grpc_core {
class ClientChannelFilter final {
public:
static const grpc_channel_filter kFilterVtableWithPromises;
static const grpc_channel_filter kFilterVtableWithoutPromises;
static const grpc_channel_filter kFilter;
class LoadBalancedCall;
class FilterBasedLoadBalancedCall;
class PromiseBasedLoadBalancedCall;
// Flag that this object gets stored in channel args as a raw pointer.
struct RawPointerChannelArgTag {};
@ -112,10 +108,6 @@ class ClientChannelFilter final {
return "grpc.internal.client_channel_filter";
}
static ArenaPromise<ServerMetadataHandle> MakeCallPromise(
grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next_promise_factory);
grpc_connectivity_state CheckConnectivityState(bool try_to_connect);
// Starts a one-time connectivity state watch. When the channel's state
@ -160,14 +152,9 @@ class ClientChannelFilter final {
grpc_closure* on_call_destruction_complete,
absl::AnyInvocable<void()> on_commit, bool is_transparent_retry);
ArenaPromise<ServerMetadataHandle> CreateLoadBalancedCallPromise(
CallArgs call_args, absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry);
private:
class CallData;
class FilterBasedCallData;
class PromiseBasedCallData;
class ResolverResultHandler;
class SubchannelWrapper;
class ClientChannelControlHelper;
@ -581,32 +568,6 @@ class ClientChannelFilter::FilterBasedLoadBalancedCall final
grpc_transport_stream_op_batch* pending_batches_[MAX_PENDING_BATCHES] = {};
};
class ClientChannelFilter::PromiseBasedLoadBalancedCall final
: public ClientChannelFilter::LoadBalancedCall {
public:
PromiseBasedLoadBalancedCall(ClientChannelFilter* chand,
absl::AnyInvocable<void()> on_commit,
bool is_transparent_retry);
ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args, OrphanablePtr<PromiseBasedLoadBalancedCall> lb_call);
private:
grpc_polling_entity* pollent() override { return &pollent_; }
grpc_metadata_batch* send_initial_metadata() const override;
void RetryPickLocked() override;
void OnAddToQueueLocked() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannelFilter::lb_mu_);
grpc_polling_entity pollent_;
ClientMetadataHandle client_initial_metadata_;
Waker waker_;
bool was_queued_ = false;
Slice peer_string_;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_CLIENT_CHANNEL_CLIENT_CHANNEL_FILTER_H

@ -31,24 +31,11 @@
namespace grpc_core {
namespace {
bool IsEverythingBelowClientChannelPromiseSafe(const ChannelArgs& args) {
return !args.GetBool(GRPC_ARG_ENABLE_RETRIES).value_or(true);
}
} // namespace
void BuildClientChannelConfiguration(CoreConfiguration::Builder* builder) {
internal::ClientChannelServiceConfigParser::Register(builder);
internal::RetryServiceConfigParser::Register(builder);
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_CHANNEL,
&ClientChannelFilter::kFilterVtableWithPromises)
.If(IsEverythingBelowClientChannelPromiseSafe)
.Terminal();
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_CHANNEL,
&ClientChannelFilter::kFilterVtableWithoutPromises)
.IfNot(IsEverythingBelowClientChannelPromiseSafe)
->RegisterV2Filter<ClientChannelFilter>(GRPC_CLIENT_CHANNEL)
.Terminal();
}

@ -18,6 +18,7 @@
#include "src/core/client_channel/client_channel_internal.h"
#include "src/core/client_channel/subchannel.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/promise/loop.h"
#include "src/core/telemetry/call_tracer.h"
@ -289,6 +290,7 @@ void LoadBalancedCallDestination::StartCall(
[unstarted_handler, &last_picker](
RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>
picker) mutable {
CHECK_NE(picker.get(), nullptr);
last_picker = std::move(picker);
// Returns 3 possible things:
// - Continue to queue the pick
@ -330,4 +332,20 @@ void LoadBalancedCallDestination::StartCall(
});
}
} // namespace grpc_core
void RegisterLoadBalancedCallDestination(CoreConfiguration::Builder* builder) {
class LoadBalancedCallDestinationFactory final
: public ClientChannel::CallDestinationFactory {
public:
RefCountedPtr<UnstartedCallDestination> CreateCallDestination(
ClientChannel::PickerObservable picker) override {
return MakeRefCounted<LoadBalancedCallDestination>(std::move(picker));
}
};
builder->channel_args_preconditioning()->RegisterStage([](ChannelArgs args) {
return args.SetObject(
NoDestructSingleton<LoadBalancedCallDestinationFactory>::Get());
});
}
} // namespace grpc_core
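For orientation, the preconditioning stage registered above stashes the factory singleton in the channel args so that the client channel can recover it later. A minimal sketch of that consumer-side lookup, assuming only the ChannelArgs::GetObject<T>() accessor that pairs with SetObject(); the concrete retrieval site in ClientChannel is not part of this diff and is an assumption here:

// Hypothetical consumer-side helper; illustrative only.
ClientChannel::CallDestinationFactory* LookupCallDestinationFactory(
    const ChannelArgs& args) {
  // SetObject() in the stage above keys the singleton by its static type,
  // so GetObject() on the same type returns the raw pointer (nullptr if the
  // preconditioning stage never ran for this channel).
  return args.GetObject<ClientChannel::CallDestinationFactory>();
}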

@ -139,8 +139,6 @@ const RetryMethodConfig* RetryFilter::GetRetryPolicy(Arena* arena) {
const grpc_channel_filter RetryFilter::kVtable = {
RetryFilter::LegacyCallData::StartTransportStreamOpBatch,
nullptr,
/* init_call: */ nullptr,
RetryFilter::StartTransportOp,
sizeof(RetryFilter::LegacyCallData),
RetryFilter::LegacyCallData::Init,

@ -157,36 +157,6 @@ class LegacyConnectedSubchannel : public ConnectedSubchannel {
channel_stack_->call_stack_size;
}
ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args) override {
// If not using channelz, we just need to call the channel stack.
if (channelz_subchannel() == nullptr) {
return channel_stack_->MakeClientCallPromise(std::move(call_args));
}
// Otherwise, we need to wrap the channel stack promise with code that
// handles the channelz updates.
return OnCancel(
Seq(channel_stack_->MakeClientCallPromise(std::move(call_args)),
[self = Ref()](ServerMetadataHandle metadata) {
channelz::SubchannelNode* channelz_subchannel =
self->channelz_subchannel();
CHECK(channelz_subchannel != nullptr);
if (metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
channelz_subchannel->RecordCallFailed();
} else {
channelz_subchannel->RecordCallSucceeded();
}
return metadata;
}),
[self = Ref()]() {
channelz::SubchannelNode* channelz_subchannel =
self->channelz_subchannel();
CHECK(channelz_subchannel != nullptr);
channelz_subchannel->RecordCallFailed();
});
}
void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) override {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
op->send_ping.on_initiate = on_initiate;
@ -252,10 +222,6 @@ class NewConnectedSubchannel : public ConnectedSubchannel {
size_t GetInitialCallSizeEstimate() const override { return 0; }
ArenaPromise<ServerMetadataHandle> MakeCallPromise(CallArgs) override {
Crash("legacy MakeCallPromise() method called in call v3 impl");
}
void Ping(grpc_closure*, grpc_closure*) override {
Crash("legacy ping method called in call v3 impl");
}
@ -869,8 +835,7 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
bool Subchannel::PublishTransportLocked() {
auto socket_node = std::move(connecting_result_.socket_node);
if (connecting_result_.transport->filter_stack_transport() != nullptr ||
IsChaoticGoodEnabled()) {
if (connecting_result_.transport->filter_stack_transport() != nullptr) {
// Construct channel stack.
// Builder takes ownership of transport.
ChannelStackBuilderImpl builder(

@ -82,8 +82,6 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
// Methods for legacy stack.
virtual grpc_channel_stack* channel_stack() const = 0;
virtual size_t GetInitialCallSizeEstimate() const = 0;
virtual ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args) = 0;
virtual void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) = 0;
protected:

@ -303,15 +303,13 @@ void RegisterLegacyChannelIdleFilters(CoreConfiguration::Builder* builder) {
.If([](const ChannelArgs& channel_args) {
return GetClientIdleTimeout(channel_args) != Duration::Infinity();
});
if (!IsChaoticGoodEnabled()) {
builder->channel_init()
->RegisterV2Filter<LegacyMaxAgeFilter>(GRPC_SERVER_CHANNEL)
.ExcludeFromMinimalStack()
.If([](const ChannelArgs& channel_args) {
return LegacyMaxAgeFilter::Config::FromChannelArgs(channel_args)
.enable();
});
}
builder->channel_init()
->RegisterV2Filter<LegacyMaxAgeFilter>(GRPC_SERVER_CHANNEL)
.ExcludeFromMinimalStack()
.If([](const ChannelArgs& channel_args) {
return LegacyMaxAgeFilter::Config::FromChannelArgs(channel_args)
.enable();
});
}
LegacyMaxAgeFilter::LegacyMaxAgeFilter(grpc_channel_stack* channel_stack,

@ -376,13 +376,14 @@ grpc_channel* grpc_chaotic_good_channel_create(const char* target,
grpc_core::CoreConfiguration::Get()
.channel_args_preconditioning()
.PreconditionChannelArgs(args)
.SetObject(
grpc_core::NoDestructSingleton<
grpc_core::chaotic_good::ChaoticGoodChannelFactory>::Get()),
.SetObject(grpc_core::NoDestructSingleton<
grpc_core::chaotic_good::ChaoticGoodChannelFactory>::Get())
.Set(GRPC_ARG_USE_V3_STACK, true),
GRPC_CLIENT_CHANNEL, nullptr);
if (r.ok()) {
return r->release()->c_ptr();
}
LOG(ERROR) << "Failed to create chaotic good client channel: " << r.status();
error = absl_status_to_grpc_error(r.status());
intptr_t integer;
grpc_status_code status = GRPC_STATUS_INTERNAL;
@ -391,6 +392,6 @@ grpc_channel* grpc_chaotic_good_channel_create(const char* target,
status = static_cast<grpc_status_code>(integer);
}
channel = grpc_lame_client_channel_create(
target, status, "Failed to create secure client channel");
target, status, "Failed to create chaotic good client channel");
return channel;
}

@ -483,20 +483,21 @@ int grpc_server_add_chaotic_good_port(grpc_server* server, const char* addr) {
return 0;
}
int port_num = 0;
std::vector<std::pair<std::string, absl::Status>> error_list;
for (const auto& resolved_addr : resolved_or.value()) {
auto listener = grpc_core::MakeOrphanable<
grpc_core::chaotic_good::ChaoticGoodServerListener>(
core_server, core_server->channel_args());
const auto ee_addr =
grpc_event_engine::experimental::CreateResolvedAddress(resolved_addr);
gpr_log(GPR_INFO, "BIND: %s",
grpc_event_engine::experimental::ResolvedAddressToString(ee_addr)
->c_str());
std::string addr_str =
*grpc_event_engine::experimental::ResolvedAddressToString(ee_addr);
LOG(INFO) << "BIND: " << addr_str;
auto bind_result = listener->Bind(ee_addr);
if (!bind_result.ok()) {
LOG(ERROR) << "Failed to bind to " << addr << ": "
<< bind_result.status().ToString();
return 0;
error_list.push_back(
std::make_pair(std::move(addr_str), bind_result.status()));
continue;
}
if (port_num == 0) {
port_num = bind_result.value();
@ -505,5 +506,16 @@ int grpc_server_add_chaotic_good_port(grpc_server* server, const char* addr) {
}
core_server->AddListener(std::move(listener));
}
if (error_list.size() == resolved_or->size()) {
LOG(ERROR) << "Failed to bind any address for " << addr;
for (const auto& error : error_list) {
LOG(ERROR) << " " << error.first << ": " << error.second;
}
} else if (!error_list.empty()) {
LOG(INFO) << "Failed to bind some addresses for " << addr;
for (const auto& error : error_list) {
LOG(INFO) << " " << error.first << ": " << error.second;
}
}
return port_num;
}
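The bind loop above now collects per-address failures instead of returning on the first one, and only treats the port as unusable when every resolved address failed. A self-contained sketch of the same aggregation policy, using plain strings in place of absl::Status and fprintf in place of the LOG macros:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Returns true when at least one address bound. Logs an error summary when
// everything failed, and only an informational summary on partial failure.
bool SummarizeBindResults(
    const std::string& addr, size_t total_addresses,
    const std::vector<std::pair<std::string, std::string>>& errors) {
  if (errors.size() == total_addresses) {
    std::fprintf(stderr, "Failed to bind any address for %s\n", addr.c_str());
    for (const auto& e : errors) {
      std::fprintf(stderr, "  %s: %s\n", e.first.c_str(), e.second.c_str());
    }
    return false;
  }
  if (!errors.empty()) {
    std::printf("Failed to bind some addresses for %s\n", addr.c_str());
    for (const auto& e : errors) {
      std::printf("  %s: %s\n", e.first.c_str(), e.second.c_str());
    }
  }
  return true;
}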

@ -78,24 +78,27 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
gpr_log(GPR_INFO, "CHAOTIC_GOOD: PushFragmentIntoCall: frame=%s",
frame.ToString().c_str());
}
return TrySeq(If(
frame.message.has_value(),
[&call_initiator, &frame]() mutable {
return call_initiator.PushMessage(
std::move(frame.message->message));
},
[]() -> StatusFlag { return Success{}; }),
[this, call_initiator, end_of_stream = frame.end_of_stream,
stream_id]() mutable -> StatusFlag {
if (end_of_stream) {
call_initiator.FinishSends();
// We have received end_of_stream. It is now safe to remove
// the call from the stream map.
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
}
return Success{};
});
return Seq(If(
frame.message.has_value(),
[&call_initiator, &frame]() mutable {
return call_initiator.PushMessage(
std::move(frame.message->message));
},
[]() -> StatusFlag { return Success{}; }),
[this, call_initiator, end_of_stream = frame.end_of_stream,
stream_id](StatusFlag status) mutable -> StatusFlag {
if (!status.ok() && grpc_chaotic_good_trace.enabled()) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: Failed PushFragmentIntoCall");
}
if (end_of_stream || !status.ok()) {
call_initiator.FinishSends();
// Either we received end_of_stream or the push failed; in both cases
// it is now safe to remove the call from the stream map.
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
}
return Success{};
});
}
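The switch from TrySeq to Seq above matters because TrySeq short-circuits when an earlier step fails, which would have skipped the stream-map cleanup; Seq always runs the continuation and hands it the resulting status. A minimal standalone analogue of the two behaviours, with plain bools standing in for the real promise combinators and StatusFlag:

#include <cstdio>
#include <functional>

// Toy stand-ins: each "step" is an ordinary function returning success or
// failure rather than a pollable promise.
bool RunTrySeqLike(const std::function<bool()>& step,
                   const std::function<bool(bool)>& cleanup) {
  bool ok = step();
  if (!ok) return false;   // TrySeq-like: failure skips the continuation.
  return cleanup(ok);
}

bool RunSeqLike(const std::function<bool()>& step,
                const std::function<bool(bool)>& cleanup) {
  return cleanup(step());  // Seq-like: continuation always runs and sees the status.
}

int main() {
  auto failing_push = [] { return false; };
  auto erase_stream = [](bool ok) {
    std::printf("cleanup ran (push %s)\n", ok ? "succeeded" : "failed");
    return true;  // Mirrors returning Success{} regardless of the push result.
  };
  RunTrySeqLike(failing_push, erase_stream);  // Prints nothing: cleanup skipped.
  RunSeqLike(failing_push, erase_stream);     // Cleanup runs despite the failure.
}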
auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
@ -340,13 +343,14 @@ auto ChaoticGoodServerTransport::TransportReadLoop(
auto ChaoticGoodServerTransport::OnTransportActivityDone(
absl::string_view activity) {
return [this, activity](absl::Status status) {
return [self = RefAsSubclass<ChaoticGoodServerTransport>(),
activity](absl::Status status) {
if (grpc_chaotic_good_trace.enabled()) {
gpr_log(GPR_INFO,
"CHAOTIC_GOOD: OnTransportActivityDone: activity=%s status=%s",
std::string(activity).c_str(), status.ToString().c_str());
}
AbortWithError();
self->AbortWithError();
};
}
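The callback above now captures a strong reference (RefAsSubclass) rather than a bare this, so AbortWithError() can no longer run against a transport that was already destroyed by the time the activity finishes. A minimal standalone analogue of the same lifetime fix, using std::shared_ptr in place of the transport's internal ref-counting:

#include <cstdio>
#include <functional>
#include <memory>

class Worker : public std::enable_shared_from_this<Worker> {
 public:
  // Unsafe variant: captures a raw pointer, so the callback may outlive *this.
  std::function<void()> DoneCallbackUnsafe() {
    return [this] { Abort(); };
  }
  // Fixed variant: the callback keeps the object alive until it actually runs.
  std::function<void()> DoneCallbackSafe() {
    return [self = shared_from_this()] { self->Abort(); };
  }

 private:
  void Abort() { std::printf("aborting\n"); }
};

int main() {
  std::function<void()> cb;
  {
    auto w = std::make_shared<Worker>();
    cb = w->DoneCallbackSafe();  // Holds a reference; dropping 'w' is safe.
  }
  cb();  // Still valid: the lambda owns the last reference.
}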

@ -290,7 +290,7 @@ class Chttp2SecureClientChannelFactory : public ClientChannelFactory {
}
};
absl::StatusOr<OrphanablePtr<Channel>> CreateChannel(const char* target,
absl::StatusOr<RefCountedPtr<Channel>> CreateChannel(const char* target,
const ChannelArgs& args) {
if (target == nullptr) {
LOG(ERROR) << "cannot create channel with NULL target name";

@ -171,9 +171,7 @@ class InprocClientTransport final : public ClientTransport {
};
bool UsePromiseBasedTransport() {
if (!IsPromiseBasedInprocTransportEnabled()) return false;
CHECK(IsPromiseBasedClientCallEnabled());
return true;
return IsPromiseBasedInprocTransportEnabled();
}
OrphanablePtr<InprocClientTransport>
@ -182,7 +180,7 @@ InprocServerTransport::MakeClientTransport() {
RefAsSubclass<InprocServerTransport>());
}
OrphanablePtr<Channel> MakeLameChannel(absl::string_view why,
RefCountedPtr<Channel> MakeLameChannel(absl::string_view why,
absl::Status error) {
gpr_log(GPR_ERROR, "%s: %s", std::string(why).c_str(),
std::string(error.message()).c_str());
@ -191,11 +189,11 @@ OrphanablePtr<Channel> MakeLameChannel(absl::string_view why,
if (grpc_error_get_int(error, StatusIntProperty::kRpcStatus, &integer)) {
status = static_cast<grpc_status_code>(integer);
}
return OrphanablePtr<Channel>(Channel::FromC(grpc_lame_client_channel_create(
return RefCountedPtr<Channel>(Channel::FromC(grpc_lame_client_channel_create(
nullptr, status, std::string(why).c_str())));
}
OrphanablePtr<Channel> MakeInprocChannel(Server* server,
RefCountedPtr<Channel> MakeInprocChannel(Server* server,
ChannelArgs client_channel_args) {
auto transports = MakeInProcessTransportPair(server->channel_args());
auto client_transport = std::move(transports.first);

@ -124,8 +124,7 @@ grpc_error_handle grpc_channel_stack_init(
if (grpc_trace_channel_stack.enabled()) {
LOG(INFO) << "CHANNEL_STACK: init " << name;
for (size_t i = 0; i < filter_count; i++) {
gpr_log(GPR_INFO, "CHANNEL_STACK: filter %s%s", filters[i]->name,
filters[i]->make_call_promise ? " [promise-capable]" : "");
LOG(INFO) << "CHANNEL_STACK: filter " << filters[i]->name;
}
}
@ -297,35 +296,6 @@ grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem) {
void grpc_channel_stack_no_post_init(grpc_channel_stack*,
grpc_channel_element*) {}
namespace {
grpc_core::NextPromiseFactory ClientNext(grpc_channel_element* elem) {
return [elem](grpc_core::CallArgs args) {
return elem->filter->make_call_promise(elem, std::move(args),
ClientNext(elem + 1));
};
}
} // namespace
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle>
grpc_channel_stack::MakeClientCallPromise(grpc_core::CallArgs call_args) {
return ClientNext(grpc_channel_stack_element(this, 0))(std::move(call_args));
}
void grpc_channel_stack::InitClientCallSpine(
grpc_core::CallSpineInterface* call) {
for (size_t i = 0; i < count; i++) {
auto* elem = grpc_channel_stack_element(this, i);
if (elem->filter->init_call == nullptr) {
grpc_core::Crash(
absl::StrCat("Filter '", elem->filter->name,
"' does not support the call-v3 interface"));
}
elem->filter->init_call(elem, call);
}
}
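The removed ClientNext() above composed the per-call promise by having each channel element wrap a factory for the element below it. A self-contained analogue of that composition, with string-transforming "filters" standing in for make_call_promise and the element array (names are illustrative only):

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

using Next = std::function<std::string(std::string)>;
using Filter = std::function<std::string(std::string, Next)>;

// Mirrors ClientNext(): the factory for element i invokes element i and hands
// it a factory for element i + 1, so calling element 0 walks the whole stack.
Next MakeNext(const std::vector<Filter>& stack, size_t i) {
  return [&stack, i](std::string req) {
    if (i == stack.size()) return req;  // Past the last element: terminal.
    return stack[i](std::move(req), MakeNext(stack, i + 1));
  };
}

int main() {
  std::vector<Filter> stack = {
      [](std::string r, Next next) { return next(r + "+auth"); },
      [](std::string r, Next next) { return next(r + "+compress"); },
  };
  std::printf("%s\n", MakeNext(stack, 0)("call").c_str());  // call+auth+compress
}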
void grpc_call_log_op(const char* file, int line, gpr_log_severity severity,
grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {

@ -105,24 +105,6 @@ struct grpc_channel_filter {
// See grpc_call_next_op on how to call the next element in the stack
void (*start_transport_stream_op_batch)(grpc_call_element* elem,
grpc_transport_stream_op_batch* op);
// Create a promise to execute one call.
// If this is non-null, it may be used in preference to
// start_transport_stream_op_batch.
// If this is used in preference to start_transport_stream_op_batch, the
// following can be omitted also:
// - calling init_call_elem, destroy_call_elem, set_pollset_or_pollset_set
// - allocation of memory for call data
// There is an on-going migration to move all filters to providing this, and
// then to drop start_transport_stream_op_batch.
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle> (*make_call_promise)(
grpc_channel_element* elem, grpc_core::CallArgs call_args,
grpc_core::NextPromiseFactory next_promise_factory);
// Register interceptors into a call.
// If this is non-null it may be used in preference to make_call_promise.
// There is an on-going migration to move all filters to providing this, and
// then to drop start_transport_stream_op_batch.
void (*init_call)(grpc_channel_element* elem,
grpc_core::CallSpineInterface* call_spine);
// Called to handle channel level operations - e.g. new calls, or transport
// closure.
// See grpc_channel_next_op on how to call the next element in the stack
@ -233,13 +215,6 @@ struct grpc_channel_stack {
IncrementRefCount();
return grpc_core::RefCountedPtr<grpc_channel_stack>(this);
}
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle>
MakeClientCallPromise(grpc_core::CallArgs call_args);
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle>
MakeServerCallPromise(grpc_core::CallArgs call_args);
void InitClientCallSpine(grpc_core::CallSpineInterface* call);
};
// A call stack tracks a set of related filters for one call, and guarantees

@ -74,11 +74,6 @@ class ChannelStackBuilder {
// Helper to add a filter to the end of the stack.
void AppendFilter(const grpc_channel_filter* filter);
// Determine whether a promise-based call stack is able to be built.
// Iterates each filter and ensures that there's a promise factory there.
// This will go away once the promise conversion is completed.
virtual bool IsPromising() const = 0;
// Build the channel stack.
// After success, *result holds the new channel stack,
// prefix_bytes are allocated before the channel stack,

@ -54,153 +54,12 @@
namespace grpc_core {
namespace {
const grpc_channel_filter* PromiseTracingFilterFor(
const grpc_channel_filter* filter) {
struct DerivedFilter : public grpc_channel_filter {
explicit DerivedFilter(const grpc_channel_filter* filter)
: grpc_channel_filter{
// start_transport_stream_op_batch:
grpc_call_next_op,
// make_call_promise:
[](grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next_promise_factory)
-> ArenaPromise<ServerMetadataHandle> {
auto* source_filter =
static_cast<const DerivedFilter*>(elem->filter)->filter;
gpr_log(
GPR_DEBUG,
"%s[%s] CreateCallPromise: client_initial_metadata=%s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name,
call_args.client_initial_metadata->DebugString().c_str());
return [source_filter, child = next_promise_factory(
std::move(call_args))]() mutable {
gpr_log(GPR_DEBUG, "%s[%s] PollCallPromise: begin",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name);
auto r = child();
if (auto* p = r.value_if_ready()) {
gpr_log(GPR_DEBUG, "%s[%s] PollCallPromise: done: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, (*p)->DebugString().c_str());
} else {
gpr_log(GPR_DEBUG, "%s[%s] PollCallPromise: <<pending>>",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name);
}
return r;
};
},
/* init_call: */
[](grpc_channel_element* elem, CallSpineInterface* call) {
auto* c = DownCast<PipeBasedCallSpine*>(call);
auto* source_filter =
static_cast<const DerivedFilter*>(elem->filter)->filter;
c->client_initial_metadata().receiver.InterceptAndMap(
[source_filter](ClientMetadataHandle md) {
gpr_log(GPR_DEBUG, "%s[%s] OnClientInitialMetadata: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, md->DebugString().c_str());
return md;
});
c->client_to_server_messages().receiver.InterceptAndMap(
[source_filter](MessageHandle msg) {
gpr_log(GPR_DEBUG, "%s[%s] OnClientToServerMessage: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, msg->DebugString().c_str());
return msg;
});
c->server_initial_metadata().sender.InterceptAndMap(
[source_filter](ServerMetadataHandle md) {
gpr_log(GPR_DEBUG, "%s[%s] OnServerInitialMetadata: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, md->DebugString().c_str());
return md;
});
c->server_to_client_messages().sender.InterceptAndMap(
[source_filter](MessageHandle msg) {
gpr_log(GPR_DEBUG, "%s[%s] OnServerToClientMessage: %s",
GetContext<Activity>()->DebugTag().c_str(),
source_filter->name, msg->DebugString().c_str());
return msg;
});
},
grpc_channel_next_op,
/* sizeof_call_data: */ 0,
// init_call_elem:
[](grpc_call_element*, const grpc_call_element_args*) {
return absl::OkStatus();
},
grpc_call_stack_ignore_set_pollset_or_pollset_set,
// destroy_call_elem:
[](grpc_call_element*, const grpc_call_final_info*,
grpc_closure*) {},
// sizeof_channel_data:
0,
// init_channel_elem:
[](grpc_channel_element*, grpc_channel_element_args*) {
return absl::OkStatus();
},
// post_init_channel_elem:
[](grpc_channel_stack*, grpc_channel_element*) {},
// destroy_channel_elem:
[](grpc_channel_element*) {}, grpc_channel_next_get_info,
// name:
nullptr},
filter(filter),
name_str(absl::StrCat(filter->name, ".trace")) {
this->name = name_str.c_str();
}
const grpc_channel_filter* const filter;
const std::string name_str;
};
struct Globals {
Mutex mu;
absl::flat_hash_map<const grpc_channel_filter*,
std::unique_ptr<DerivedFilter>>
map ABSL_GUARDED_BY(mu);
};
auto* globals = NoDestructSingleton<Globals>::Get();
MutexLock lock(&globals->mu);
auto it = globals->map.find(filter);
if (it != globals->map.end()) return it->second.get();
return globals->map.emplace(filter, std::make_unique<DerivedFilter>(filter))
.first->second.get();
}
} // namespace
bool ChannelStackBuilderImpl::IsPromising() const {
for (const auto* filter : stack()) {
if (filter->make_call_promise == nullptr) return false;
}
return true;
}
absl::StatusOr<RefCountedPtr<grpc_channel_stack>>
ChannelStackBuilderImpl::Build() {
std::vector<const grpc_channel_filter*> stack;
const bool is_promising = IsPromising();
const bool is_client =
grpc_channel_stack_type_is_client(channel_stack_type());
const bool client_promise_tracing =
is_client && is_promising && grpc_call_trace.enabled();
const bool server_promise_tracing =
!is_client && is_promising && grpc_call_trace.enabled();
for (const auto* filter : this->stack()) {
if (client_promise_tracing) {
stack.push_back(PromiseTracingFilterFor(filter));
}
stack.push_back(filter);
if (server_promise_tracing) {
stack.push_back(PromiseTracingFilterFor(filter));
}
}
if (server_promise_tracing) {
stack.pop_back(); // connected_channel must be last => can't be traced
}
// calculate the size of the channel stack

@ -34,8 +34,6 @@ class ChannelStackBuilderImpl final : public ChannelStackBuilder {
public:
using ChannelStackBuilder::ChannelStackBuilder;
bool IsPromising() const override;
// Build the channel stack.
// After success, *result holds the new channel stack,
// prefix_bytes are allocated before the channel stack,

@ -74,7 +74,6 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/batch_builder.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
@ -245,683 +244,48 @@ static void connected_channel_get_channel_info(
namespace grpc_core {
namespace {
#if defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL) || \
defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL)
class ConnectedChannelStream : public Orphanable {
public:
explicit ConnectedChannelStream(Transport* transport)
: transport_(transport), stream_(nullptr, StreamDeleter(this)) {
GRPC_STREAM_REF_INIT(
&stream_refcount_, 1,
[](void* p, grpc_error_handle) {
static_cast<ConnectedChannelStream*>(p)->BeginDestroy();
},
this, "ConnectedChannelStream");
}
Transport* transport() { return transport_; }
grpc_closure* stream_destroyed_closure() { return &stream_destroyed_; }
BatchBuilder::Target batch_target() {
return BatchBuilder::Target{transport_, stream_.get(), &stream_refcount_};
}
void IncrementRefCount(const char* reason = "smartptr") {
#ifndef NDEBUG
grpc_stream_ref(&stream_refcount_, reason);
#else
(void)reason;
grpc_stream_ref(&stream_refcount_);
#endif
}
void Unref(const char* reason = "smartptr") {
#ifndef NDEBUG
grpc_stream_unref(&stream_refcount_, reason);
#else
(void)reason;
grpc_stream_unref(&stream_refcount_);
#endif
}
RefCountedPtr<ConnectedChannelStream> InternalRef() {
IncrementRefCount("smartptr");
return RefCountedPtr<ConnectedChannelStream>(this);
}
void Orphan() final {
bool finished = finished_.IsSet();
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Orphan stream, finished: %d",
party_->DebugTag().c_str(), finished);
}
// If we hadn't already observed the stream to be finished, we need to
// cancel it at the transport.
if (!finished) {
party_->Spawn(
"finish",
[self = InternalRef()]() {
if (!self->finished_.IsSet()) {
self->finished_.Set();
}
return Empty{};
},
[](Empty) {});
GetContext<BatchBuilder>()->Cancel(batch_target(),
absl::CancelledError());
}
Unref("orphan connected stream");
}
// Returns a promise that implements the receive message loop.
auto RecvMessages(PipeSender<MessageHandle>* incoming_messages,
bool cancel_on_error);
// Returns a promise that implements the send message loop.
auto SendMessages(PipeReceiver<MessageHandle>* outgoing_messages);
void SetStream(grpc_stream* stream) { stream_.reset(stream); }
grpc_stream* stream() { return stream_.get(); }
grpc_stream_refcount* stream_refcount() { return &stream_refcount_; }
void set_finished() { finished_.Set(); }
auto WaitFinished() { return finished_.Wait(); }
private:
class StreamDeleter {
public:
explicit StreamDeleter(ConnectedChannelStream* impl) : impl_(impl) {}
void operator()(grpc_stream* stream) const {
if (stream == nullptr) return;
impl_->transport()->filter_stack_transport()->DestroyStream(
stream, impl_->stream_destroyed_closure());
}
private:
ConnectedChannelStream* impl_;
};
using StreamPtr = std::unique_ptr<grpc_stream, StreamDeleter>;
void StreamDestroyed() {
call_context_->RunInContext([this] { this->~ConnectedChannelStream(); });
}
void BeginDestroy() {
if (stream_ != nullptr) {
stream_.reset();
} else {
StreamDestroyed();
}
}
Transport* const transport_;
RefCountedPtr<CallContext> const call_context_{
GetContext<CallContext>()->Ref()};
grpc_closure stream_destroyed_ =
MakeMemberClosure<ConnectedChannelStream,
&ConnectedChannelStream::StreamDestroyed>(
this, DEBUG_LOCATION);
grpc_stream_refcount stream_refcount_;
StreamPtr stream_;
Arena* arena_ = GetContext<Arena>();
Party* const party_ = GetContext<Party>();
ExternallyObservableLatch<void> finished_;
const grpc_channel_filter kConnectedFilter{
connected_channel_start_transport_stream_op_batch,
connected_channel_start_transport_op,
sizeof(call_data),
connected_channel_init_call_elem,
set_pollset_or_pollset_set,
connected_channel_destroy_call_elem,
sizeof(channel_data),
connected_channel_init_channel_elem,
+[](grpc_channel_stack* channel_stack, grpc_channel_element* elem) {
// HACK(ctiller): increase call stack size for the channel to make
// space for channel data. We need a cleaner (but performant) way to
// do this, and I'm not sure what that is yet. This is only "safe"
// because call stacks place no additional data after the last call
// element, and the last call element MUST be the connected channel.
auto* transport =
static_cast<channel_data*>(elem->channel_data)->transport;
if (transport->filter_stack_transport() != nullptr) {
channel_stack->call_stack_size +=
transport->filter_stack_transport()->SizeOfStream();
}
},
connected_channel_destroy_channel_elem,
connected_channel_get_channel_info,
"connected",
};
auto ConnectedChannelStream::RecvMessages(
PipeSender<MessageHandle>* incoming_messages, bool cancel_on_error) {
return Loop([self = InternalRef(), cancel_on_error,
incoming_messages = std::move(*incoming_messages)]() mutable {
return Seq(
GetContext<BatchBuilder>()->ReceiveMessage(self->batch_target()),
[cancel_on_error, &incoming_messages](
absl::StatusOr<absl::optional<MessageHandle>> status) mutable {
bool has_message = status.ok() && status->has_value();
auto publish_message = [&incoming_messages, &status]() {
auto pending_message = std::move(**status);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[connected] RecvMessage: received payload of %" PRIdPTR
" bytes",
GetContext<Activity>()->DebugTag().c_str(),
pending_message->payload()->Length());
}
return Map(incoming_messages.Push(std::move(pending_message)),
[](bool ok) -> LoopCtl<absl::Status> {
if (!ok) {
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_INFO,
"%s[connected] RecvMessage: failed to "
"push message towards the application",
GetContext<Activity>()->DebugTag().c_str());
}
return absl::OkStatus();
}
return Continue{};
});
};
auto publish_close = [cancel_on_error, &incoming_messages,
&status]() mutable {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[connected] RecvMessage: reached end of stream with "
"status:%s",
GetContext<Activity>()->DebugTag().c_str(),
status.status().ToString().c_str());
}
if (cancel_on_error && !status.ok()) {
incoming_messages.CloseWithError();
} else {
incoming_messages.Close();
}
return Immediate(LoopCtl<absl::Status>(status.status()));
};
return If(has_message, std::move(publish_message),
std::move(publish_close));
});
});
}
auto ConnectedChannelStream::SendMessages(
PipeReceiver<MessageHandle>* outgoing_messages) {
return ForEach(std::move(*outgoing_messages),
[self = InternalRef()](MessageHandle message) {
return GetContext<BatchBuilder>()->SendMessage(
self->batch_target(), std::move(message));
});
}
#endif // defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL) ||
// defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL)
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
ArenaPromise<ServerMetadataHandle> MakeClientCallPromise(Transport* transport,
CallArgs call_args,
NextPromiseFactory) {
OrphanablePtr<ConnectedChannelStream> stream(
GetContext<Arena>()->New<ConnectedChannelStream>(transport));
stream->SetStream(static_cast<grpc_stream*>(GetContext<Arena>()->Alloc(
transport->filter_stack_transport()->SizeOfStream())));
transport->filter_stack_transport()->InitStream(stream->stream(),
stream->stream_refcount(),
nullptr, GetContext<Arena>());
auto* party = GetContext<Party>();
party->Spawn("set_polling_entity", call_args.polling_entity->Wait(),
[transport, stream = stream->InternalRef()](
grpc_polling_entity polling_entity) {
transport->SetPollingEntity(stream->stream(), &polling_entity);
});
// Start a loop to send messages from client_to_server_messages to the
// transport. When the pipe closes and the loop completes, send a trailing
// metadata batch to close the stream.
party->Spawn(
"send_messages",
TrySeq(stream->SendMessages(call_args.client_to_server_messages),
[stream = stream->InternalRef()]() {
return GetContext<BatchBuilder>()->SendClientTrailingMetadata(
stream->batch_target());
}),
[](absl::Status) {});
// Start a promise to receive server initial metadata and then forward it up
// through the receiving pipe.
auto server_initial_metadata = Arena::MakePooled<ServerMetadata>();
party->Spawn(
"recv_initial_metadata",
TrySeq(GetContext<BatchBuilder>()->ReceiveServerInitialMetadata(
stream->batch_target()),
[pipe = call_args.server_initial_metadata](
ServerMetadataHandle server_initial_metadata) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG,
"%s[connected] Publish server initial metadata: %s",
GetContext<Activity>()->DebugTag().c_str(),
server_initial_metadata->DebugString().c_str());
}
return Map(pipe->Push(std::move(server_initial_metadata)),
[](bool r) {
if (r) return absl::OkStatus();
return absl::CancelledError();
});
}),
[](absl::Status) {});
// Build up the rest of the main call promise:
// Create a promise that will send initial metadata and then signal completion
// of that via the token.
auto send_initial_metadata = Seq(
GetContext<BatchBuilder>()->SendClientInitialMetadata(
stream->batch_target(), std::move(call_args.client_initial_metadata)),
[sent_initial_metadata_token =
std::move(call_args.client_initial_metadata_outstanding)](
absl::Status status) mutable {
sent_initial_metadata_token.Complete(status.ok());
return status;
});
// Create a promise that will receive server trailing metadata.
// If this fails, we massage the error into metadata that we can report
// upwards.
auto server_trailing_metadata = Arena::MakePooled<ServerMetadata>();
auto recv_trailing_metadata = Map(
GetContext<BatchBuilder>()->ReceiveServerTrailingMetadata(
stream->batch_target()),
[](absl::StatusOr<ServerMetadataHandle> status) mutable {
if (!status.ok()) {
auto server_trailing_metadata = Arena::MakePooled<ServerMetadata>();
grpc_status_code status_code = GRPC_STATUS_UNKNOWN;
std::string message;
grpc_error_get_status(status.status(), Timestamp::InfFuture(),
&status_code, &message, nullptr, nullptr);
server_trailing_metadata->Set(GrpcStatusMetadata(), status_code);
server_trailing_metadata->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(message));
return server_trailing_metadata;
} else {
return std::move(*status);
}
});
// Finally the main call promise.
// Concurrently: send initial metadata and receive messages, until BOTH
// complete (or one fails).
// Next: receive trailing metadata, and return that up the stack.
auto recv_messages =
stream->RecvMessages(call_args.server_to_client_messages, false);
return Map(
[send_initial_metadata = std::move(send_initial_metadata),
recv_messages = std::move(recv_messages),
recv_trailing_metadata = std::move(recv_trailing_metadata),
done_send_initial_metadata = false, done_recv_messages = false,
done_recv_trailing_metadata =
false]() mutable -> Poll<ServerMetadataHandle> {
if (!done_send_initial_metadata) {
auto p = send_initial_metadata();
if (auto* r = p.value_if_ready()) {
done_send_initial_metadata = true;
if (!r->ok()) return StatusCast<ServerMetadataHandle>(*r);
}
}
if (!done_recv_messages) {
auto p = recv_messages();
if (p.ready()) {
// NOTE: ignore errors here, they'll be collected in the
// recv_trailing_metadata.
done_recv_messages = true;
} else {
return Pending{};
}
}
if (!done_recv_trailing_metadata) {
auto p = recv_trailing_metadata();
if (auto* r = p.value_if_ready()) {
done_recv_trailing_metadata = true;
return std::move(*r);
}
}
return Pending{};
},
[stream = std::move(stream)](ServerMetadataHandle result) {
stream->set_finished();
return result;
});
}
#endif
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
ArenaPromise<ServerMetadataHandle> MakeServerCallPromise(
Transport* transport, CallArgs, NextPromiseFactory next_promise_factory) {
OrphanablePtr<ConnectedChannelStream> stream(
GetContext<Arena>()->New<ConnectedChannelStream>(transport));
stream->SetStream(static_cast<grpc_stream*>(GetContext<Arena>()->Alloc(
transport->filter_stack_transport()->SizeOfStream())));
transport->filter_stack_transport()->InitStream(
stream->stream(), stream->stream_refcount(),
GetContext<CallContext>()->server_call_context()->server_stream_data(),
GetContext<Arena>());
auto* party = GetContext<Party>();
// Artifacts we need for the lifetime of the call.
struct CallData {
Pipe<MessageHandle> server_to_client;
Pipe<MessageHandle> client_to_server;
Pipe<ServerMetadataHandle> server_initial_metadata;
Latch<ServerMetadataHandle> failure_latch;
Latch<grpc_polling_entity> polling_entity_latch;
bool sent_initial_metadata = false;
bool sent_trailing_metadata = false;
};
auto* call_data = GetContext<Arena>()->New<CallData>();
GetContext<CallFinalization>()->Add(
[call_data](const grpc_call_final_info*) { call_data->~CallData(); });
party->Spawn("set_polling_entity", call_data->polling_entity_latch.Wait(),
[transport, stream = stream->InternalRef()](
grpc_polling_entity polling_entity) {
transport->SetPollingEntity(stream->stream(), &polling_entity);
});
auto server_to_client_empty =
call_data->server_to_client.receiver.AwaitEmpty();
// Create a promise that will receive client initial metadata, and then run
// the main stem of the call (calling next_promise_factory up through the
// filters).
// Race the main call with failure_latch, allowing us to forcefully complete
// the call in the case of a failure.
auto recv_initial_metadata_then_run_promise =
TrySeq(GetContext<BatchBuilder>()->ReceiveClientInitialMetadata(
stream->batch_target()),
[next_promise_factory = std::move(next_promise_factory),
server_to_client_empty = std::move(server_to_client_empty),
call_data](ClientMetadataHandle client_initial_metadata) {
auto call_promise = next_promise_factory(CallArgs{
std::move(client_initial_metadata),
ClientInitialMetadataOutstandingToken::Empty(),
&call_data->polling_entity_latch,
&call_data->server_initial_metadata.sender,
&call_data->client_to_server.receiver,
&call_data->server_to_client.sender,
});
return Race(call_data->failure_latch.Wait(),
[call_promise = std::move(call_promise),
server_to_client_empty =
std::move(server_to_client_empty)]() mutable
-> Poll<ServerMetadataHandle> {
// TODO(ctiller): this is deeply weird and we need
// to clean this up.
//
// The following few lines check to ensure that
// there's no message currently pending in the
// outgoing message queue, and if (and only if)
// that's true decides to poll the main promise to
// see if there's a result.
//
// This essentially introduces a polling priority
// scheme that makes the current promise structure
// work out the way we want when talking to
// transports.
//
// The problem is that transports are going to need
// to replicate this structure when they convert to
// promises, and that becomes troubling as we'll be
// replicating this weirdness throughout the stack.
//
// Instead we likely need to change the way we're
// composing promises through the stack.
//
// Proposed is to change filters from a promise
// that takes ClientInitialMetadata and returns
// ServerTrailingMetadata with three pipes for
// ServerInitialMetadata and
// ClientToServerMessages, ServerToClientMessages.
// Instead we'll have five pipes, moving
// ClientInitialMetadata and ServerTrailingMetadata
// to pipes that can be intercepted.
//
// The effect of this change will be to cripple the
// things that can be done in a filter (but cripple
// in line with what most filters actually do).
// We'll likely need to add a `CallContext::Cancel`
// to allow filters to cancel a request, but this
// would also have the advantage of centralizing
// our cancellation machinery which seems like an
// additional win - with the net effect that the
// shape of the call gets made explicit at the top
// & bottom of the stack.
//
// There's a small set of filters (retry, this one,
// lame client, client channel) that terminate
// stacks and need a richer set of semantics, but
// that ends up being fine because we can spawn
// tasks in parties to handle those edge cases, and
// keep the majority of filters simple: they just
// call InterceptAndMap on a handful of filters at
// call initialization time and then proceed to
// actually filter.
//
// So that's the plan, why isn't it enacted here?
//
// Well, the plan ends up being easy to implement
// in the promise based world (I did a prototype on
// a branch in an afternoon). It's heinous to
// implement in promise_based_filter, and that code
// is load bearing for us at the time of writing.
// It's not worth delaying promises for a further N
// months (N ~ 6) to make that change.
//
// Instead, we'll move forward with this, get
// promise_based_filter out of the picture, and
// then during the mop-up phase for promises tweak
// the compute structure to move to the magical
// five pipes (I'm reminded of an old Onion
// article), and end up in a good happy place.
if (server_to_client_empty().pending()) {
return Pending{};
}
return call_promise();
});
});
// Promise factory that accepts a ServerMetadataHandle, and sends it as the
// trailing metadata for this call.
auto send_trailing_metadata = [call_data, stream = stream->InternalRef()](
ServerMetadataHandle
server_trailing_metadata) {
bool is_cancellation =
server_trailing_metadata->get(GrpcCallWasCancelled()).value_or(false);
return GetContext<BatchBuilder>()->SendServerTrailingMetadata(
stream->batch_target(), std::move(server_trailing_metadata),
is_cancellation ||
!std::exchange(call_data->sent_initial_metadata, true));
};
// Runs the receive message loop, either until all the messages
// are received or the server call is complete.
party->Spawn(
"recv_messages",
Race(
Map(stream->WaitFinished(), [](Empty) { return absl::OkStatus(); }),
Map(stream->RecvMessages(&call_data->client_to_server.sender, true),
[failure_latch = &call_data->failure_latch](absl::Status status) {
if (!status.ok() && !failure_latch->is_set()) {
failure_latch->Set(ServerMetadataFromStatus(status));
}
return status;
})),
[](absl::Status) {});
// Run a promise that will send initial metadata (if that pipe sends some).
// And then run the send message loop until that completes.
auto send_initial_metadata = Seq(
Race(Map(stream->WaitFinished(),
[](Empty) { return NextResult<ServerMetadataHandle>(true); }),
call_data->server_initial_metadata.receiver.Next()),
[call_data, stream = stream->InternalRef()](
NextResult<ServerMetadataHandle> next_result) mutable {
auto md = !call_data->sent_initial_metadata && next_result.has_value()
? std::move(next_result.value())
: nullptr;
if (md != nullptr) {
call_data->sent_initial_metadata = true;
auto* party = GetContext<Party>();
party->Spawn("connected/send_initial_metadata",
GetContext<BatchBuilder>()->SendServerInitialMetadata(
stream->batch_target(), std::move(md)),
[](absl::Status) {});
return Immediate(absl::OkStatus());
}
return Immediate(absl::CancelledError());
});
party->Spawn(
"send_initial_metadata_then_messages",
Race(Map(stream->WaitFinished(), [](Empty) { return absl::OkStatus(); }),
TrySeq(std::move(send_initial_metadata),
stream->SendMessages(&call_data->server_to_client.receiver))),
[](absl::Status) {});
// Spawn a job to fetch the "client trailing metadata" - if this is OK then
// it's client done, otherwise it's a signal of cancellation from the client
// which we'll use failure_latch to signal.
party->Spawn(
"recv_trailing_metadata",
Seq(GetContext<BatchBuilder>()->ReceiveClientTrailingMetadata(
stream->batch_target()),
[failure_latch = &call_data->failure_latch](
absl::StatusOr<ClientMetadataHandle> status) mutable {
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"%s[connected] Got trailing metadata; status=%s metadata=%s",
GetContext<Activity>()->DebugTag().c_str(),
status.status().ToString().c_str(),
status.ok() ? (*status)->DebugString().c_str() : "<none>");
}
ClientMetadataHandle trailing_metadata;
if (status.ok()) {
trailing_metadata = std::move(*status);
} else {
trailing_metadata = Arena::MakePooled<ClientMetadata>();
grpc_status_code status_code = GRPC_STATUS_UNKNOWN;
std::string message;
grpc_error_get_status(status.status(), Timestamp::InfFuture(),
&status_code, &message, nullptr, nullptr);
trailing_metadata->Set(GrpcStatusMetadata(), status_code);
trailing_metadata->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(message));
}
if (trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
if (!failure_latch->is_set()) {
failure_latch->Set(std::move(trailing_metadata));
}
}
return Empty{};
}),
[](Empty) {});
// Finally assemble the main call promise:
// Receive initial metadata from the client and start the promise up the
// filter stack.
// Upon completion, send trailing metadata to the client and then return it
// (allowing the call code to decide on what signalling to give the
// application).
struct CleanupPollingEntityLatch {
void operator()(Latch<grpc_polling_entity>* latch) {
if (!latch->is_set()) latch->Set(grpc_polling_entity());
}
};
auto cleanup_polling_entity_latch =
std::unique_ptr<Latch<grpc_polling_entity>, CleanupPollingEntityLatch>(
&call_data->polling_entity_latch);
struct CleanupSendInitialMetadata {
void operator()(CallData* call_data) {
call_data->server_initial_metadata.receiver.CloseWithError();
}
};
auto cleanup_send_initial_metadata =
std::unique_ptr<CallData, CleanupSendInitialMetadata>(call_data);
return Map(
Seq(std::move(recv_initial_metadata_then_run_promise),
std::move(send_trailing_metadata)),
[cleanup_polling_entity_latch = std::move(cleanup_polling_entity_latch),
cleanup_send_initial_metadata = std::move(cleanup_send_initial_metadata),
stream = std::move(stream)](ServerMetadataHandle md) {
stream->set_finished();
return md;
});
}
#endif
template <ArenaPromise<ServerMetadataHandle> (*make_call_promise)(
Transport*, CallArgs, NextPromiseFactory)>
grpc_channel_filter MakeConnectedFilter() {
// Create a vtable that contains both the legacy call methods (for filter
// stack based calls) and the new promise based method for creating
// promise based calls (the latter iff make_call_promise != nullptr). In
// this way the filter can be inserted into either kind of channel stack,
// and only if all the filters in the stack are promise based will the
// call be promise based.
auto make_call_wrapper = +[](grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next) {
Transport* transport =
static_cast<channel_data*>(elem->channel_data)->transport;
return make_call_promise(transport, std::move(call_args), std::move(next));
};
return {
connected_channel_start_transport_stream_op_batch,
make_call_promise != nullptr ? make_call_wrapper : nullptr,
/* init_call: */ nullptr,
connected_channel_start_transport_op,
sizeof(call_data),
connected_channel_init_call_elem,
set_pollset_or_pollset_set,
connected_channel_destroy_call_elem,
sizeof(channel_data),
connected_channel_init_channel_elem,
+[](grpc_channel_stack* channel_stack, grpc_channel_element* elem) {
// HACK(ctiller): increase call stack size for the channel to make
// space for channel data. We need a cleaner (but performant) way to
// do this, and I'm not sure what that is yet. This is only "safe"
// because call stacks place no additional data after the last call
// element, and the last call element MUST be the connected channel.
auto* transport =
static_cast<channel_data*>(elem->channel_data)->transport;
if (transport->filter_stack_transport() != nullptr) {
channel_stack->call_stack_size +=
transport->filter_stack_transport()->SizeOfStream();
}
},
connected_channel_destroy_channel_elem,
connected_channel_get_channel_info,
"connected",
};
}
ArenaPromise<ServerMetadataHandle> MakeClientTransportCallPromise(
Transport* transport, CallArgs call_args, NextPromiseFactory) {
auto spine = GetContext<CallContext>()->MakeCallSpine(std::move(call_args));
transport->client_transport()->StartCall(CallHandler{spine});
return spine->PullServerTrailingMetadata();
}
const grpc_channel_filter kClientPromiseBasedTransportFilter =
MakeConnectedFilter<MakeClientTransportCallPromise>();
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
const grpc_channel_filter kClientEmulatedFilter =
MakeConnectedFilter<MakeClientCallPromise>();
#else
const grpc_channel_filter kClientEmulatedFilter =
MakeConnectedFilter<nullptr>();
#endif
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
const grpc_channel_filter kServerEmulatedFilter =
MakeConnectedFilter<MakeServerCallPromise>();
#else
const grpc_channel_filter kServerEmulatedFilter =
MakeConnectedFilter<nullptr>();
#endif
// noop filter for the v3 stack: placeholder for now because other code requires
// we have a terminator.
// TODO(ctiller): delete when v3 transition is complete.
const grpc_channel_filter kServerPromiseBasedTransportFilter = {
const grpc_channel_filter kPromiseBasedTransportFilter = {
nullptr,
[](grpc_channel_element*, CallArgs, NextPromiseFactory)
-> ArenaPromise<ServerMetadataHandle> { Crash("not implemented"); },
/* init_call: */ [](grpc_channel_element*, CallSpineInterface*) {},
connected_channel_start_transport_op,
0,
nullptr,
set_pollset_or_pollset_set,
nullptr,
sizeof(channel_data),
connected_channel_init_channel_elem,
+[](grpc_channel_element*, grpc_channel_element_args*) {
return absl::InternalError(
"Cannot use filter based stack with promise based transports");
},
+[](grpc_channel_stack*, grpc_channel_element*) {},
connected_channel_destroy_channel_elem,
connected_channel_get_channel_info,
@ -937,7 +301,6 @@ bool TransportSupportsServerPromiseBasedCalls(const ChannelArgs& args) {
auto* transport = args.GetObject<Transport>();
return transport->server_transport() != nullptr;
}
} // namespace
void RegisterConnectedChannel(CoreConfiguration::Builder* builder) {
@ -950,31 +313,30 @@ void RegisterConnectedChannel(CoreConfiguration::Builder* builder) {
// Option 1, and our ideal: the transport supports promise based calls,
// and so we simply use the transport directly.
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_SUBCHANNEL,
&kClientPromiseBasedTransportFilter)
->RegisterFilter(GRPC_CLIENT_SUBCHANNEL, &kPromiseBasedTransportFilter)
.Terminal()
.If(TransportSupportsClientPromiseBasedCalls);
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_DIRECT_CHANNEL,
&kClientPromiseBasedTransportFilter)
&kPromiseBasedTransportFilter)
.Terminal()
.If(TransportSupportsClientPromiseBasedCalls);
builder->channel_init()
->RegisterFilter(GRPC_SERVER_CHANNEL, &kServerPromiseBasedTransportFilter)
->RegisterFilter(GRPC_SERVER_CHANNEL, &kPromiseBasedTransportFilter)
.Terminal()
.If(TransportSupportsServerPromiseBasedCalls);
// Option 2: the transport does not support promise based calls.
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_SUBCHANNEL, &kClientEmulatedFilter)
->RegisterFilter(GRPC_CLIENT_SUBCHANNEL, &kConnectedFilter)
.Terminal()
.IfNot(TransportSupportsClientPromiseBasedCalls);
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_DIRECT_CHANNEL, &kClientEmulatedFilter)
->RegisterFilter(GRPC_CLIENT_DIRECT_CHANNEL, &kConnectedFilter)
.Terminal()
.IfNot(TransportSupportsClientPromiseBasedCalls);
builder->channel_init()
->RegisterFilter(GRPC_SERVER_CHANNEL, &kServerEmulatedFilter)
->RegisterFilter(GRPC_SERVER_CHANNEL, &kConnectedFilter)
.Terminal()
.IfNot(TransportSupportsServerPromiseBasedCalls);
}

@ -90,9 +90,6 @@ BaseCallData::BaseCallData(
arena_(args->arena),
call_combiner_(args->call_combiner),
deadline_(args->deadline),
call_context_(flags & kFilterExaminesCallContext
? arena_->New<CallContext>(nullptr)
: nullptr),
server_initial_metadata_pipe_(
flags & kFilterExaminesServerInitialMetadata
? arena_->New<Pipe<ServerMetadataHandle>>(arena_)
@ -280,7 +277,7 @@ BaseCallData::Flusher::~Flusher() {
};
for (size_t i = 1; i < release_.size(); i++) {
auto* batch = release_[i];
if (call_->call_context_ != nullptr && call_->call_context_->traced()) {
if (call_->call() != nullptr && call_->call()->traced()) {
batch->is_traced = true;
}
if (grpc_trace_channel.enabled()) {
@ -300,7 +297,7 @@ BaseCallData::Flusher::~Flusher() {
gpr_log(GPR_INFO, "FLUSHER:forward batch: %s",
grpc_transport_stream_op_batch_string(release_[0], false).c_str());
}
if (call_->call_context_ != nullptr && call_->call_context_->traced()) {
if (call_->call() != nullptr && call_->call()->traced()) {
release_[0]->is_traced = true;
}
grpc_call_next_op(call_->elem(), release_[0]);

@ -75,12 +75,6 @@
namespace grpc_core {
// HACK: If a filter has this type as a base class it will be skipped in
// v3 filter stacks. This is a temporary measure to allow the v3 filter stack
// to be brought up whilst some tests inadvertently rely on hard-to-convert
// filters.
class HackyHackyHackySkipInV3FilterStacks {};
class ChannelFilter {
public:
class Args {
@ -608,220 +602,6 @@ inline void InterceptClientToServerMessage(const NoInterceptor*,
FilterCallData<Derived>*,
const CallArgs&) {}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
ServerMetadataHandle (Derived::Call::*fn)(const Message&),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return
[call, call_spine](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnClientToServerMessage(*msg);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
void (Derived::Call::*fn)(const Message&), typename Derived::Call* call,
Derived*, PipeBasedCallSpine*) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call](MessageHandle msg) -> absl::optional<MessageHandle> {
call->OnClientToServerMessage(*msg);
return std::move(msg);
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
ServerMetadataHandle (Derived::Call::*fn)(const Message&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnClientToServerMessage(*msg, channel);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
MessageHandle (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel, PipeBasedCallSpine*) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call, channel](MessageHandle msg) {
return call->OnClientToServerMessage(std::move(msg), channel);
};
}
template <typename Derived>
inline auto InterceptClientToServerMessageHandler(
absl::StatusOr<MessageHandle> (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
return [call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto r = call->OnClientToServerMessage(std::move(msg), channel);
if (r.ok()) return std::move(*r);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(r.status()));
return absl::nullopt;
};
}
template <typename Derived, typename HookFunction>
inline void InterceptClientToServerMessage(HookFunction fn,
const NoInterceptor*,
typename Derived::Call* call,
Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
call_spine->client_to_server_messages().receiver.InterceptAndMap(
InterceptClientToServerMessageHandler(fn, call, channel, call_spine));
}
template <typename Derived, typename HookFunction>
inline void InterceptClientToServerMessage(HookFunction fn,
void (Derived::Call::*half_close)(),
typename Derived::Call* call,
Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientToServerMessage);
DCHECK(half_close == &Derived::Call::OnClientToServerHalfClose);
call_spine->client_to_server_messages().receiver.InterceptAndMapWithHalfClose(
InterceptClientToServerMessageHandler(fn, call, channel, call_spine),
[call]() { call->OnClientToServerHalfClose(); });
}
template <typename Derived>
inline void InterceptClientToServerMessage(const NoInterceptor*,
const NoInterceptor*,
typename Derived::Call*, Derived*,
PipeBasedCallSpine*) {}
inline void InterceptClientInitialMetadata(const NoInterceptor*, void*, void*,
PipeBasedCallSpine*) {}
template <typename Derived>
inline void InterceptClientInitialMetadata(
void (Derived::Call::*fn)(ClientMetadata& md), typename Derived::Call* call,
Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call](ClientMetadataHandle md) {
call->OnClientInitialMetadata(*md);
return md;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
void (Derived::Call::*fn)(ClientMetadata& md, Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call, channel](ClientMetadataHandle md) {
call->OnClientInitialMetadata(*md, channel);
return md;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
ServerMetadataHandle (Derived::Call::*fn)(ClientMetadata& md),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine,
call](ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto return_md = call->OnClientInitialMetadata(*md);
if (return_md == nullptr) return std::move(md);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
ServerMetadataHandle (Derived::Call::*fn)(ClientMetadata& md,
Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine, call, channel](
ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto return_md = call->OnClientInitialMetadata(*md, channel);
if (return_md == nullptr) return std::move(md);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
absl::Status (Derived::Call::*fn)(ClientMetadata& md),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine,
call](ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto status = call->OnClientInitialMetadata(*md);
if (status.ok()) return std::move(md);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptClientInitialMetadata(
absl::Status (Derived::Call::*fn)(ClientMetadata& md, Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call_spine, call, channel](
ClientMetadataHandle md) -> absl::optional<ClientMetadataHandle> {
auto status = call->OnClientInitialMetadata(*md, channel);
if (status.ok()) return std::move(md);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
// Returning a promise that resolves to something that can be cast to
// ServerMetadataHandle also counts
template <typename Promise, typename Derived>
absl::void_t<decltype(StatusCast<ServerMetadataHandle>(
std::declval<PromiseResult<Promise>>))>
InterceptClientInitialMetadata(Promise (Derived::Call::*promise_factory)(
ClientMetadata& md, Derived* channel),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(promise_factory == &Derived::Call::OnClientInitialMetadata);
call_spine->client_initial_metadata().receiver.InterceptAndMap(
[call, call_spine, channel](ClientMetadataHandle md) {
ClientMetadata& md_ref = *md;
return Map(call->OnClientInitialMetadata(md_ref, channel),
[md = std::move(md),
call_spine](PromiseResult<Promise> status) mutable
-> absl::optional<ClientMetadataHandle> {
if (IsStatusOk(status)) return std::move(md);
call_spine->PushServerTrailingMetadata(
StatusCast<ServerMetadataHandle>(std::move(status)));
return absl::nullopt;
});
});
}
template <typename CallArgs>
inline void InterceptServerInitialMetadata(const NoInterceptor*, void*,
const CallArgs&) {}
@ -885,67 +665,6 @@ inline void InterceptServerInitialMetadata(
});
}
inline void InterceptServerInitialMetadata(const NoInterceptor*, void*, void*,
CallSpineInterface*) {}
template <typename Derived>
inline void InterceptServerInitialMetadata(
void (Derived::Call::*fn)(ServerMetadata&), typename Derived::Call* call,
Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call](ServerMetadataHandle md) {
call->OnServerInitialMetadata(*md);
return md;
});
}
template <typename Derived>
inline void InterceptServerInitialMetadata(
absl::Status (Derived::Call::*fn)(ServerMetadata&),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call, call_spine](
ServerMetadataHandle md) -> absl::optional<ServerMetadataHandle> {
auto status = call->OnServerInitialMetadata(*md);
if (status.ok()) return std::move(md);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptServerInitialMetadata(
void (Derived::Call::*fn)(ServerMetadata&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call, channel](ServerMetadataHandle md) {
call->OnServerInitialMetadata(*md, channel);
return md;
});
}
template <typename Derived>
inline void InterceptServerInitialMetadata(
absl::Status (Derived::Call::*fn)(ServerMetadata&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerInitialMetadata);
call_spine->server_initial_metadata().sender.InterceptAndMap(
[call, call_spine, channel](
ServerMetadataHandle md) -> absl::optional<ServerMetadataHandle> {
auto status = call->OnServerInitialMetadata(*md, channel);
if (status.ok()) return std::move(md);
call_spine->PullServerTrailingMetadata(
ServerMetadataFromStatus(status));
return absl::nullopt;
});
}
inline void InterceptServerToClientMessage(const NoInterceptor*, void*,
const CallArgs&) {}
@ -1020,106 +739,6 @@ inline void InterceptServerToClientMessage(
});
}
inline void InterceptServerToClientMessage(const NoInterceptor*, void*, void*,
CallSpineInterface*) {}
template <typename Derived>
inline void InterceptServerToClientMessage(
void (Derived::Call::*fn)(const Message&), typename Derived::Call* call,
Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call](MessageHandle msg) -> absl::optional<MessageHandle> {
call->OnServerToClientMessage(*msg);
return std::move(msg);
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
ServerMetadataHandle (Derived::Call::*fn)(const Message&),
typename Derived::Call* call, Derived*, PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, call_spine](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnServerToClientMessage(*msg);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
ServerMetadataHandle (Derived::Call::*fn)(const Message&, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto return_md = call->OnServerToClientMessage(*msg, channel);
if (return_md == nullptr) return std::move(msg);
call_spine->PushServerTrailingMetadata(std::move(return_md));
return absl::nullopt;
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
MessageHandle (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, channel](MessageHandle msg) {
return call->OnServerToClientMessage(std::move(msg), channel);
});
}
template <typename Derived>
inline void InterceptServerToClientMessage(
absl::StatusOr<MessageHandle> (Derived::Call::*fn)(MessageHandle, Derived*),
typename Derived::Call* call, Derived* channel,
PipeBasedCallSpine* call_spine) {
DCHECK(fn == &Derived::Call::OnServerToClientMessage);
call_spine->server_to_client_messages().sender.InterceptAndMap(
[call, call_spine,
channel](MessageHandle msg) -> absl::optional<MessageHandle> {
auto r = call->OnServerToClientMessage(std::move(msg), channel);
if (r.ok()) return std::move(*r);
call_spine->PushServerTrailingMetadata(
ServerMetadataFromStatus(r.status()));
return absl::nullopt;
});
}
inline void InterceptServerTrailingMetadata(const NoInterceptor*, void*, void*,
CallSpineInterface*) {}
template <typename Derived>
inline void InterceptServerTrailingMetadata(
void (Derived::Call::*)(ServerMetadata&), typename Derived::Call*, Derived*,
PipeBasedCallSpine*) {
gpr_log(GPR_ERROR,
"InterceptServerTrailingMetadata not available for call v2.5");
}
template <typename Derived>
inline void InterceptServerTrailingMetadata(
void (Derived::Call::*)(ServerMetadata&, Derived*), typename Derived::Call*,
Derived*, PipeBasedCallSpine*) {
gpr_log(GPR_ERROR,
"InterceptServerTrailingMetadata not available for call v2.5");
}
template <typename Derived>
inline void InterceptServerTrailingMetadata(
absl::Status (Derived::Call::*)(ServerMetadata&), typename Derived::Call*,
Derived*, PipeBasedCallSpine*) {
gpr_log(GPR_ERROR,
"InterceptServerTrailingMetadata not available for call v2.5");
}
inline void InterceptFinalize(const NoInterceptor*, void*, void*) {}
template <class Call>
@ -1221,29 +840,6 @@ template <typename Derived>
class ImplementChannelFilter : public ChannelFilter,
public ImplementChannelFilterTag {
public:
// Natively construct a v3 call.
void InitCall(CallSpineInterface* call_spine) {
typename Derived::Call* call =
GetContext<Arena>()
->ManagedNew<promise_filter_detail::CallWrapper<Derived>>(
static_cast<Derived*>(this));
auto* c = DownCast<PipeBasedCallSpine*>(call_spine);
auto* d = static_cast<Derived*>(this);
promise_filter_detail::InterceptClientInitialMetadata(
&Derived::Call::OnClientInitialMetadata, call, d, c);
promise_filter_detail::InterceptClientToServerMessage(
&Derived::Call::OnClientToServerMessage,
&Derived::Call::OnClientToServerHalfClose, call, d, c);
promise_filter_detail::InterceptServerInitialMetadata(
&Derived::Call::OnServerInitialMetadata, call, d, c);
promise_filter_detail::InterceptServerToClientMessage(
&Derived::Call::OnServerToClientMessage, call, d, c);
promise_filter_detail::InterceptServerTrailingMetadata(
&Derived::Call::OnServerTrailingMetadata, call, d, c);
promise_filter_detail::InterceptFinalize(&Derived::Call::OnFinalize, d,
call);
}
// Polyfill for the original promise scheme.
// Allows writing v3 filters that work with v2 stacks.
// (and consequently also v1 stacks since we can polyfill back to that too).
@ -1344,13 +940,14 @@ class BaseCallData : public Activity, private Wakeable {
virtual void StartBatch(grpc_transport_stream_op_batch* batch) = 0;
Call* call() { return arena_->GetContext<Call>(); }
protected:
class ScopedContext : public promise_detail::Context<Arena>,
public promise_detail::Context<grpc_polling_entity>,
public promise_detail::Context<CallFinalization>,
public promise_detail::Context<
grpc_event_engine::experimental::EventEngine>,
public promise_detail::Context<CallContext> {
grpc_event_engine::experimental::EventEngine> {
public:
explicit ScopedContext(BaseCallData* call_data)
: promise_detail::Context<Arena>(call_data->arena_),
@ -1358,8 +955,7 @@ class BaseCallData : public Activity, private Wakeable {
call_data->pollent_.load(std::memory_order_acquire)),
promise_detail::Context<CallFinalization>(&call_data->finalization_),
promise_detail::Context<grpc_event_engine::experimental::EventEngine>(
call_data->event_engine_),
promise_detail::Context<CallContext>(call_data->call_context_) {}
call_data->event_engine_) {}
};
class Flusher {
@ -1705,7 +1301,6 @@ class BaseCallData : public Activity, private Wakeable {
CallCombiner* const call_combiner_;
const Timestamp deadline_;
CallFinalization finalization_;
CallContext* call_context_ = nullptr;
std::atomic<grpc_polling_entity*> pollent_{nullptr};
Pipe<ServerMetadataHandle>* const server_initial_metadata_pipe_;
SendMessage* const send_message_;
@ -2061,67 +1656,15 @@ struct ChannelFilterWithFlagsMethods {
// ChannelArgs channel_args, ChannelFilter::Args filter_args);
// };
template <typename F, FilterEndpoint kEndpoint, uint8_t kFlags = 0>
absl::enable_if_t<
std::is_base_of<ChannelFilter, F>::value &&
!std::is_base_of<ImplementChannelFilterTag, F>::value &&
!std::is_base_of<HackyHackyHackySkipInV3FilterStacks, F>::value,
grpc_channel_filter>
MakePromiseBasedFilter(const char* name) {
using CallData = promise_filter_detail::CallData<kEndpoint>;
return grpc_channel_filter{
// start_transport_stream_op_batch
promise_filter_detail::BaseCallDataMethods::StartTransportStreamOpBatch,
// make_call_promise
promise_filter_detail::ChannelFilterMethods::MakeCallPromise,
nullptr,
// start_transport_op
promise_filter_detail::ChannelFilterMethods::StartTransportOp,
// sizeof_call_data
sizeof(CallData),
// init_call_elem
promise_filter_detail::CallDataFilterWithFlagsMethods<
CallData, kFlags>::InitCallElem,
// set_pollset_or_pollset_set
promise_filter_detail::BaseCallDataMethods::SetPollsetOrPollsetSet,
// destroy_call_elem
promise_filter_detail::CallDataFilterWithFlagsMethods<
CallData, kFlags>::DestroyCallElem,
// sizeof_channel_data
sizeof(F),
// init_channel_elem
promise_filter_detail::ChannelFilterWithFlagsMethods<
F, kFlags>::InitChannelElem,
// post_init_channel_elem
promise_filter_detail::ChannelFilterMethods::PostInitChannelElem,
// destroy_channel_elem
promise_filter_detail::ChannelFilterWithFlagsMethods<
F, kFlags>::DestroyChannelElem,
// get_channel_info
promise_filter_detail::ChannelFilterMethods::GetChannelInfo,
// name
name,
};
}
template <typename F, FilterEndpoint kEndpoint, uint8_t kFlags = 0>
absl::enable_if_t<
std::is_base_of<HackyHackyHackySkipInV3FilterStacks, F>::value,
grpc_channel_filter>
absl::enable_if_t<std::is_base_of<ChannelFilter, F>::value &&
!std::is_base_of<ImplementChannelFilterTag, F>::value,
grpc_channel_filter>
MakePromiseBasedFilter(const char* name) {
using CallData = promise_filter_detail::CallData<kEndpoint>;
return grpc_channel_filter{
// start_transport_stream_op_batch
promise_filter_detail::BaseCallDataMethods::StartTransportStreamOpBatch,
// make_call_promise
promise_filter_detail::ChannelFilterMethods::MakeCallPromise,
[](grpc_channel_element* elem, CallSpineInterface*) {
GRPC_LOG_EVERY_N_SEC(
1, GPR_ERROR,
"gRPC V3 call stack in use, with a filter ('%s') that is not V3.",
elem->filter->name);
},
// start_transport_op
promise_filter_detail::ChannelFilterMethods::StartTransportOp,
// sizeof_call_data
@ -2160,11 +1703,6 @@ MakePromiseBasedFilter(const char* name) {
return grpc_channel_filter{
// start_transport_stream_op_batch
promise_filter_detail::BaseCallDataMethods::StartTransportStreamOpBatch,
// make_call_promise
promise_filter_detail::ChannelFilterMethods::MakeCallPromise,
[](grpc_channel_element* elem, CallSpineInterface* args) {
static_cast<F*>(elem->channel_data)->InitCall(args);
},
// start_transport_op
promise_filter_detail::ChannelFilterMethods::StartTransportOp,
// sizeof_call_data

@ -30,7 +30,7 @@ bool UseEventEngineClient() {
return grpc_core::IsEventEngineClientEnabled();
#elif defined(GPR_WINDOWS) && !defined(GRPC_DO_NOT_INSTANTIATE_POSIX_POLLER)
return grpc_core::IsEventEngineClientEnabled();
#elif defined(GRPC_IOS_EVENT_ENGINE_CLIENT)
#elif GRPC_IOS_EVENT_ENGINE_CLIENT
return true;
#else
return false;

@ -85,7 +85,9 @@ bool ThreadyEventEngine::IsWorkerThread() {
absl::StatusOr<std::unique_ptr<EventEngine::DNSResolver>>
ThreadyEventEngine::GetDNSResolver(
const DNSResolver::ResolverOptions& options) {
return std::make_unique<ThreadyDNSResolver>(*impl_->GetDNSResolver(options));
return std::make_unique<ThreadyDNSResolver>(
*impl_->GetDNSResolver(options),
std::static_pointer_cast<ThreadyEventEngine>(shared_from_this()));
}
void ThreadyEventEngine::Run(Closure* closure) {
@ -116,10 +118,10 @@ void ThreadyEventEngine::ThreadyDNSResolver::LookupHostname(
LookupHostnameCallback on_resolve, absl::string_view name,
absl::string_view default_port) {
return impl_->LookupHostname(
[this, on_resolve = std::move(on_resolve)](
[engine = engine_, on_resolve = std::move(on_resolve)](
absl::StatusOr<std::vector<ResolvedAddress>> addresses) mutable {
engine_->Asynchronously([on_resolve = std::move(on_resolve),
addresses = std::move(addresses)]() mutable {
engine->Asynchronously([on_resolve = std::move(on_resolve),
addresses = std::move(addresses)]() mutable {
on_resolve(std::move(addresses));
});
},
@ -129,13 +131,12 @@ void ThreadyEventEngine::ThreadyDNSResolver::LookupHostname(
void ThreadyEventEngine::ThreadyDNSResolver::LookupSRV(
LookupSRVCallback on_resolve, absl::string_view name) {
return impl_->LookupSRV(
[this, on_resolve = std::move(on_resolve)](
[engine = engine_, on_resolve = std::move(on_resolve)](
absl::StatusOr<std::vector<SRVRecord>> records) mutable {
return engine_->Asynchronously(
[on_resolve = std::move(on_resolve),
records = std::move(records)]() mutable {
on_resolve(std::move(records));
});
return engine->Asynchronously([on_resolve = std::move(on_resolve),
records = std::move(records)]() mutable {
on_resolve(std::move(records));
});
},
name);
}
@ -143,10 +144,10 @@ void ThreadyEventEngine::ThreadyDNSResolver::LookupSRV(
void ThreadyEventEngine::ThreadyDNSResolver::LookupTXT(
LookupTXTCallback on_resolve, absl::string_view name) {
return impl_->LookupTXT(
[this, on_resolve = std::move(on_resolve)](
[engine = engine_, on_resolve = std::move(on_resolve)](
absl::StatusOr<std::vector<std::string>> record) mutable {
return engine_->Asynchronously([on_resolve = std::move(on_resolve),
record = std::move(record)]() mutable {
return engine->Asynchronously([on_resolve = std::move(on_resolve),
record = std::move(record)]() mutable {
on_resolve(std::move(record));
});
},

@ -77,8 +77,9 @@ class ThreadyEventEngine final : public EventEngine {
private:
class ThreadyDNSResolver final : public DNSResolver {
public:
explicit ThreadyDNSResolver(std::unique_ptr<DNSResolver> impl)
: impl_(std::move(impl)) {}
ThreadyDNSResolver(std::unique_ptr<DNSResolver> impl,
std::shared_ptr<ThreadyEventEngine> engine)
: impl_(std::move(impl)), engine_(std::move(engine)) {}
void LookupHostname(LookupHostnameCallback on_resolve,
absl::string_view name,
absl::string_view default_port) override;
@ -89,7 +90,7 @@ class ThreadyEventEngine final : public EventEngine {
private:
std::unique_ptr<DNSResolver> impl_;
ThreadyEventEngine* engine_;
std::shared_ptr<ThreadyEventEngine> engine_;
};
void Asynchronously(absl::AnyInvocable<void()> fn);
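The change above replaces a raw `this` capture with a std::shared_ptr obtained from shared_from_this(), presumably so the ThreadyEventEngine stays alive for as long as any asynchronously-invoked DNS callback may still run. A minimal hedged sketch of that general lifetime pattern, independent of the gRPC types (all names below are hypothetical):

#include <functional>
#include <memory>
#include <thread>

class Worker : public std::enable_shared_from_this<Worker> {
 public:
  void Start(std::function<void()> done) {
    // Capture a shared_ptr copy instead of `this`, so the Worker cannot be
    // destroyed while the detached thread still needs it.
    auto self = shared_from_this();
    std::thread([self, done = std::move(done)]() mutable {
      // Safe to use self->... here; `self` keeps the Worker alive.
      done();
    }).detach();
  }
};

// Usage: the Worker must already be owned by a shared_ptr for
// shared_from_this() to be valid, e.g.
//   auto w = std::make_shared<Worker>();
//   w->Start([] { /* completion */ });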

@ -76,24 +76,9 @@ const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
"New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
const char* const description_promise_based_client_call =
"If set, use the new gRPC promise based call code when it's appropriate "
"(ie when all filters in a stack are promise based)";
const char* const additional_constraints_promise_based_client_call = "{}";
const uint8_t required_experiments_promise_based_client_call[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener)};
const char* const description_chaotic_good =
"If set, enable the chaotic good load transport (this is mostly here for "
"testing)";
const char* const additional_constraints_chaotic_good = "{}";
const uint8_t required_experiments_chaotic_good[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_promise_based_inproc_transport =
"Use promises for the in-process transport.";
const char* const additional_constraints_promise_based_inproc_transport = "{}";
const uint8_t required_experiments_promise_based_inproc_transport[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_rstpit =
"On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short "
"duration";
@ -131,12 +116,6 @@ const char* const description_work_serializer_dispatch =
const char* const additional_constraints_work_serializer_dispatch = "{}";
const uint8_t required_experiments_work_serializer_dispatch[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient)};
const char* const description_call_v3 = "Promise-based call version 3.";
const char* const additional_constraints_call_v3 = "{}";
const uint8_t required_experiments_call_v3[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener),
static_cast<uint8_t>(grpc_core::kExperimentIdWorkSerializerDispatch)};
} // namespace
namespace grpc_core {
@ -175,16 +154,10 @@ const ExperimentMetadata g_experiment_metadata[] = {
additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
{"pick_first_new", description_pick_first_new,
additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_client_call", description_promise_based_client_call,
additional_constraints_promise_based_client_call,
required_experiments_promise_based_client_call, 2, false, true},
{"chaotic_good", description_chaotic_good,
additional_constraints_chaotic_good, required_experiments_chaotic_good, 1,
false, true},
{"promise_based_inproc_transport",
description_promise_based_inproc_transport,
additional_constraints_promise_based_inproc_transport,
required_experiments_promise_based_inproc_transport, 1, false, false},
additional_constraints_promise_based_inproc_transport, nullptr, 0, false,
false},
{"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0,
false, true},
{"schedule_cancellation_over_write",
@ -210,8 +183,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
{"work_serializer_dispatch", description_work_serializer_dispatch,
additional_constraints_work_serializer_dispatch,
required_experiments_work_serializer_dispatch, 1, false, true},
{"call_v3", description_call_v3, additional_constraints_call_v3,
required_experiments_call_v3, 3, false, false},
};
} // namespace grpc_core
@ -270,24 +241,9 @@ const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
"New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
const char* const description_promise_based_client_call =
"If set, use the new gRPC promise based call code when it's appropriate "
"(ie when all filters in a stack are promise based)";
const char* const additional_constraints_promise_based_client_call = "{}";
const uint8_t required_experiments_promise_based_client_call[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener)};
const char* const description_chaotic_good =
"If set, enable the chaotic good load transport (this is mostly here for "
"testing)";
const char* const additional_constraints_chaotic_good = "{}";
const uint8_t required_experiments_chaotic_good[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_promise_based_inproc_transport =
"Use promises for the in-process transport.";
const char* const additional_constraints_promise_based_inproc_transport = "{}";
const uint8_t required_experiments_promise_based_inproc_transport[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_rstpit =
"On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short "
"duration";
@ -325,12 +281,6 @@ const char* const description_work_serializer_dispatch =
const char* const additional_constraints_work_serializer_dispatch = "{}";
const uint8_t required_experiments_work_serializer_dispatch[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient)};
const char* const description_call_v3 = "Promise-based call version 3.";
const char* const additional_constraints_call_v3 = "{}";
const uint8_t required_experiments_call_v3[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener),
static_cast<uint8_t>(grpc_core::kExperimentIdWorkSerializerDispatch)};
} // namespace
namespace grpc_core {
@ -369,16 +319,10 @@ const ExperimentMetadata g_experiment_metadata[] = {
additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
{"pick_first_new", description_pick_first_new,
additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_client_call", description_promise_based_client_call,
additional_constraints_promise_based_client_call,
required_experiments_promise_based_client_call, 2, false, true},
{"chaotic_good", description_chaotic_good,
additional_constraints_chaotic_good, required_experiments_chaotic_good, 1,
false, true},
{"promise_based_inproc_transport",
description_promise_based_inproc_transport,
additional_constraints_promise_based_inproc_transport,
required_experiments_promise_based_inproc_transport, 1, false, false},
additional_constraints_promise_based_inproc_transport, nullptr, 0, false,
false},
{"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0,
false, true},
{"schedule_cancellation_over_write",
@ -404,8 +348,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
{"work_serializer_dispatch", description_work_serializer_dispatch,
additional_constraints_work_serializer_dispatch,
required_experiments_work_serializer_dispatch, 1, false, true},
{"call_v3", description_call_v3, additional_constraints_call_v3,
required_experiments_call_v3, 3, false, false},
};
} // namespace grpc_core
@ -464,24 +406,9 @@ const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
"New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
const char* const description_promise_based_client_call =
"If set, use the new gRPC promise based call code when it's appropriate "
"(ie when all filters in a stack are promise based)";
const char* const additional_constraints_promise_based_client_call = "{}";
const uint8_t required_experiments_promise_based_client_call[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener)};
const char* const description_chaotic_good =
"If set, enable the chaotic good load transport (this is mostly here for "
"testing)";
const char* const additional_constraints_chaotic_good = "{}";
const uint8_t required_experiments_chaotic_good[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_promise_based_inproc_transport =
"Use promises for the in-process transport.";
const char* const additional_constraints_promise_based_inproc_transport = "{}";
const uint8_t required_experiments_promise_based_inproc_transport[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdPromiseBasedClientCall)};
const char* const description_rstpit =
"On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short "
"duration";
@ -519,12 +446,6 @@ const char* const description_work_serializer_dispatch =
const char* const additional_constraints_work_serializer_dispatch = "{}";
const uint8_t required_experiments_work_serializer_dispatch[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient)};
const char* const description_call_v3 = "Promise-based call version 3.";
const char* const additional_constraints_call_v3 = "{}";
const uint8_t required_experiments_call_v3[] = {
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineClient),
static_cast<uint8_t>(grpc_core::kExperimentIdEventEngineListener),
static_cast<uint8_t>(grpc_core::kExperimentIdWorkSerializerDispatch)};
} // namespace
namespace grpc_core {
@ -563,16 +484,10 @@ const ExperimentMetadata g_experiment_metadata[] = {
additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
{"pick_first_new", description_pick_first_new,
additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_client_call", description_promise_based_client_call,
additional_constraints_promise_based_client_call,
required_experiments_promise_based_client_call, 2, false, true},
{"chaotic_good", description_chaotic_good,
additional_constraints_chaotic_good, required_experiments_chaotic_good, 1,
false, true},
{"promise_based_inproc_transport",
description_promise_based_inproc_transport,
additional_constraints_promise_based_inproc_transport,
required_experiments_promise_based_inproc_transport, 1, false, false},
additional_constraints_promise_based_inproc_transport, nullptr, 0, false,
false},
{"rstpit", description_rstpit, additional_constraints_rstpit, nullptr, 0,
false, true},
{"schedule_cancellation_over_write",
@ -598,8 +513,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
{"work_serializer_dispatch", description_work_serializer_dispatch,
additional_constraints_work_serializer_dispatch,
required_experiments_work_serializer_dispatch, 1, true, true},
{"call_v3", description_call_v3, additional_constraints_call_v3,
required_experiments_call_v3, 3, false, false},
};
} // namespace grpc_core

@ -76,8 +76,6 @@ inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedClientCallEnabled() { return false; }
inline bool IsChaoticGoodEnabled() { return false; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
inline bool IsRstpitEnabled() { return false; }
inline bool IsScheduleCancellationOverWriteEnabled() { return false; }
@ -90,7 +88,6 @@ inline bool IsUnconstrainedMaxQuotaBufferSizeEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_WORK_SERIALIZER_CLEARS_TIME_CACHE
inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
inline bool IsWorkSerializerDispatchEnabled() { return false; }
inline bool IsCallV3Enabled() { return false; }
#elif defined(GPR_WINDOWS)
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
@ -114,8 +111,6 @@ inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedClientCallEnabled() { return false; }
inline bool IsChaoticGoodEnabled() { return false; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
inline bool IsRstpitEnabled() { return false; }
inline bool IsScheduleCancellationOverWriteEnabled() { return false; }
@ -128,7 +123,6 @@ inline bool IsUnconstrainedMaxQuotaBufferSizeEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_WORK_SERIALIZER_CLEARS_TIME_CACHE
inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
inline bool IsWorkSerializerDispatchEnabled() { return false; }
inline bool IsCallV3Enabled() { return false; }
#else
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
@ -152,8 +146,6 @@ inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedClientCallEnabled() { return false; }
inline bool IsChaoticGoodEnabled() { return false; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
inline bool IsRstpitEnabled() { return false; }
inline bool IsScheduleCancellationOverWriteEnabled() { return false; }
@ -167,7 +159,6 @@ inline bool IsUnconstrainedMaxQuotaBufferSizeEnabled() { return false; }
inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
#define GRPC_EXPERIMENT_IS_INCLUDED_WORK_SERIALIZER_DISPATCH
inline bool IsWorkSerializerDispatchEnabled() { return true; }
inline bool IsCallV3Enabled() { return false; }
#endif
#else
@ -187,8 +178,6 @@ enum ExperimentIds {
kExperimentIdMultiping,
kExperimentIdPeerStateBasedFraming,
kExperimentIdPickFirstNew,
kExperimentIdPromiseBasedClientCall,
kExperimentIdChaoticGood,
kExperimentIdPromiseBasedInprocTransport,
kExperimentIdRstpit,
kExperimentIdScheduleCancellationOverWrite,
@ -199,7 +188,6 @@ enum ExperimentIds {
kExperimentIdUnconstrainedMaxQuotaBufferSize,
kExperimentIdWorkSerializerClearsTimeCache,
kExperimentIdWorkSerializerDispatch,
kExperimentIdCallV3,
kNumExperiments
};
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
@ -262,14 +250,6 @@ inline bool IsPeerStateBasedFramingEnabled() {
inline bool IsPickFirstNewEnabled() {
return IsExperimentEnabled(kExperimentIdPickFirstNew);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
inline bool IsPromiseBasedClientCallEnabled() {
return IsExperimentEnabled(kExperimentIdPromiseBasedClientCall);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_CHAOTIC_GOOD
inline bool IsChaoticGoodEnabled() {
return IsExperimentEnabled(kExperimentIdChaoticGood);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_INPROC_TRANSPORT
inline bool IsPromiseBasedInprocTransportEnabled() {
return IsExperimentEnabled(kExperimentIdPromiseBasedInprocTransport);
@ -310,10 +290,6 @@ inline bool IsWorkSerializerClearsTimeCacheEnabled() {
inline bool IsWorkSerializerDispatchEnabled() {
return IsExperimentEnabled(kExperimentIdWorkSerializerDispatch);
}
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_V3
inline bool IsCallV3Enabled() {
return IsExperimentEnabled(kExperimentIdCallV3);
}
extern const ExperimentMetadata g_experiment_metadata[kNumExperiments];
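For orientation, the generated predicates declared above are consulted like this at call sites; the surrounding function is hypothetical, while IsRstpitEnabled() is one of the predicates in this very hunk:

// Hedged sketch of an experiment-gated code path.
void MaybeReduceMaxConcurrentStreams() {
  if (grpc_core::IsRstpitEnabled()) {
    // Only reached when the "rstpit" experiment is enabled for this build/run.
  }
}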

@ -47,13 +47,6 @@
expiry: 2024/08/01
owner: vigneshbabu@google.com
test_tags: []
- name: call_v3
description: Promise-based call version 3.
expiry: 2024/06/01
owner: ctiller@google.com
test_tags: []
requires: ["work_serializer_dispatch", "event_engine_listener", "event_engine_client"]
allow_in_fuzzing_config: false
- name: canary_client_privacy
description:
If set, canary client privacy
@ -61,13 +54,6 @@
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false
- name: chaotic_good
description:
If set, enable the chaotic good load transport (this is mostly here for testing)
expiry: 2024/09/09
owner: ctiller@google.com
requires: [promise_based_client_call]
test_tags: [core_end2end_test]
- name: client_privacy
description:
If set, client privacy
@ -154,14 +140,6 @@
expiry: 2024/07/30
owner: roth@google.com
test_tags: ["lb_unit_test", "cpp_lb_end2end_test", "xds_end2end_test"]
- name: promise_based_client_call
description:
If set, use the new gRPC promise based call code when it's appropriate
(ie when all filters in a stack are promise based)
expiry: 2024/06/14
owner: ctiller@google.com
test_tags: ["core_end2end_test", "lame_client_test"]
requires: ["event_engine_listener", "event_engine_client"]
- name: promise_based_inproc_transport
description:
Use promises for the in-process transport.
@ -169,7 +147,6 @@
owner: ctiller@google.com
test_tags: []
allow_in_fuzzing_config: false # experiment currently crashes if enabled
requires: [promise_based_client_call]
- name: rstpit
description:
On RST_STREAM on a server, reduce MAX_CONCURRENT_STREAMS for a short duration

@ -335,6 +335,7 @@ class DualRefCounted : public Impl {
gpr_log(GPR_INFO, "%s:%p weak_ref %d -> %d; (refs=%d)", trace_, this,
weak_refs, weak_refs + 1, strong_refs);
}
if (strong_refs == 0) CHECK_NE(weak_refs, 0u);
#else
refs_.fetch_add(MakeRefPair(0, 1), std::memory_order_relaxed);
#endif
@ -351,6 +352,7 @@ class DualRefCounted : public Impl {
this, location.file(), location.line(), weak_refs, weak_refs + 1,
strong_refs, reason);
}
if (strong_refs == 0) CHECK_NE(weak_refs, 0u);
#else
// Use conditionally-important parameters
(void)location;

@ -36,15 +36,15 @@ class DumpArgs {
template <typename... Args>
explicit DumpArgs(const char* arg_string, const Args&... args)
: arg_string_(arg_string) {
do_these_things(
{AddDumper([a = &args](std::ostream& os) { os << *a; })...});
do_these_things({AddDumper(&args)...});
}
friend std::ostream& operator<<(std::ostream& out, const DumpArgs& args);
private:
int AddDumper(absl::AnyInvocable<void(std::ostream&) const> dumper) {
arg_dumpers_.push_back(std::move(dumper));
template <typename T>
int AddDumper(T* p) {
arg_dumpers_.push_back([p](std::ostream& os) { os << *p; });
return 0;
}
@ -66,4 +66,4 @@ class DumpArgs {
#define GRPC_DUMP_ARGS(...) \
grpc_core::dump_args_detail::DumpArgs(#__VA_ARGS__, __VA_ARGS__)
#endif // GRPC_SRC_CORE_LIB_GPRPP_DUMP_ARGS_H
#endif // GRPC_SRC_CORE_LIB_GPRPP_DUMP_ARGS_H
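For context on the refactor above (a lambda per argument replaced by a templated AddDumper that stores a pointer per argument), here is a minimal hedged usage sketch of GRPC_DUMP_ARGS. The header path is inferred from the include guard, and the exact rendered output format comes from DumpArgs' operator<<, which this diff does not show, so both are assumptions:

#include <cstddef>
#include <sstream>

#include <grpc/support/log.h>

#include "src/core/lib/gprpp/dump_args.h"  // assumed path, inferred from the guard above

void LogQueueState(int queued, size_t bytes) {
  std::ostringstream out;
  // Each argument is captured by pointer and streamed via its operator<< when
  // the DumpArgs object itself is streamed.
  out << GRPC_DUMP_ARGS(queued, bytes);
  gpr_log(GPR_INFO, "%s", out.str().c_str());
}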

@ -246,6 +246,15 @@ struct UnrefCallDtor {
}
};
// Call the Destroy method on the object. This is useful when the object
// needs precise control of how it's deallocated.
struct UnrefCallDestroy {
template <typename T>
void operator()(T* p) const {
p->Destroy();
}
};
// A base class for reference-counted objects.
// New objects should be created via new and start with a refcount of 1.
// When the refcount reaches 0, executes the specified UnrefBehavior.
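A minimal hedged sketch of what the new UnrefCallDestroy behavior enables: a type whose destructor is private and which instead exposes Destroy(), so reclamation can only happen through the unref path and never through a stray `delete`. The PooledBuffer type below is hypothetical and exists only to show the shape of such an object:

class PooledBuffer {
 public:
  // The object controls its own deallocation; a plain `delete` from outside
  // will not compile because the destructor is private.
  void Destroy() { delete this; }

 private:
  ~PooledBuffer() = default;
};

void Release(PooledBuffer* buf) {
  grpc_core::UnrefCallDestroy()(buf);  // invokes buf->Destroy() rather than delete buf
}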

@ -63,17 +63,19 @@ class SingleSetPtr {
void Reset() { Delete(p_.exchange(nullptr, std::memory_order_acq_rel)); }
bool is_set() const {
T* p = p_.load(std::memory_order_acquire);
T* p = Get();
return p != nullptr;
}
T* Get() const { return p_.load(std::memory_order_acquire); }
T* operator->() const {
T* p = p_.load(std::memory_order_acquire);
T* p = Get();
DCHECK_NE(p, nullptr);
return p;
}
T& operator*() const { return *operator->(); }
T& operator*() const { return *Get(); }
private:
static void Delete(T* p) {

@ -80,6 +80,20 @@ auto OnCancel(MainFn main_fn, CancelFn cancel_fn) {
};
}
// Similar to OnCancel, but returns a factory that uses main_fn to construct the
// resulting promise. If the factory is dropped without being called, cancel_fn
// is called.
template <typename MainFn, typename CancelFn>
auto OnCancelFactory(MainFn main_fn, CancelFn cancel_fn) {
return [on_cancel =
cancel_callback_detail::Handler<CancelFn>(std::move(cancel_fn)),
main_fn = std::move(main_fn)]() mutable {
auto r = main_fn();
on_cancel.Done();
return r;
};
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_PROMISE_CANCEL_CALLBACK_H
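A minimal hedged usage sketch for the new OnCancelFactory combinator above: it wraps a promise factory so that the cancel handler fires only if the factory is destroyed without ever being invoked. The call site is hypothetical, and Immediate() is assumed to be the usual grpc_core promise that resolves right away:

void Example() {
  auto factory = grpc_core::OnCancelFactory(
      // main_fn: builds the promise that does the real work when the factory runs.
      []() { return grpc_core::Immediate(absl::OkStatus()); },
      // cancel_fn: runs only if `factory` is dropped without being called.
      []() { gpr_log(GPR_INFO, "operation abandoned before it started"); });
  // Invoking the factory constructs the promise and marks the cancel handler done.
  auto promise = factory();
  (void)promise;
}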

@ -99,8 +99,7 @@ class BasicSeqIter {
}
cur_ = next;
state_.~State();
Construct(&state_,
Traits::template CallSeqFactory(f_, *cur_, std::move(arg)));
Construct(&state_, Traits::CallSeqFactory(f_, *cur_, std::move(arg)));
return PollNonEmpty();
});
}

@ -647,8 +647,9 @@ template <typename Factory, typename OnComplete>
void Party::BulkSpawner::Spawn(absl::string_view name, Factory promise_factory,
OnComplete on_complete) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "%s[bulk_spawn] On %p queue %s",
party_->DebugTag().c_str(), this, std::string(name).c_str());
gpr_log(GPR_INFO, "%s[bulk_spawn] On %p queue %s (%" PRIdPTR " bytes)",
party_->DebugTag().c_str(), this, std::string(name).c_str(),
sizeof(ParticipantImpl<Factory, OnComplete>));
}
participants_[num_participants_++] = new ParticipantImpl<Factory, OnComplete>(
name, std::move(promise_factory), std::move(on_complete));

File diff suppressed because it is too large.

@ -86,7 +86,7 @@ class Call : public CppImplOf<Call, grpc_call>,
public grpc_event_engine::experimental::EventEngine::
Closure /* for deadlines */ {
public:
virtual Arena* arena() = 0;
Arena* arena() { return arena_.get(); }
bool is_client() const { return is_client_; }
virtual bool Completed() = 0;
@ -112,10 +112,7 @@ class Call : public CppImplOf<Call, grpc_call>,
return deadline_;
}
grpc_compression_algorithm test_only_compression_algorithm() {
return incoming_compression_algorithm_;
}
uint32_t test_only_message_flags() { return test_only_last_message_flags_; }
virtual uint32_t test_only_message_flags() = 0;
CompressionAlgorithmSet encodings_accepted_by_peer() {
return encodings_accepted_by_peer_;
}
@ -125,14 +122,20 @@ class Call : public CppImplOf<Call, grpc_call>,
virtual grpc_call_stack* call_stack() = 0;
// Return the EventEngine used for this call's async execution.
virtual grpc_event_engine::experimental::EventEngine* event_engine()
const = 0;
grpc_event_engine::experimental::EventEngine* event_engine() const {
return event_engine_;
}
// Implementation of EventEngine::Closure, called when deadline expires
void Run() final;
gpr_cycle_counter start_time() const { return start_time_; }
void set_traced(bool traced) { traced_ = traced; }
bool traced() const { return traced_; }
virtual grpc_compression_algorithm incoming_compression_algorithm() = 0;
protected:
// The maximum number of concurrent batches possible.
// Based upon the maximum number of individually queueable ops in the batch
@ -160,11 +163,8 @@ class Call : public CppImplOf<Call, grpc_call>,
Call* sibling_prev = nullptr;
};
Call(bool is_client, Timestamp send_deadline,
grpc_event_engine::experimental::EventEngine* event_engine)
: send_deadline_(send_deadline),
is_client_(is_client),
event_engine_(event_engine) {}
Call(bool is_client, Timestamp send_deadline, RefCountedPtr<Arena> arena,
grpc_event_engine::experimental::EventEngine* event_engine);
~Call() override = default;
ParentCall* GetOrCreateParentCall();
@ -200,12 +200,6 @@ class Call : public CppImplOf<Call, grpc_call>,
// internal headers against external modification.
void PrepareOutgoingInitialMetadata(const grpc_op& op,
grpc_metadata_batch& md);
void NoteLastMessageFlags(uint32_t flags) {
test_only_last_message_flags_ = flags;
}
grpc_compression_algorithm incoming_compression_algorithm() const {
return incoming_compression_algorithm_;
}
void HandleCompressionAlgorithmDisabled(
grpc_compression_algorithm compression_algorithm) GPR_ATTRIBUTE_NOINLINE;
@ -214,20 +208,22 @@ class Call : public CppImplOf<Call, grpc_call>,
virtual grpc_compression_options compression_options() = 0;
virtual void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) = 0;
private:
const RefCountedPtr<Arena> arena_;
std::atomic<ParentCall*> parent_call_{nullptr};
ChildCall* child_ = nullptr;
Timestamp send_deadline_;
const bool is_client_;
// flag indicating that cancellation is inherited
bool cancellation_is_inherited_ = false;
// Compression algorithm for *incoming* data
grpc_compression_algorithm incoming_compression_algorithm_ =
GRPC_COMPRESS_NONE;
// Is this call traced?
bool traced_ = false;
// Supported encodings (compression algorithms), a bitset.
// Always support no compression.
CompressionAlgorithmSet encodings_accepted_by_peer_{GRPC_COMPRESS_NONE};
uint32_t test_only_last_message_flags_ = 0;
// Peer name is protected by a mutex because it can be accessed by the
// application at the same moment as it is being set by the completion
// of the recv_initial_metadata op. The mutex should be mostly uncontended.
@ -247,66 +243,6 @@ struct ArenaContextType<Call> {
static void Destroy(Call*) {}
};
class BasicPromiseBasedCall;
class ServerPromiseBasedCall;
// TODO(ctiller): move more call things into this type
class CallContext {
public:
explicit CallContext(BasicPromiseBasedCall* call) : call_(call) {}
// Run some action in the call activity context. This is needed to adapt some
// legacy systems to promises, and will likely disappear once that conversion
// is complete.
void RunInContext(absl::AnyInvocable<void()> fn);
// TODO(ctiller): remove this once transport APIs are promise based
void IncrementRefCount(const char* reason = "call_context");
// TODO(ctiller): remove this once transport APIs are promise based
void Unref(const char* reason = "call_context");
RefCountedPtr<CallContext> Ref() {
IncrementRefCount();
return RefCountedPtr<CallContext>(this);
}
grpc_call_stats* call_stats() { return &call_stats_; }
gpr_atm* peer_string_atm_ptr();
gpr_cycle_counter call_start_time() { return start_time_; }
void set_traced(bool traced) { traced_ = traced; }
bool traced() const { return traced_; }
// TEMPORARY HACK
// Create a call spine object for this call.
// Said object should only be created once.
// Allows interop between the v2 call stack and the v3 (which is required by
// transports).
RefCountedPtr<CallSpineInterface> MakeCallSpine(CallArgs call_args);
grpc_call* c_call();
private:
friend class PromiseBasedCall;
// Call final info.
grpc_call_stats call_stats_;
// TODO(ctiller): remove this once transport APIs are promise based and we
// don't need refcounting here.
BasicPromiseBasedCall* const call_;
gpr_cycle_counter start_time_ = gpr_get_cycle_counter();
// Is this call traced?
bool traced_ = false;
};
template <>
struct ContextType<CallContext> {};
// TODO(ctiller): remove once call-v3 finalized
grpc_call* MakeServerCall(CallHandler call_handler,
ClientMetadataHandle client_initial_metadata,
ServerInterface* server, grpc_completion_queue* cq,
grpc_metadata_array* publish_initial_metadata);
} // namespace grpc_core
// Create a new call based on \a args.
@ -349,11 +285,6 @@ void* grpc_call_tracer_get(grpc_call* call);
uint8_t grpc_call_is_client(grpc_call* call);
// Get the estimated memory size for a call BESIDES the call stack. Combined
// with the size of the call stack, it helps estimate the arena size for the
// initial call.
size_t grpc_call_get_initial_size_estimate();
// Return an appropriate compression algorithm for the requested compression \a
// level in the context of \a call.
grpc_compression_algorithm grpc_call_compression_for_level(

@ -0,0 +1,286 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/call_utils.h"
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/match.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/validate_metadata.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
void PublishMetadataArray(grpc_metadata_batch* md, grpc_metadata_array* array,
bool is_client) {
const auto md_count = md->count();
if (md_count > array->capacity) {
array->capacity =
std::max(array->capacity + md->count(), array->capacity * 3 / 2);
array->metadata = static_cast<grpc_metadata*>(
gpr_realloc(array->metadata, sizeof(grpc_metadata) * array->capacity));
}
PublishToAppEncoder encoder(array, md, is_client);
md->Encode(&encoder);
}
void CToMetadata(grpc_metadata* metadata, size_t count,
grpc_metadata_batch* b) {
for (size_t i = 0; i < count; i++) {
grpc_metadata* md = &metadata[i];
auto key = StringViewFromSlice(md->key);
// Filter "content-length" metadata
if (key == "content-length") continue;
b->Append(key, Slice(CSliceRef(md->value)),
[md](absl::string_view error, const Slice& value) {
gpr_log(GPR_DEBUG, "Append error: %s",
absl::StrCat("key=", StringViewFromSlice(md->key),
" error=", error,
" value=", value.as_string_view())
.c_str());
});
}
}
const char* GrpcOpTypeName(grpc_op_type op) {
switch (op) {
case GRPC_OP_SEND_INITIAL_METADATA:
return "SendInitialMetadata";
case GRPC_OP_SEND_MESSAGE:
return "SendMessage";
case GRPC_OP_SEND_STATUS_FROM_SERVER:
return "SendStatusFromServer";
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
return "SendCloseFromClient";
case GRPC_OP_RECV_MESSAGE:
return "RecvMessage";
case GRPC_OP_RECV_CLOSE_ON_SERVER:
return "RecvCloseOnServer";
case GRPC_OP_RECV_INITIAL_METADATA:
return "RecvInitialMetadata";
case GRPC_OP_RECV_STATUS_ON_CLIENT:
return "RecvStatusOnClient";
}
Crash("Unreachable");
}
////////////////////////////////////////////////////////////////////////
// WaitForCqEndOp
Poll<Empty> WaitForCqEndOp::operator()() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sWaitForCqEndOp[%p] %s",
Activity::current()->DebugTag().c_str(), this,
StateString(state_).c_str());
}
if (auto* n = absl::get_if<NotStarted>(&state_)) {
if (n->is_closure) {
ExecCtx::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(n->tag),
std::move(n->error));
return Empty{};
} else {
auto not_started = std::move(*n);
auto& started =
state_.emplace<Started>(GetContext<Activity>()->MakeOwningWaker());
grpc_cq_end_op(
not_started.cq, not_started.tag, std::move(not_started.error),
[](void* p, grpc_cq_completion*) {
auto started = static_cast<Started*>(p);
auto wakeup = std::move(started->waker);
started->done.store(true, std::memory_order_release);
wakeup.Wakeup();
},
&started, &started.completion);
}
}
auto& started = absl::get<Started>(state_);
if (started.done.load(std::memory_order_acquire)) {
return Empty{};
} else {
return Pending{};
}
}
std::string WaitForCqEndOp::StateString(const State& state) {
return Match(
state,
[](const NotStarted& x) {
return absl::StrFormat(
"NotStarted{is_closure=%s, tag=%p, error=%s, cq=%p}",
x.is_closure ? "true" : "false", x.tag, x.error.ToString(), x.cq);
},
[](const Started& x) {
return absl::StrFormat(
"Started{completion=%p, done=%s}", &x.completion,
x.done.load(std::memory_order_relaxed) ? "true" : "false");
},
[](const Invalid&) -> std::string { return "Invalid{}"; });
}
////////////////////////////////////////////////////////////////////////
// MessageReceiver
StatusFlag MessageReceiver::FinishRecvMessage(
ValueOrFailure<absl::optional<MessageHandle>> result) {
if (!result.ok()) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[call] RecvMessage: outstanding_recv "
"finishes: received end-of-stream with error",
Activity::current()->DebugTag().c_str());
}
*recv_message_ = nullptr;
recv_message_ = nullptr;
return Failure{};
}
if (!result->has_value()) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[call] RecvMessage: outstanding_recv "
"finishes: received end-of-stream",
Activity::current()->DebugTag().c_str());
}
*recv_message_ = nullptr;
recv_message_ = nullptr;
return Success{};
}
MessageHandle& message = **result;
test_only_last_message_flags_ = message->flags();
if ((message->flags() & GRPC_WRITE_INTERNAL_COMPRESS) &&
(incoming_compression_algorithm_ != GRPC_COMPRESS_NONE)) {
*recv_message_ = grpc_raw_compressed_byte_buffer_create(
nullptr, 0, incoming_compression_algorithm_);
} else {
*recv_message_ = grpc_raw_byte_buffer_create(nullptr, 0);
}
grpc_slice_buffer_move_into(message->payload()->c_slice_buffer(),
&(*recv_message_)->data.raw.slice_buffer);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO,
"%s[call] RecvMessage: outstanding_recv "
"finishes: received %" PRIdPTR " byte message",
Activity::current()->DebugTag().c_str(),
(*recv_message_)->data.raw.slice_buffer.length);
}
recv_message_ = nullptr;
return Success{};
}
////////////////////////////////////////////////////////////////////////
// MakeErrorString
std::string MakeErrorString(const ServerMetadata* trailing_metadata) {
std::string out = absl::StrCat(
trailing_metadata->get(GrpcStatusFromWire()).value_or(false)
? "Error received from peer"
: "Error generated by client",
"grpc_status: ",
grpc_status_code_to_string(trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN)));
if (const Slice* message =
trailing_metadata->get_pointer(GrpcMessageMetadata())) {
absl::StrAppend(&out, "\ngrpc_message: ", message->as_string_view());
}
if (auto annotations = trailing_metadata->get_pointer(GrpcStatusContext())) {
absl::StrAppend(&out, "\nStatus Context:");
for (const std::string& annotation : *annotations) {
absl::StrAppend(&out, "\n ", annotation);
}
}
return out;
}
bool ValidateMetadata(size_t count, grpc_metadata* metadata) {
if (count > INT_MAX) {
return false;
}
for (size_t i = 0; i < count; i++) {
grpc_metadata* md = &metadata[i];
if (!GRPC_LOG_IF_ERROR("validate_metadata",
grpc_validate_header_key_is_legal(md->key))) {
return false;
} else if (!grpc_is_binary_header_internal(md->key) &&
!GRPC_LOG_IF_ERROR(
"validate_metadata",
grpc_validate_header_nonbin_value_is_legal(md->value))) {
return false;
} else if (GRPC_SLICE_LENGTH(md->value) >= UINT32_MAX) {
// HTTP2 hpack encoding has a maximum limit.
return false;
}
}
return true;
}
void EndOpImmediately(grpc_completion_queue* cq, void* notify_tag,
bool is_notify_tag_closure) {
if (!is_notify_tag_closure) {
CHECK(grpc_cq_begin_op(cq, notify_tag));
grpc_cq_end_op(
cq, notify_tag, absl::OkStatus(),
[](void*, grpc_cq_completion* completion) { gpr_free(completion); },
nullptr,
static_cast<grpc_cq_completion*>(
gpr_malloc(sizeof(grpc_cq_completion))));
} else {
Closure::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(notify_tag),
absl::OkStatus());
}
}
} // namespace grpc_core

@ -0,0 +1,457 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_CALL_UTILS_H
#define GRPC_SRC_CORE_LIB_SURFACE_CALL_UTILS_H
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/cancel_callback.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/seq.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/message.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
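// Encoder that copies selected metadata from a grpc_metadata_batch into an
// application-visible grpc_metadata_array. The destination array must already
// have enough capacity (PublishMetadataArray ensures this); exceeding it is a
// Crash().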
class PublishToAppEncoder {
public:
explicit PublishToAppEncoder(grpc_metadata_array* dest,
const grpc_metadata_batch* encoding,
bool is_client)
: dest_(dest), encoding_(encoding), is_client_(is_client) {}
void Encode(const Slice& key, const Slice& value) {
Append(key.c_slice(), value.c_slice());
}
// Catch anything that is not explicitly handled, and do not publish it to the
// application. If new metadata is added to a batch that needs to be
// published, it should be called out here.
template <typename Which>
void Encode(Which, const typename Which::ValueType&) {}
void Encode(UserAgentMetadata, const Slice& slice) {
Append(UserAgentMetadata::key(), slice);
}
void Encode(HostMetadata, const Slice& slice) {
Append(HostMetadata::key(), slice);
}
void Encode(GrpcPreviousRpcAttemptsMetadata, uint32_t count) {
Append(GrpcPreviousRpcAttemptsMetadata::key(), count);
}
void Encode(GrpcRetryPushbackMsMetadata, Duration count) {
Append(GrpcRetryPushbackMsMetadata::key(), count.millis());
}
void Encode(LbTokenMetadata, const Slice& slice) {
Append(LbTokenMetadata::key(), slice);
}
private:
void Append(absl::string_view key, int64_t value) {
Append(StaticSlice::FromStaticString(key).c_slice(),
Slice::FromInt64(value).c_slice());
}
void Append(absl::string_view key, const Slice& value) {
Append(StaticSlice::FromStaticString(key).c_slice(), value.c_slice());
}
void Append(grpc_slice key, grpc_slice value) {
if (dest_->count == dest_->capacity) {
Crash(absl::StrCat(
"Too many metadata entries: capacity=", dest_->capacity, " on ",
is_client_ ? "client" : "server", " encoding ", encoding_->count(),
" elements: ", encoding_->DebugString().c_str()));
}
auto* mdusr = &dest_->metadata[dest_->count++];
mdusr->key = key;
mdusr->value = value;
}
grpc_metadata_array* const dest_;
const grpc_metadata_batch* const encoding_;
const bool is_client_;
};
void PublishMetadataArray(grpc_metadata_batch* md, grpc_metadata_array* array,
bool is_client);
void CToMetadata(grpc_metadata* metadata, size_t count, grpc_metadata_batch* b);
const char* GrpcOpTypeName(grpc_op_type op);
bool ValidateMetadata(size_t count, grpc_metadata* metadata);
void EndOpImmediately(grpc_completion_queue* cq, void* notify_tag,
bool is_notify_tag_closure);
inline bool AreWriteFlagsValid(uint32_t flags) {
// check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set
const uint32_t allowed_write_positions =
(GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK);
const uint32_t invalid_positions = ~allowed_write_positions;
return !(flags & invalid_positions);
}
inline bool AreInitialMetadataFlagsValid(uint32_t flags) {
// check that only bits in GRPC_INITIAL_METADATA_USED_MASK are set
uint32_t invalid_positions = ~GRPC_INITIAL_METADATA_USED_MASK;
return !(flags & invalid_positions);
}
// One batch operation
// Wrapper around promise steps to perform one of the batch operations for the
// legacy grpc surface api.
template <typename SetupResult, grpc_op_type kOp>
class OpHandlerImpl {
public:
using PromiseFactory = promise_detail::OncePromiseFactory<void, SetupResult>;
using Promise = typename PromiseFactory::Promise;
static_assert(!std::is_same<Promise, void>::value,
"PromiseFactory must return a promise");
OpHandlerImpl() : state_(State::kDismissed) {}
explicit OpHandlerImpl(SetupResult result) : state_(State::kPromiseFactory) {
Construct(&promise_factory_, std::move(result));
}
~OpHandlerImpl() {
switch (state_) {
case State::kDismissed:
break;
case State::kPromiseFactory:
Destruct(&promise_factory_);
break;
case State::kPromise:
Destruct(&promise_);
break;
}
}
OpHandlerImpl(const OpHandlerImpl&) = delete;
OpHandlerImpl& operator=(const OpHandlerImpl&) = delete;
OpHandlerImpl(OpHandlerImpl&& other) noexcept : state_(other.state_) {
switch (state_) {
case State::kDismissed:
break;
case State::kPromiseFactory:
Construct(&promise_factory_, std::move(other.promise_factory_));
break;
case State::kPromise:
Construct(&promise_, std::move(other.promise_));
break;
}
}
OpHandlerImpl& operator=(OpHandlerImpl&& other) noexcept = delete;
Poll<StatusFlag> operator()() {
switch (state_) {
case State::kDismissed:
return Success{};
case State::kPromiseFactory: {
auto promise = promise_factory_.Make();
Destruct(&promise_factory_);
Construct(&promise_, std::move(promise));
state_ = State::kPromise;
}
ABSL_FALLTHROUGH_INTENDED;
case State::kPromise: {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "%sBeginPoll %s",
Activity::current()->DebugTag().c_str(), OpName());
}
auto r = poll_cast<StatusFlag>(promise_());
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_INFO, "%sEndPoll %s --> %s",
Activity::current()->DebugTag().c_str(), OpName(),
r.pending() ? "PENDING" : (r.value().ok() ? "OK" : "FAILURE"));
}
return r;
}
}
GPR_UNREACHABLE_CODE(return Pending{});
}
private:
enum class State {
kDismissed,
kPromiseFactory,
kPromise,
};
static const char* OpName() { return GrpcOpTypeName(kOp); }
// gcc-12 has problems with this being a variant
GPR_NO_UNIQUE_ADDRESS State state_;
union {
PromiseFactory promise_factory_;
Promise promise_;
};
};
template <grpc_op_type op_type, typename PromiseFactory>
auto OpHandler(PromiseFactory setup) {
return OpHandlerImpl<PromiseFactory, op_type>(std::move(setup));
}
class BatchOpIndex {
public:
BatchOpIndex(const grpc_op* ops, size_t nops) : ops_(ops) {
for (size_t i = 0; i < nops; i++) {
idxs_[ops[i].op] = static_cast<uint8_t>(i);
}
}
// 1. Check if op_type is in the batch
// 2. If it is, run the setup function in the context of the API call (NOT in
// the call party).
// 3. This setup function returns a promise factory which we'll then run *in*
// the party to do initial setup, and have it return the promise that we'll
// ultimately poll on until completion.
// Once we express our surface API in terms of core internal types this whole
// dance will go away.
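// Example (sketch, mirroring the usage in ClientCall::CommitBatch below):
//   BatchOpIndex op_index(ops, nops);
//   auto send_message =
//       op_index.OpHandler<GRPC_OP_SEND_MESSAGE>([this](const grpc_op& op) {
//         // Setup runs immediately, in the API call...
//         auto msg = ...;
//         // ...and returns a promise factory that is later run in the party.
//         return [this, msg = std::move(msg)]() mutable {
//           return started_call_initiator_.PushMessage(std::move(msg));
//         };
//       });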
template <grpc_op_type op_type, typename SetupFn>
auto OpHandler(SetupFn setup) {
using SetupResult = decltype(std::declval<SetupFn>()(grpc_op()));
using Impl = OpHandlerImpl<SetupResult, op_type>;
if (const grpc_op* op = this->op(op_type)) {
auto r = setup(*op);
return Impl(std::move(r));
} else {
return Impl();
}
}
const grpc_op* op(grpc_op_type op_type) const {
return idxs_[op_type] == 255 ? nullptr : &ops_[idxs_[op_type]];
}
private:
const grpc_op* const ops_;
std::array<uint8_t, 8> idxs_{255, 255, 255, 255, 255, 255, 255, 255};
};
// Defines a promise that calls grpc_cq_end_op() (on first poll) and then waits
// for the callback supplied to grpc_cq_end_op() to be called, before resolving
// to Empty{}
class WaitForCqEndOp {
public:
WaitForCqEndOp(bool is_closure, void* tag, grpc_error_handle error,
grpc_completion_queue* cq)
: state_{NotStarted{is_closure, tag, std::move(error), cq}} {}
Poll<Empty> operator()();
WaitForCqEndOp(const WaitForCqEndOp&) = delete;
WaitForCqEndOp& operator=(const WaitForCqEndOp&) = delete;
WaitForCqEndOp(WaitForCqEndOp&& other) noexcept
: state_(std::move(absl::get<NotStarted>(other.state_))) {
other.state_.emplace<Invalid>();
}
WaitForCqEndOp& operator=(WaitForCqEndOp&& other) noexcept {
state_ = std::move(absl::get<NotStarted>(other.state_));
other.state_.emplace<Invalid>();
return *this;
}
private:
struct NotStarted {
bool is_closure;
void* tag;
grpc_error_handle error;
grpc_completion_queue* cq;
};
struct Started {
explicit Started(Waker waker) : waker(std::move(waker)) {}
Waker waker;
grpc_cq_completion completion;
std::atomic<bool> done{false};
};
struct Invalid {};
using State = absl::variant<NotStarted, Started, Invalid>;
static std::string StateString(const State& state);
State state_{Invalid{}};
};
template <typename FalliblePart, typename FinalPart>
auto InfallibleBatch(FalliblePart fallible_part, FinalPart final_part,
bool is_notify_tag_closure, void* notify_tag,
grpc_completion_queue* cq) {
// Perform fallible_part, then final_part, then wait for the
// completion queue to be done.
// If cancelled, we'll ensure the completion queue is notified.
// There's a slight bug here in that if we cancel this promise after
// the WaitForCqEndOp we'll double post -- but we don't currently do that.
return OnCancelFactory(
[fallible_part = std::move(fallible_part),
final_part = std::move(final_part), is_notify_tag_closure, notify_tag,
cq]() mutable {
return LogPollBatch(notify_tag,
Seq(std::move(fallible_part), std::move(final_part),
[is_notify_tag_closure, notify_tag, cq]() {
return WaitForCqEndOp(is_notify_tag_closure,
notify_tag,
absl::OkStatus(), cq);
}));
},
[cq, notify_tag]() {
grpc_cq_end_op(
cq, notify_tag, absl::OkStatus(),
[](void*, grpc_cq_completion* completion) { delete completion; },
nullptr, new grpc_cq_completion);
});
}
template <typename FalliblePart>
auto FallibleBatch(FalliblePart fallible_part, bool is_notify_tag_closure,
void* notify_tag, grpc_completion_queue* cq) {
// Perform fallible_part, then wait for the completion queue to be done.
// If cancelled, we'll ensure the completion queue is notified.
// There's a slight bug here in that if we cancel this promise after
// the WaitForCqEndOp we'll double post -- but we don't currently do that.
return OnCancelFactory(
[fallible_part = std::move(fallible_part), is_notify_tag_closure,
notify_tag, cq]() mutable {
return LogPollBatch(
notify_tag,
Seq(std::move(fallible_part),
[is_notify_tag_closure, notify_tag, cq](StatusFlag r) {
return WaitForCqEndOp(is_notify_tag_closure, notify_tag,
StatusCast<absl::Status>(r), cq);
}));
},
[cq]() {
grpc_cq_end_op(
cq, nullptr, absl::CancelledError(),
[](void*, grpc_cq_completion* completion) { delete completion; },
nullptr, new grpc_cq_completion);
});
}
template <typename F>
class PollBatchLogger {
public:
PollBatchLogger(void* tag, F f) : tag_(tag), f_(std::move(f)) {}
auto operator()() {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "Poll batch %p", tag_);
}
auto r = f_();
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "Poll batch %p --> %s", tag_, ResultString(r).c_str());
}
return r;
}
private:
template <typename T>
static std::string ResultString(Poll<T> r) {
if (r.pending()) return "PENDING";
return ResultString(r.value());
}
static std::string ResultString(Empty) { return "DONE"; }
void* tag_;
F f_;
};
template <typename F>
PollBatchLogger<F> LogPollBatch(void* tag, F f) {
return PollBatchLogger<F>(tag, std::move(f));
}
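// Helper for handling GRPC_OP_RECV_MESSAGE: remembers the negotiated incoming
// compression algorithm and the last message's flags, and adapts a
// Puller::PullMessage() result into the grpc_byte_buffer supplied by the
// application.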
class MessageReceiver {
public:
grpc_compression_algorithm incoming_compression_algorithm() const {
return incoming_compression_algorithm_;
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm incoming_compression_algorithm) {
incoming_compression_algorithm_ = incoming_compression_algorithm;
}
uint32_t last_message_flags() const { return test_only_last_message_flags_; }
template <typename Puller>
auto MakeBatchOp(const grpc_op& op, Puller* puller) {
CHECK_EQ(recv_message_, nullptr);
recv_message_ = op.data.recv_message.recv_message;
return [this, puller]() mutable {
return Map(puller->PullMessage(),
[this](ValueOrFailure<absl::optional<MessageHandle>> msg) {
return FinishRecvMessage(std::move(msg));
});
};
}
private:
StatusFlag FinishRecvMessage(
ValueOrFailure<absl::optional<MessageHandle>> result);
grpc_byte_buffer** recv_message_ = nullptr;
uint32_t test_only_last_message_flags_ = 0;
// Compression algorithm for incoming data
grpc_compression_algorithm incoming_compression_algorithm_ =
GRPC_COMPRESS_NONE;
};
std::string MakeErrorString(const ServerMetadata* trailing_metadata);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_CALL_UTILS_H

@ -44,6 +44,7 @@
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/call_arena_allocator.h"
#include "src/core/lib/transport/call_destination.h"
#include "src/core/lib/transport/connectivity_state.h"
// Forward declaration to avoid dependency loop.
@ -54,7 +55,7 @@ namespace grpc_core {
// Forward declaration to avoid dependency loop.
class Transport;
class Channel : public InternallyRefCounted<Channel>,
class Channel : public UnstartedCallDestination,
public CppImplOf<Channel, grpc_channel> {
public:
struct RegisteredCall {
@ -68,18 +69,6 @@ class Channel : public InternallyRefCounted<Channel>,
~RegisteredCall();
};
// Though internally ref counted channels expose their "Ref" method to
// create a RefCountedPtr to themselves. The OrphanablePtr owner is the
// singleton decision maker on whether the channel should be destroyed or
// not.
// TODO(ctiller): in a future change (I have it written) these will be removed
// and substituted with DualRefCounted<Channel> as a base.
RefCountedPtr<Channel> Ref() { return InternallyRefCounted<Channel>::Ref(); }
template <typename T>
RefCountedPtr<T> RefAsSubclass() {
return InternallyRefCounted<Channel>::RefAsSubclass<T>();
}
virtual bool IsLame() const = 0;
// TODO(roth): This should return a C++ type.
@ -164,7 +153,7 @@ class Channel : public InternallyRefCounted<Channel>,
/// The same as grpc_channel_destroy, but doesn't create an ExecCtx, and so
/// is safe to use from within core.
inline void grpc_channel_destroy_internal(grpc_channel* channel) {
grpc_core::Channel::FromC(channel)->Orphan();
grpc_core::Channel::FromC(channel)->Unref();
}
// Return the channel's compression options.

@ -14,6 +14,8 @@
// limitations under the License.
//
#include "src/core/lib/surface/channel_create.h"
#include "absl/log/check.h"
#include <grpc/grpc.h>
@ -34,7 +36,7 @@
namespace grpc_core {
absl::StatusOr<OrphanablePtr<Channel>> ChannelCreate(
absl::StatusOr<RefCountedPtr<Channel>> ChannelCreate(
std::string target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type, Transport* optional_transport) {
global_stats().IncrementClientChannelsCreated();
@ -80,7 +82,7 @@ absl::StatusOr<OrphanablePtr<Channel>> ChannelCreate(
args = args.SetObject(optional_transport);
}
// Delegate to appropriate channel impl.
if (!IsCallV3Enabled()) {
if (!args.GetBool(GRPC_ARG_USE_V3_STACK).value_or(false)) {
return LegacyChannel::Create(std::move(target), std::move(args),
channel_stack_type);
}

@ -24,16 +24,17 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_stack_type.h"
#define GRPC_ARG_USE_V3_STACK "grpc.internal.use_v3_stack"
namespace grpc_core {
class Transport;
// Creates a client channel.
absl::StatusOr<OrphanablePtr<Channel>> ChannelCreate(
absl::StatusOr<RefCountedPtr<Channel>> ChannelCreate(
std::string target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type, Transport* optional_transport);

@ -251,8 +251,8 @@ ChannelInit::StackConfig ChannelInit::BuildStackConfig(
MutexLock lock(m);
// List the channel stack type (since we'll be repeatedly printing graphs in
// this loop).
gpr_log(GPR_INFO,
"ORDERED CHANNEL STACK %s:", grpc_channel_stack_type_string(type));
LOG(INFO) << "ORDERED CHANNEL STACK "
<< grpc_channel_stack_type_string(type) << ":";
// First build up a map of filter -> file:line: strings, because it helps
// the readability of this log to get later fields aligned vertically.
std::map<const grpc_channel_filter*, std::string> loc_strs;

@ -0,0 +1,423 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/client_call.h"
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/bitset.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/all_ok.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/telemetry/stats.h"
#include "src/core/telemetry/stats_data.h"
namespace grpc_core {
namespace {
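// Checks a client batch before it is committed: each op may appear at most
// once, server-only ops are rejected, and op flags and metadata must be
// valid.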
grpc_call_error ValidateClientBatch(const grpc_op* ops, size_t nops) {
BitSet<8> got_ops;
for (size_t op_idx = 0; op_idx < nops; op_idx++) {
const grpc_op& op = ops[op_idx];
switch (op.op) {
case GRPC_OP_SEND_INITIAL_METADATA:
if (!AreInitialMetadataFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
if (!ValidateMetadata(op.data.send_initial_metadata.count,
op.data.send_initial_metadata.metadata)) {
return GRPC_CALL_ERROR_INVALID_METADATA;
}
break;
case GRPC_OP_SEND_MESSAGE:
if (!AreWriteFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
case GRPC_OP_RECV_INITIAL_METADATA:
case GRPC_OP_RECV_MESSAGE:
case GRPC_OP_RECV_STATUS_ON_CLIENT:
if (op.flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
case GRPC_OP_SEND_STATUS_FROM_SERVER:
return GRPC_CALL_ERROR_NOT_ON_CLIENT;
}
if (got_ops.is_set(op.op)) return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
got_ops.set(op.op);
}
return GRPC_CALL_OK;
}
} // namespace
ClientCall::ClientCall(
grpc_call*, uint32_t, grpc_completion_queue* cq, Slice path,
absl::optional<Slice> authority, bool registered_method, Timestamp deadline,
grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination)
: Call(false, deadline, std::move(arena), event_engine),
cq_(cq),
call_destination_(std::move(destination)),
compression_options_(compression_options) {
global_stats().IncrementClientCallsCreated();
send_initial_metadata_->Set(HttpPathMetadata(), std::move(path));
if (authority.has_value()) {
send_initial_metadata_->Set(HttpAuthorityMetadata(), std::move(*authority));
}
send_initial_metadata_->Set(
GrpcRegisteredMethod(),
reinterpret_cast<void*>(static_cast<uintptr_t>(registered_method)));
if (deadline != Timestamp::InfFuture()) {
send_initial_metadata_->Set(GrpcTimeoutMetadata(), deadline);
UpdateDeadline(deadline);
}
}
grpc_call_error ClientCall::StartBatch(const grpc_op* ops, size_t nops,
void* notify_tag,
bool is_notify_tag_closure) {
if (nops == 0) {
EndOpImmediately(cq_, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
const grpc_call_error validation_result = ValidateClientBatch(ops, nops);
if (validation_result != GRPC_CALL_OK) {
return validation_result;
}
CommitBatch(ops, nops, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
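// Records |error| as the cancellation status and then drives the call_state_
// machine: an unstarted call moves directly to kCancelled, a started call has
// the cancellation spawned into the call party, and any queued UnorderedStart
// batches are deleted.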
void ClientCall::CancelWithError(grpc_error_handle error) {
cancel_status_.Set(new absl::Status(error));
auto cur_state = call_state_.load(std::memory_order_acquire);
while (true) {
if (grpc_call_trace.enabled()) {
LOG(INFO) << DebugTag() << "CancelWithError "
<< GRPC_DUMP_ARGS(cur_state, error);
}
switch (cur_state) {
case kCancelled:
return;
case kUnstarted:
if (call_state_.compare_exchange_strong(cur_state, kCancelled,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
return;
}
break;
case kStarted:
started_call_initiator_.SpawnInfallible(
"CancelWithError", [self = WeakRefAsSubclass<ClientCall>(),
error = std::move(error)]() mutable {
self->started_call_initiator_.Cancel(std::move(error));
return Empty{};
});
return;
default:
if (call_state_.compare_exchange_strong(cur_state, kCancelled,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
auto* unordered_start = reinterpret_cast<UnorderedStart*>(cur_state);
while (unordered_start != nullptr) {
auto next = unordered_start->next;
delete unordered_start;
unordered_start = next;
}
return;
}
}
}
}
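// Spawns |batch| into the call party if the call has already started;
// otherwise queues it as an UnorderedStart node so that StartCall() can spawn
// it later. Batches scheduled on a cancelled call are dropped.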
template <typename Batch>
void ClientCall::ScheduleCommittedBatch(Batch batch) {
auto cur_state = call_state_.load(std::memory_order_acquire);
while (true) {
switch (cur_state) {
case kUnstarted:
default: { // UnorderedStart
auto pending = std::make_unique<UnorderedStart>();
pending->start_pending_batch = [this,
batch = std::move(batch)]() mutable {
started_call_initiator_.SpawnInfallible("batch", std::move(batch));
};
while (true) {
pending->next = reinterpret_cast<UnorderedStart*>(cur_state);
if (call_state_.compare_exchange_strong(
cur_state, reinterpret_cast<uintptr_t>(pending.get()),
std::memory_order_acq_rel, std::memory_order_acquire)) {
std::ignore = pending.release();
return;
}
if (cur_state == kStarted) {
pending->start_pending_batch();
return;
}
if (cur_state == kCancelled) {
return;
}
}
}
case kStarted:
started_call_initiator_.SpawnInfallible("batch", std::move(batch));
return;
case kCancelled:
return;
}
}
}
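// Consumes the buffered initial metadata to create the call pair, hands the
// handler to the call destination, and transitions call_state_ to kStarted,
// spawning batches that were queued while the call was unstarted.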
void ClientCall::StartCall(const grpc_op& send_initial_metadata_op) {
auto cur_state = call_state_.load(std::memory_order_acquire);
CToMetadata(send_initial_metadata_op.data.send_initial_metadata.metadata,
send_initial_metadata_op.data.send_initial_metadata.count,
send_initial_metadata_.get());
PrepareOutgoingInitialMetadata(send_initial_metadata_op,
*send_initial_metadata_);
auto call = MakeCallPair(std::move(send_initial_metadata_), event_engine(),
arena()->Ref());
started_call_initiator_ = std::move(call.initiator);
call_destination_->StartCall(std::move(call.handler));
while (true) {
switch (cur_state) {
case kUnstarted:
if (call_state_.compare_exchange_strong(cur_state, kStarted,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
return;
}
break;
case kStarted:
Crash("StartCall called twice"); // probably we crash earlier...
case kCancelled:
return;
default: { // UnorderedStart
if (call_state_.compare_exchange_strong(cur_state, kStarted,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
auto unordered_start = reinterpret_cast<UnorderedStart*>(cur_state);
while (unordered_start->next != nullptr) {
unordered_start->start_pending_batch();
auto next = unordered_start->next;
delete unordered_start;
unordered_start = next;
}
return;
}
break;
}
}
}
}
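// A batch consisting solely of SEND_INITIAL_METADATA starts the call and
// completes immediately. Otherwise each op is wrapped in a promise handler,
// the send and receive chains are joined with AllOk, and the result is
// scheduled as an infallible batch (when RECV_STATUS_ON_CLIENT is present,
// status is always delivered) or a fallible batch (when it is not).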
void ClientCall::CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) {
if (nops == 1 && ops[0].op == GRPC_OP_SEND_INITIAL_METADATA) {
StartCall(ops[0]);
EndOpImmediately(cq_, notify_tag, is_notify_tag_closure);
return;
}
if (!is_notify_tag_closure) grpc_cq_begin_op(cq_, notify_tag);
BatchOpIndex op_index(ops, nops);
auto send_message =
op_index.OpHandler<GRPC_OP_SEND_MESSAGE>([this](const grpc_op& op) {
SliceBuffer send;
grpc_slice_buffer_swap(
&op.data.send_message.send_message->data.raw.slice_buffer,
send.c_slice_buffer());
auto msg = arena()->MakePooled<Message>(std::move(send), op.flags);
return [this, msg = std::move(msg)]() mutable {
return started_call_initiator_.PushMessage(std::move(msg));
};
});
auto send_close_from_client =
op_index.OpHandler<GRPC_OP_SEND_CLOSE_FROM_CLIENT>(
[this](const grpc_op&) {
return [this]() {
started_call_initiator_.FinishSends();
return Success{};
};
});
auto recv_message =
op_index.OpHandler<GRPC_OP_RECV_MESSAGE>([this](const grpc_op& op) {
return message_receiver_.MakeBatchOp(op, &started_call_initiator_);
});
auto recv_initial_metadata =
op_index.OpHandler<GRPC_OP_RECV_INITIAL_METADATA>([this](
const grpc_op& op) {
return [this,
array = op.data.recv_initial_metadata.recv_initial_metadata]() {
return Map(
started_call_initiator_.PullServerInitialMetadata(),
[this,
array](ValueOrFailure<absl::optional<ServerMetadataHandle>> md) {
ServerMetadataHandle metadata;
if (!md.ok() || !md->has_value()) {
is_trailers_only_ = true;
metadata = Arena::MakePooled<ServerMetadata>();
} else {
metadata = std::move(md->value());
is_trailers_only_ =
metadata->get(GrpcTrailersOnly()).value_or(false);
}
ProcessIncomingInitialMetadata(*metadata);
PublishMetadataArray(metadata.get(), array, true);
received_initial_metadata_ = std::move(metadata);
return Success{};
});
};
});
auto primary_ops = AllOk<StatusFlag>(
TrySeq(std::move(send_message), std::move(send_close_from_client)),
TrySeq(std::move(recv_initial_metadata), std::move(recv_message)));
if (const grpc_op* op = op_index.op(GRPC_OP_SEND_INITIAL_METADATA)) {
StartCall(*op);
}
if (const grpc_op* op = op_index.op(GRPC_OP_RECV_STATUS_ON_CLIENT)) {
auto out_status = op->data.recv_status_on_client.status;
auto out_status_details = op->data.recv_status_on_client.status_details;
auto out_error_string = op->data.recv_status_on_client.error_string;
auto out_trailing_metadata =
op->data.recv_status_on_client.trailing_metadata;
auto make_read_trailing_metadata = [this, out_status, out_status_details,
out_error_string,
out_trailing_metadata]() {
return Map(
started_call_initiator_.PullServerTrailingMetadata(),
[this, out_status, out_status_details, out_error_string,
out_trailing_metadata](
ServerMetadataHandle server_trailing_metadata) {
if (grpc_call_trace.enabled()) {
LOG(INFO) << DebugTag() << "RecvStatusOnClient "
<< server_trailing_metadata->DebugString();
}
const auto status =
server_trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN);
*out_status = status;
Slice message_slice;
if (Slice* message = server_trailing_metadata->get_pointer(
GrpcMessageMetadata())) {
message_slice = message->Ref();
}
*out_status_details = message_slice.TakeCSlice();
if (out_error_string != nullptr) {
if (status != GRPC_STATUS_OK) {
*out_error_string = gpr_strdup(
MakeErrorString(server_trailing_metadata.get()).c_str());
} else {
*out_error_string = nullptr;
}
}
PublishMetadataArray(server_trailing_metadata.get(),
out_trailing_metadata, true);
received_trailing_metadata_ = std::move(server_trailing_metadata);
return Success{};
});
};
ScheduleCommittedBatch(InfallibleBatch(
std::move(primary_ops),
OpHandler<GRPC_OP_RECV_STATUS_ON_CLIENT>(OnCancelFactory(
std::move(make_read_trailing_metadata),
[this, out_status, out_status_details, out_error_string,
out_trailing_metadata]() {
auto* status = cancel_status_.Get();
CHECK_NE(status, nullptr);
*out_status = static_cast<grpc_status_code>(status->code());
*out_status_details =
Slice::FromCopiedString(status->message()).TakeCSlice();
if (out_error_string != nullptr) {
*out_error_string = nullptr;
}
out_trailing_metadata->count = 0;
})),
is_notify_tag_closure, notify_tag, cq_));
} else {
ScheduleCommittedBatch(FallibleBatch(
std::move(primary_ops), is_notify_tag_closure, notify_tag, cq_));
}
}
char* ClientCall::GetPeer() {
Slice peer_slice = GetPeerString();
if (!peer_slice.empty()) {
absl::string_view peer_string_view = peer_slice.as_string_view();
char* peer_string =
static_cast<char*>(gpr_malloc(peer_string_view.size() + 1));
memcpy(peer_string, peer_string_view.data(), peer_string_view.size());
peer_string[peer_string_view.size()] = '\0';
return peer_string;
}
return gpr_strdup("unknown");
}
grpc_call* MakeClientCall(
grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, Slice path, absl::optional<Slice> authority,
bool registered_method, Timestamp deadline,
grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination) {
return arena
->New<ClientCall>(parent_call, propagation_mask, cq, std::move(path),
std::move(authority), registered_method, deadline,
compression_options, event_engine, arena, destination)
->c_ptr();
}
} // namespace grpc_core

@ -0,0 +1,180 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_CLIENT_CALL_H
#define GRPC_SRC_CORE_LIB_SURFACE_CLIENT_CALL_H
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <atomic>
#include <cstdint>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/single_set_ptr.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_utils.h"
#include "src/core/lib/transport/metadata.h"
namespace grpc_core {
class ClientCall final
: public Call,
public DualRefCounted<ClientCall, NonPolymorphicRefCount,
UnrefCallDestroy> {
public:
ClientCall(grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, Slice path,
absl::optional<Slice> authority, bool registered_method,
Timestamp deadline, grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination);
void CancelWithError(grpc_error_handle error) override;
bool is_trailers_only() const override { return is_trailers_only_; }
absl::string_view GetServerAuthority() const override {
Crash("unimplemented");
}
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
void ExternalRef() override { Ref().release(); }
void ExternalUnref() override { Unref(); }
void InternalRef(const char*) override { WeakRef().release(); }
void InternalUnref(const char*) override { WeakUnref(); }
void Orphaned() override {
// TODO(ctiller): only when we're not already finished
CancelWithError(absl::CancelledError());
}
void SetCompletionQueue(grpc_completion_queue*) override {
Crash("unimplemented");
}
grpc_compression_options compression_options() override {
return compression_options_;
}
grpc_call_stack* call_stack() override { return nullptr; }
char* GetPeer() override;
bool Completed() final { Crash("unimplemented"); }
bool failed_before_recv_message() const final { Crash("unimplemented"); }
grpc_compression_algorithm incoming_compression_algorithm() override {
return message_receiver_.incoming_compression_algorithm();
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) override {
message_receiver_.SetIncomingCompressionAlgorithm(algorithm);
}
uint32_t test_only_message_flags() override {
return message_receiver_.last_message_flags();
}
void Destroy() {
auto arena = this->arena()->Ref();
this->~ClientCall();
}
private:
struct UnorderedStart {
absl::AnyInvocable<void()> start_pending_batch;
UnorderedStart* next;
};
void CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure);
template <typename Batch>
void ScheduleCommittedBatch(Batch batch);
void StartCall(const grpc_op& send_initial_metadata_op);
std::string DebugTag() { return absl::StrFormat("CLIENT_CALL[%p]: ", this); }
// call_state_ is one of:
// 1. kUnstarted - call has not yet been started
// 2. pointer to an UnorderedStart - call has ops started, but no send initial
// metadata yet
// 3. kStarted - call has been started and call_initiator_ is ready
// 4. kCancelled - call was cancelled before starting
// In cases (1) and (2) send_initial_metadata_ is used to store the initial
// but unsent metadata.
// In case (3) started_call_initiator_ is used to store the call initiator.
// In case (4) no other state is used.
enum CallState : uintptr_t {
kUnstarted = 0,
kStarted = 1,
kCancelled = 2,
};
std::atomic<uintptr_t> call_state_{kUnstarted};
ClientMetadataHandle send_initial_metadata_{
Arena::MakePooled<ClientMetadata>()};
CallInitiator started_call_initiator_;
// Status passed to CancelWithError;
// if call_state_ == kCancelled then this is the authoritative status,
// otherwise the server trailing metadata from started_call_initiator_ is
// authoritative.
SingleSetPtr<absl::Status> cancel_status_;
MessageReceiver message_receiver_;
grpc_completion_queue* const cq_;
const RefCountedPtr<UnstartedCallDestination> call_destination_;
const grpc_compression_options compression_options_;
ServerMetadataHandle received_initial_metadata_;
ServerMetadataHandle received_trailing_metadata_;
bool is_trailers_only_;
};
grpc_call* MakeClientCall(
grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, Slice path, absl::optional<Slice> authority,
bool registered_method, Timestamp deadline,
grpc_compression_options compression_options,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena,
RefCountedPtr<UnstartedCallDestination> destination);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_CLIENT_CALL_H

File diff suppressed because it is too large

@ -0,0 +1,372 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_FILTER_STACK_CALL_H
#define GRPC_SRC_CORE_LIB_SURFACE_FILTER_STACK_CALL_H
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <atomic>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/server/server_interface.h"
#include "src/core/telemetry/call_tracer.h"
#include "src/core/util/alloc.h"
namespace grpc_core {
///////////////////////////////////////////////////////////////////////////////
// FilterStackCall
// To be removed once promise conversion is complete
class FilterStackCall final : public Call {
public:
~FilterStackCall() override {
gpr_free(static_cast<void*>(const_cast<char*>(final_info_.error_string)));
}
bool Completed() override {
return gpr_atm_acq_load(&received_final_op_atm_) != 0;
}
// TODO(ctiller): return absl::StatusOr<SomeSmartPointer<Call>>?
static grpc_error_handle Create(grpc_call_create_args* args,
grpc_call** out_call);
static Call* FromTopElem(grpc_call_element* elem) {
return FromCallStack(grpc_call_stack_from_top_element(elem));
}
grpc_call_stack* call_stack() override {
return reinterpret_cast<grpc_call_stack*>(
reinterpret_cast<char*>(this) +
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(*this)));
}
grpc_call_element* call_elem(size_t idx) {
return grpc_call_stack_element(call_stack(), idx);
}
CallCombiner* call_combiner() { return &call_combiner_; }
void CancelWithError(grpc_error_handle error) override;
void SetCompletionQueue(grpc_completion_queue* cq) override;
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
void ExternalRef() override { ext_ref_.Ref(); }
void ExternalUnref() override;
void InternalRef(const char* reason) override {
GRPC_CALL_STACK_REF(call_stack(), reason);
}
void InternalUnref(const char* reason) override {
GRPC_CALL_STACK_UNREF(call_stack(), reason);
}
bool is_trailers_only() const override {
bool result = is_trailers_only_;
DCHECK(!result || recv_initial_metadata_.TransportSize() == 0);
return result;
}
bool failed_before_recv_message() const override {
return call_failed_before_recv_message_;
}
uint32_t test_only_message_flags() override {
return test_only_last_message_flags_;
}
absl::string_view GetServerAuthority() const override {
const Slice* authority_metadata =
recv_initial_metadata_.get_pointer(HttpAuthorityMetadata());
if (authority_metadata == nullptr) return "";
return authority_metadata->as_string_view();
}
static size_t InitialSizeEstimate() {
return sizeof(FilterStackCall) +
sizeof(BatchControl) * kMaxConcurrentBatches;
}
char* GetPeer() final;
grpc_compression_options compression_options() override {
return channel_->compression_options();
}
void DeleteThis() {
auto arena = this->arena()->Ref();
this->~FilterStackCall();
}
Channel* channel() const { return channel_.get(); }
private:
class ScopedContext : public promise_detail::Context<Arena> {
public:
explicit ScopedContext(FilterStackCall* call)
: promise_detail::Context<Arena>(call->arena()) {}
};
static constexpr gpr_atm kRecvNone = 0;
static constexpr gpr_atm kRecvInitialMetadataFirst = 1;
enum class PendingOp {
kRecvMessage,
kRecvInitialMetadata,
kRecvTrailingMetadata,
kSends
};
static intptr_t PendingOpMask(PendingOp op) {
return static_cast<intptr_t>(1) << static_cast<intptr_t>(op);
}
static std::string PendingOpString(intptr_t pending_ops) {
std::vector<absl::string_view> pending_op_strings;
if (pending_ops & PendingOpMask(PendingOp::kRecvMessage)) {
pending_op_strings.push_back("kRecvMessage");
}
if (pending_ops & PendingOpMask(PendingOp::kRecvInitialMetadata)) {
pending_op_strings.push_back("kRecvInitialMetadata");
}
if (pending_ops & PendingOpMask(PendingOp::kRecvTrailingMetadata)) {
pending_op_strings.push_back("kRecvTrailingMetadata");
}
if (pending_ops & PendingOpMask(PendingOp::kSends)) {
pending_op_strings.push_back("kSends");
}
return absl::StrCat("{", absl::StrJoin(pending_op_strings, ","), "}");
}
struct BatchControl {
FilterStackCall* call_ = nullptr;
CallTracerAnnotationInterface* call_tracer_ = nullptr;
grpc_transport_stream_op_batch op_;
// Share memory for cq_completion and notify_tag as they are never needed
// simultaneously. Each byte used in this data structure counts as six bytes
// per call, so any savings we can make are worthwhile.
// We use notify_tag to determine whether or not to send notification to the
// completion queue. Once we've made that determination, we can reuse the
// memory for cq_completion.
union {
grpc_cq_completion cq_completion;
struct {
// Any given op indicates completion by either (a) calling a closure or
// (b) sending a notification on the call's completion queue. If
// \a is_closure is true, \a tag indicates a closure to be invoked;
// otherwise, \a tag indicates the tag to be used in the notification to
// be sent to the completion queue.
void* tag;
bool is_closure;
} notify_tag;
} completion_data_;
grpc_closure start_batch_;
grpc_closure finish_batch_;
std::atomic<intptr_t> ops_pending_{0};
AtomicError batch_error_;
void set_pending_ops(uintptr_t ops) {
ops_pending_.store(ops, std::memory_order_release);
}
bool completed_batch_step(PendingOp op) {
auto mask = PendingOpMask(op);
auto r = ops_pending_.fetch_sub(mask, std::memory_order_acq_rel);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "BATCH:%p COMPLETE:%s REMAINING:%s (tag:%p)", this,
PendingOpString(mask).c_str(),
PendingOpString(r & ~mask).c_str(),
completion_data_.notify_tag.tag);
}
CHECK_NE((r & mask), 0);
return r == mask;
}
void PostCompletion();
void FinishStep(PendingOp op);
void ProcessDataAfterMetadata();
void ReceivingStreamReady(grpc_error_handle error);
void ReceivingInitialMetadataReady(grpc_error_handle error);
void ReceivingTrailingMetadataReady(grpc_error_handle error);
void FinishBatch(grpc_error_handle error);
};
FilterStackCall(RefCountedPtr<Arena> arena,
const grpc_call_create_args& args);
static void ReleaseCall(void* call, grpc_error_handle);
static void DestroyCall(void* call, grpc_error_handle);
static FilterStackCall* FromCallStack(grpc_call_stack* call_stack) {
return reinterpret_cast<FilterStackCall*>(
reinterpret_cast<char*>(call_stack) -
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(FilterStackCall)));
}
void ExecuteBatch(grpc_transport_stream_op_batch* batch,
grpc_closure* start_batch_closure);
void SetFinalStatus(grpc_error_handle error);
BatchControl* ReuseOrAllocateBatchControl(const grpc_op* ops);
bool PrepareApplicationMetadata(size_t count, grpc_metadata* metadata,
bool is_trailing);
void PublishAppMetadata(grpc_metadata_batch* b, bool is_trailing);
void RecvInitialFilter(grpc_metadata_batch* b);
void RecvTrailingFilter(grpc_metadata_batch* b,
grpc_error_handle batch_error);
grpc_compression_algorithm incoming_compression_algorithm() override {
return incoming_compression_algorithm_;
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) override {
incoming_compression_algorithm_ = algorithm;
}
RefCountedPtr<Channel> channel_;
RefCount ext_ref_;
CallCombiner call_combiner_;
grpc_completion_queue* cq_;
grpc_polling_entity pollent_;
/// has grpc_call_unref been called
bool destroy_called_ = false;
// Trailers-only response status
bool is_trailers_only_ = false;
/// which ops are in-flight
bool sent_initial_metadata_ = false;
bool sending_message_ = false;
bool sent_final_op_ = false;
bool received_initial_metadata_ = false;
bool receiving_message_ = false;
bool requested_final_op_ = false;
gpr_atm received_final_op_atm_ = 0;
BatchControl* active_batches_[kMaxConcurrentBatches] = {};
grpc_transport_stream_op_batch_payload stream_op_payload_;
// first idx: is_receiving, second idx: is_trailing
grpc_metadata_batch send_initial_metadata_;
grpc_metadata_batch send_trailing_metadata_;
grpc_metadata_batch recv_initial_metadata_;
grpc_metadata_batch recv_trailing_metadata_;
// Buffered read metadata waiting to be returned to the application.
// Element 0 is initial metadata, element 1 is trailing metadata.
grpc_metadata_array* buffered_metadata_[2] = {};
// Call data used for reporting. Only valid after the call has completed.
grpc_call_final_info final_info_;
SliceBuffer send_slice_buffer_;
absl::optional<SliceBuffer> receiving_slice_buffer_;
uint32_t receiving_stream_flags_;
uint32_t test_only_last_message_flags_ = 0;
// Compression algorithm for *incoming* data
grpc_compression_algorithm incoming_compression_algorithm_ =
GRPC_COMPRESS_NONE;
bool call_failed_before_recv_message_ = false;
grpc_byte_buffer** receiving_buffer_ = nullptr;
grpc_slice receiving_slice_ = grpc_empty_slice();
grpc_closure receiving_stream_ready_;
grpc_closure receiving_initial_metadata_ready_;
grpc_closure receiving_trailing_metadata_ready_;
// Status about operation of call
bool sent_server_trailing_metadata_ = false;
gpr_atm cancelled_with_error_ = 0;
grpc_closure release_call_;
union {
struct {
grpc_status_code* status;
grpc_slice* status_details;
const char** error_string;
} client;
struct {
int* cancelled;
// backpointer to owning server if this is a server side call.
ServerInterface* core_server;
} server;
} final_op_;
AtomicError status_error_;
// recv_state can contain one of the following values:
// RECV_NONE : no initial metadata and no messages received
// RECV_INITIAL_METADATA_FIRST : received initial metadata first
// a batch_control* : received messages first
// +------1------RECV_NONE------3-----+
// | |
// | |
// v v
// RECV_INITIAL_METADATA_FIRST receiving_stream_ready_bctlp
// | ^ | ^
// | | | |
// +-----2-----+ +-----4-----+
// For 1, 4: See receiving_initial_metadata_ready() function
// For 2, 3: See receiving_stream_ready() function
gpr_atm recv_state_ = 0;
};
// Create a new call based on \a args.
// Regardless of success or failure, always returns a valid new call into *call
//
grpc_error_handle grpc_call_create(grpc_call_create_args* args,
grpc_call** call);
// Given the top call_element, get the call object.
grpc_call* grpc_call_from_top_element(grpc_call_element* surface_element);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_FILTER_STACK_CALL_H

@ -60,7 +60,7 @@
namespace grpc_core {
absl::StatusOr<OrphanablePtr<Channel>> LegacyChannel::Create(
absl::StatusOr<RefCountedPtr<Channel>> LegacyChannel::Create(
std::string target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type) {
if (grpc_channel_stack_type_is_client(channel_stack_type)) {
@ -101,18 +101,16 @@ absl::StatusOr<OrphanablePtr<Channel>> LegacyChannel::Create(
GlobalStatsPluginRegistry::GetStatsPluginsForChannel(
experimental::StatsPluginChannelScope(target, authority));
}
return MakeOrphanable<LegacyChannel>(
return MakeRefCounted<LegacyChannel>(
grpc_channel_stack_type_is_client(builder.channel_stack_type()),
builder.IsPromising(), std::move(target), args, std::move(*r));
std::move(target), args, std::move(*r));
}
LegacyChannel::LegacyChannel(bool is_client, bool is_promising,
std::string target,
LegacyChannel::LegacyChannel(bool is_client, std::string target,
const ChannelArgs& channel_args,
RefCountedPtr<grpc_channel_stack> channel_stack)
: Channel(std::move(target), channel_args),
is_client_(is_client),
is_promising_(is_promising),
channel_stack_(std::move(channel_stack)) {
// We need to make sure that grpc_shutdown() does not shut things down
// until after the channel is destroyed. However, the channel may not
@ -144,13 +142,12 @@ LegacyChannel::LegacyChannel(bool is_client, bool is_promising,
};
}
void LegacyChannel::Orphan() {
void LegacyChannel::Orphaned() {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
op->disconnect_with_error = GRPC_ERROR_CREATE("Channel Destroyed");
grpc_channel_element* elem =
grpc_channel_stack_element(channel_stack_.get(), 0);
elem->filter->start_transport_op(elem, op);
Unref();
}
bool LegacyChannel::IsLame() const {
@ -167,7 +164,7 @@ grpc_call* LegacyChannel::CreateCall(
CHECK(is_client_);
CHECK(!(cq != nullptr && pollset_set_alternative != nullptr));
grpc_call_create_args args;
args.channel = Ref();
args.channel = RefAsSubclass<LegacyChannel>();
args.server = nullptr;
args.parent = parent_call;
args.propagation_mask = propagation_mask;
@ -204,9 +201,9 @@ bool LegacyChannel::SupportsConnectivityWatcher() const {
// A fire-and-forget object to handle external connectivity state watches.
class LegacyChannel::StateWatcher final : public DualRefCounted<StateWatcher> {
public:
StateWatcher(RefCountedPtr<LegacyChannel> channel, grpc_completion_queue* cq,
void* tag, grpc_connectivity_state last_observed_state,
Timestamp deadline)
StateWatcher(WeakRefCountedPtr<LegacyChannel> channel,
grpc_completion_queue* cq, void* tag,
grpc_connectivity_state last_observed_state, Timestamp deadline)
: channel_(std::move(channel)),
cq_(cq),
tag_(tag),
@ -313,7 +310,7 @@ class LegacyChannel::StateWatcher final : public DualRefCounted<StateWatcher> {
self->WeakUnref();
}
RefCountedPtr<LegacyChannel> channel_;
WeakRefCountedPtr<LegacyChannel> channel_;
grpc_completion_queue* cq_;
void* tag_;
@ -333,8 +330,8 @@ class LegacyChannel::StateWatcher final : public DualRefCounted<StateWatcher> {
void LegacyChannel::WatchConnectivityState(
grpc_connectivity_state last_observed_state, Timestamp deadline,
grpc_completion_queue* cq, void* tag) {
new StateWatcher(RefAsSubclass<LegacyChannel>(), cq, tag, last_observed_state,
deadline);
new StateWatcher(WeakRefAsSubclass<LegacyChannel>(), cq, tag,
last_observed_state, deadline);
}
void LegacyChannel::AddConnectivityWatcher(
@ -401,8 +398,7 @@ void LegacyChannel::Ping(grpc_completion_queue* cq, void* tag) {
ClientChannelFilter* LegacyChannel::GetClientChannelFilter() const {
grpc_channel_element* elem =
grpc_channel_stack_last_element(channel_stack_.get());
if (elem->filter != &ClientChannelFilter::kFilterVtableWithPromises &&
elem->filter != &ClientChannelFilter::kFilterVtableWithoutPromises) {
if (elem->filter != &ClientChannelFilter::kFilter) {
return nullptr;
}
return static_cast<ClientChannelFilter*>(elem->channel_data);

@ -46,16 +46,16 @@ namespace grpc_core {
class LegacyChannel final : public Channel {
public:
static absl::StatusOr<OrphanablePtr<Channel>> Create(
static absl::StatusOr<RefCountedPtr<Channel>> Create(
std::string target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type);
// Do not instantiate directly -- use Create() instead.
LegacyChannel(bool is_client, bool is_promising, std::string target,
LegacyChannel(bool is_client, std::string target,
const ChannelArgs& channel_args,
RefCountedPtr<grpc_channel_stack> channel_stack);
void Orphan() override;
void Orphaned() override;
bool IsLame() const override;
@ -65,6 +65,10 @@ class LegacyChannel final : public Channel {
absl::optional<Slice> authority, Timestamp deadline,
bool registered_method) override;
void StartCall(UnstartedCallHandler) override {
Crash("StartCall() not supported on LegacyChannel");
}
grpc_event_engine::experimental::EventEngine* event_engine() const override {
return channel_stack_->EventEngine();
}
@ -90,7 +94,6 @@ class LegacyChannel final : public Channel {
void Ping(grpc_completion_queue* cq, void* tag) override;
bool is_client() const override { return is_client_; }
bool is_promising() const override { return is_promising_; }
grpc_channel_stack* channel_stack() const override {
return channel_stack_.get();
}
@ -103,7 +106,6 @@ class LegacyChannel final : public Channel {
ClientChannelFilter* GetClientChannelFilter() const;
const bool is_client_;
const bool is_promising_;
RefCountedPtr<grpc_channel_stack> channel_stack_;
};

@ -0,0 +1,224 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/server_call.h"
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/bitset.h"
#include "src/core/lib/promise/all_ok.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/server/server_interface.h"
namespace grpc_core {
namespace {
grpc_call_error ValidateServerBatch(const grpc_op* ops, size_t nops) {
BitSet<8> got_ops;
for (size_t op_idx = 0; op_idx < nops; op_idx++) {
const grpc_op& op = ops[op_idx];
switch (op.op) {
case GRPC_OP_SEND_INITIAL_METADATA:
if (!AreInitialMetadataFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
if (!ValidateMetadata(op.data.send_initial_metadata.count,
op.data.send_initial_metadata.metadata)) {
return GRPC_CALL_ERROR_INVALID_METADATA;
}
break;
case GRPC_OP_SEND_MESSAGE:
if (!AreWriteFlagsValid(op.flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
if (op.flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!ValidateMetadata(
op.data.send_status_from_server.trailing_metadata_count,
op.data.send_status_from_server.trailing_metadata)) {
return GRPC_CALL_ERROR_INVALID_METADATA;
}
break;
case GRPC_OP_RECV_MESSAGE:
case GRPC_OP_RECV_CLOSE_ON_SERVER:
if (op.flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
break;
case GRPC_OP_RECV_INITIAL_METADATA:
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
case GRPC_OP_RECV_STATUS_ON_CLIENT:
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
if (got_ops.is_set(op.op)) return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
got_ops.set(op.op);
}
return GRPC_CALL_OK;
}
} // namespace
grpc_call_error ServerCall::StartBatch(const grpc_op* ops, size_t nops,
void* notify_tag,
bool is_notify_tag_closure) {
if (nops == 0) {
EndOpImmediately(cq_, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
const grpc_call_error validation_result = ValidateServerBatch(ops, nops);
if (validation_result != GRPC_CALL_OK) {
return validation_result;
}
CommitBatch(ops, nops, notify_tag, is_notify_tag_closure);
return GRPC_CALL_OK;
}
void ServerCall::CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) {
BatchOpIndex op_index(ops, nops);
if (!is_notify_tag_closure) grpc_cq_begin_op(cq_, notify_tag);
auto send_initial_metadata =
op_index.OpHandler<GRPC_OP_SEND_INITIAL_METADATA>([this](
const grpc_op& op) {
auto metadata = arena()->MakePooled<ServerMetadata>();
PrepareOutgoingInitialMetadata(op, *metadata);
CToMetadata(op.data.send_initial_metadata.metadata,
op.data.send_initial_metadata.count, metadata.get());
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "%s[call] Send initial metadata",
DebugTag().c_str());
}
return [this, metadata = std::move(metadata)]() mutable {
return call_handler_.PushServerInitialMetadata(std::move(metadata));
};
});
auto send_message =
op_index.OpHandler<GRPC_OP_SEND_MESSAGE>([this](const grpc_op& op) {
SliceBuffer send;
grpc_slice_buffer_swap(
&op.data.send_message.send_message->data.raw.slice_buffer,
send.c_slice_buffer());
auto msg = arena()->MakePooled<Message>(std::move(send), op.flags);
return [this, msg = std::move(msg)]() mutable {
return call_handler_.PushMessage(std::move(msg));
};
});
auto send_trailing_metadata =
op_index.OpHandler<GRPC_OP_SEND_STATUS_FROM_SERVER>(
[this](const grpc_op& op) {
auto metadata = arena()->MakePooled<ServerMetadata>();
CToMetadata(op.data.send_status_from_server.trailing_metadata,
op.data.send_status_from_server.trailing_metadata_count,
metadata.get());
metadata->Set(GrpcStatusMetadata(),
op.data.send_status_from_server.status);
if (auto* details =
op.data.send_status_from_server.status_details) {
// TODO(ctiller): this should not be a copy, but we have
// callers that allocate and pass in a slice created with
// grpc_slice_from_static_string and then delete the string
// after passing it in, which shouldn't be a supported API.
metadata->Set(GrpcMessageMetadata(),
Slice(grpc_slice_copy(*details)));
}
CHECK(metadata != nullptr);
return [this, metadata = std::move(metadata)]() mutable {
CHECK(metadata != nullptr);
return [this, metadata = std::move(
metadata)]() mutable -> Poll<Success> {
CHECK(metadata != nullptr);
call_handler_.PushServerTrailingMetadata(std::move(metadata));
return Success{};
};
};
});
auto recv_message =
op_index.OpHandler<GRPC_OP_RECV_MESSAGE>([this](const grpc_op& op) {
return message_receiver_.MakeBatchOp(op, &call_handler_);
});
auto primary_ops = AllOk<StatusFlag>(
TrySeq(AllOk<StatusFlag>(std::move(send_initial_metadata),
std::move(send_message)),
std::move(send_trailing_metadata)),
std::move(recv_message));
if (auto* op = op_index.op(GRPC_OP_RECV_CLOSE_ON_SERVER)) {
auto recv_trailing_metadata = OpHandler<GRPC_OP_RECV_CLOSE_ON_SERVER>(
[this, cancelled = op->data.recv_close_on_server.cancelled]() {
return Map(call_handler_.WasCancelled(),
[cancelled, this](bool result) -> Success {
ResetDeadline();
*cancelled = result ? 1 : 0;
return Success{};
});
});
call_handler_.SpawnInfallible(
"final-batch", InfallibleBatch(std::move(primary_ops),
std::move(recv_trailing_metadata),
is_notify_tag_closure, notify_tag, cq_));
} else {
call_handler_.SpawnInfallible(
"batch", FallibleBatch(std::move(primary_ops), is_notify_tag_closure,
notify_tag, cq_));
}
}
grpc_call* MakeServerCall(CallHandler call_handler,
ClientMetadataHandle client_initial_metadata,
ServerInterface* server, grpc_completion_queue* cq,
grpc_metadata_array* publish_initial_metadata) {
PublishMetadataArray(client_initial_metadata.get(), publish_initial_metadata,
false);
// TODO(ctiller): ideally we'd put this in the arena with the CallHandler,
// but there's an ownership problem: CallHandler owns the arena, and so would
// get destroyed before the base class Call destructor runs, leading to
// UB/crash. Investigate another path.
return (new ServerCall(std::move(client_initial_metadata),
std::move(call_handler), server, cq))
->c_ptr();
}
} // namespace grpc_core

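Side note (not part of the diff): ValidateServerBatch above rejects client-only ops, checks per-op flags, and uses a BitSet to detect the same op appearing twice in a single batch. A small self-contained sketch of the duplicate-detection part using std::bitset; the op codes and error values below are simplified placeholders, not the real grpc_op_type/grpc_call_error enums:

#include <bitset>
#include <cassert>
#include <cstddef>

// Simplified stand-ins for grpc_op_type / grpc_call_error.
enum OpType { kSendInitialMetadata, kSendMessage, kSendStatus, kRecvMessage, kNumOps };
enum BatchError { kOk, kTooManyOperations };

BatchError ValidateBatch(const OpType* ops, size_t nops) {
  std::bitset<kNumOps> got_ops;
  for (size_t i = 0; i < nops; ++i) {
    if (got_ops.test(ops[i])) return kTooManyOperations;  // duplicate op in one batch
    got_ops.set(ops[i]);
  }
  return kOk;
}

int main() {
  OpType ok_batch[] = {kSendInitialMetadata, kSendMessage, kSendStatus};
  OpType bad_batch[] = {kSendMessage, kSendMessage};
  assert(ValidateBatch(ok_batch, 3) == kOk);
  assert(ValidateBatch(bad_batch, 2) == kTooManyOperations);
}
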
@ -0,0 +1,167 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_SERVER_CALL_H
#define GRPC_SRC_CORE_LIB_SURFACE_SERVER_CALL_H
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/impl/call.h>
#include <grpc/impl/propagation_bits.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/server/server_interface.h"
#include "src/core/telemetry/stats.h"
#include "src/core/telemetry/stats_data.h"
namespace grpc_core {
class ServerCall final : public Call, public DualRefCounted<ServerCall> {
public:
ServerCall(ClientMetadataHandle client_initial_metadata,
CallHandler call_handler, ServerInterface* server,
grpc_completion_queue* cq)
: Call(false,
client_initial_metadata->get(GrpcTimeoutMetadata())
.value_or(Timestamp::InfFuture()),
call_handler.arena()->Ref(), call_handler.event_engine()),
call_handler_(std::move(call_handler)),
client_initial_metadata_stored_(std::move(client_initial_metadata)),
cq_(cq),
server_(server) {
global_stats().IncrementServerCallsCreated();
}
void CancelWithError(grpc_error_handle error) override {
call_handler_.SpawnInfallible(
"CancelWithError",
[self = WeakRefAsSubclass<ServerCall>(), error = std::move(error)] {
auto status = ServerMetadataFromStatus(error);
status->Set(GrpcCallWasCancelled(), true);
self->call_handler_.PushServerTrailingMetadata(std::move(status));
return Empty{};
});
}
bool is_trailers_only() const override {
Crash("is_trailers_only not implemented for server calls");
}
absl::string_view GetServerAuthority() const override {
Crash("unimplemented");
}
grpc_call_error StartBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure) override;
void ExternalRef() override { Ref().release(); }
void ExternalUnref() override { Unref(); }
void InternalRef(const char*) override { WeakRef().release(); }
void InternalUnref(const char*) override { WeakUnref(); }
void Orphaned() override {
// TODO(ctiller): only when we're not already finished
CancelWithError(absl::CancelledError());
}
void SetCompletionQueue(grpc_completion_queue*) override {
Crash("unimplemented");
}
grpc_compression_options compression_options() override {
return server_->compression_options();
}
grpc_call_stack* call_stack() override { return nullptr; }
char* GetPeer() override {
Slice peer_slice = GetPeerString();
if (!peer_slice.empty()) {
absl::string_view peer_string_view = peer_slice.as_string_view();
char* peer_string =
static_cast<char*>(gpr_malloc(peer_string_view.size() + 1));
memcpy(peer_string, peer_string_view.data(), peer_string_view.size());
peer_string[peer_string_view.size()] = '\0';
return peer_string;
}
return gpr_strdup("unknown");
}
bool Completed() final { Crash("unimplemented"); }
bool failed_before_recv_message() const final { Crash("unimplemented"); }
uint32_t test_only_message_flags() override {
return message_receiver_.last_message_flags();
}
grpc_compression_algorithm incoming_compression_algorithm() override {
return message_receiver_.incoming_compression_algorithm();
}
void SetIncomingCompressionAlgorithm(
grpc_compression_algorithm algorithm) override {
message_receiver_.SetIncomingCompressionAlgorithm(algorithm);
}
private:
void CommitBatch(const grpc_op* ops, size_t nops, void* notify_tag,
bool is_notify_tag_closure);
std::string DebugTag() { return absl::StrFormat("SERVER_CALL[%p]: ", this); }
CallHandler call_handler_;
MessageReceiver message_receiver_;
ClientMetadataHandle client_initial_metadata_stored_;
grpc_completion_queue* const cq_;
ServerInterface* const server_;
};
grpc_call* MakeServerCall(CallHandler call_handler,
ClientMetadataHandle client_initial_metadata,
ServerInterface* server, grpc_completion_queue* cq,
grpc_metadata_array* publish_initial_metadata);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_SERVER_CALL_H

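Side note (not part of the diff): GetPeer() above hands the peer string back through the C API by copying it into a gpr_malloc'd, NUL-terminated buffer. A tiny standalone sketch of that copy, using std::malloc in place of gpr_malloc (the helper name is made up for illustration):

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string_view>

// Copy a string_view into a heap-allocated, NUL-terminated C string,
// mirroring what GetPeer() does for the C core API.
char* CopyToCString(std::string_view s) {
  char* out = static_cast<char*>(std::malloc(s.size() + 1));
  std::memcpy(out, s.data(), s.size());
  out[s.size()] = '\0';
  return out;  // caller frees
}

int main() {
  char* peer = CopyToCString("ipv4:127.0.0.1:50051");
  std::printf("%s\n", peer);
  std::free(peer);
}
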
@ -1,75 +0,0 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/surface/wait_for_cq_end_op.h"
#include <atomic>
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/match.h"
#include "src/core/lib/promise/trace.h"
namespace grpc_core {
Poll<Empty> WaitForCqEndOp::operator()() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sWaitForCqEndOp[%p] %s",
Activity::current()->DebugTag().c_str(), this,
StateString(state_).c_str());
}
if (auto* n = absl::get_if<NotStarted>(&state_)) {
if (n->is_closure) {
ExecCtx::Run(DEBUG_LOCATION, static_cast<grpc_closure*>(n->tag),
std::move(n->error));
return Empty{};
} else {
auto not_started = std::move(*n);
auto& started =
state_.emplace<Started>(GetContext<Activity>()->MakeOwningWaker());
grpc_cq_end_op(
not_started.cq, not_started.tag, std::move(not_started.error),
[](void* p, grpc_cq_completion*) {
auto started = static_cast<Started*>(p);
auto wakeup = std::move(started->waker);
started->done.store(true, std::memory_order_release);
wakeup.Wakeup();
},
&started, &started.completion);
}
}
auto& started = absl::get<Started>(state_);
if (started.done.load(std::memory_order_acquire)) {
return Empty{};
} else {
return Pending{};
}
}
std::string WaitForCqEndOp::StateString(const State& state) {
return Match(
state,
[](const NotStarted& x) {
return absl::StrFormat(
"NotStarted{is_closure=%s, tag=%p, error=%s, cq=%p}",
x.is_closure ? "true" : "false", x.tag, x.error.ToString(), x.cq);
},
[](const Started& x) {
return absl::StrFormat(
"Started{completion=%p, done=%s}", &x.completion,
x.done.load(std::memory_order_relaxed) ? "true" : "false");
},
[](const Invalid&) -> std::string { return "Invalid{}"; });
}
} // namespace grpc_core

@ -1,72 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_SURFACE_WAIT_FOR_CQ_END_OP_H
#define GRPC_SRC_CORE_LIB_SURFACE_WAIT_FOR_CQ_END_OP_H
#include <grpc/support/port_platform.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/surface/completion_queue.h"
namespace grpc_core {
// Defines a promise that calls grpc_cq_end_op() (on first poll) and then waits
// for the callback supplied to grpc_cq_end_op() to be called, before resolving
// to Empty{}
class WaitForCqEndOp {
public:
WaitForCqEndOp(bool is_closure, void* tag, grpc_error_handle error,
grpc_completion_queue* cq)
: state_{NotStarted{is_closure, tag, std::move(error), cq}} {}
Poll<Empty> operator()();
WaitForCqEndOp(const WaitForCqEndOp&) = delete;
WaitForCqEndOp& operator=(const WaitForCqEndOp&) = delete;
WaitForCqEndOp(WaitForCqEndOp&& other) noexcept
: state_(std::move(absl::get<NotStarted>(other.state_))) {
other.state_.emplace<Invalid>();
}
WaitForCqEndOp& operator=(WaitForCqEndOp&& other) noexcept {
state_ = std::move(absl::get<NotStarted>(other.state_));
other.state_.emplace<Invalid>();
return *this;
}
private:
struct NotStarted {
bool is_closure;
void* tag;
grpc_error_handle error;
grpc_completion_queue* cq;
};
struct Started {
explicit Started(Waker waker) : waker(std::move(waker)) {}
Waker waker;
grpc_cq_completion completion;
std::atomic<bool> done{false};
};
struct Invalid {};
using State = absl::variant<NotStarted, Started, Invalid>;
static std::string StateString(const State& state);
State state_{Invalid{}};
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_SURFACE_WAIT_FOR_CQ_END_OP_H

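Side note (not part of the diff): the deleted WaitForCqEndOp above is a poll-driven one-shot promise: the first poll transitions NotStarted to Started and kicks off the completion, and later polls resolve once the completion callback has flipped an atomic flag. A compact standalone analogue of that state machine, using std::variant instead of absl::variant and a plain callback instead of grpc_cq_end_op (all names below are illustrative):

#include <atomic>
#include <cassert>
#include <functional>
#include <variant>

// Simplified analogue of a Poll<> result: either pending or ready.
enum class Poll { kPending, kReady };

class WaitForCallback {
 public:
  // `start` receives a completion function to invoke when the work finishes.
  explicit WaitForCallback(std::function<void(std::function<void()>)> start)
      : state_(NotStarted{std::move(start)}) {}

  Poll operator()() {
    if (auto* n = std::get_if<NotStarted>(&state_)) {
      auto start = std::move(n->start);
      auto& started = state_.emplace<Started>();
      start([&started] { started.done.store(true, std::memory_order_release); });
    }
    auto& started = std::get<Started>(state_);
    return started.done.load(std::memory_order_acquire) ? Poll::kReady
                                                        : Poll::kPending;
  }

 private:
  struct NotStarted { std::function<void(std::function<void()>)> start; };
  struct Started { std::atomic<bool> done{false}; };
  std::variant<NotStarted, Started> state_;
};

int main() {
  std::function<void()> finish;
  WaitForCallback wait(
      [&finish](std::function<void()> done) { finish = std::move(done); });
  assert(wait() == Poll::kPending);  // first poll starts the operation
  finish();                          // the completion fires
  assert(wait() == Poll::kReady);    // a later poll observes completion
}
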
@ -1,171 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/transport/batch_builder.h"
#include <type_traits>
#include "absl/log/check.h"
#include <grpc/support/port_platform.h>
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
BatchBuilder::BatchBuilder(grpc_transport_stream_op_batch_payload* payload)
: payload_(payload) {}
void BatchBuilder::PendingCompletion::CompletionCallback(
void* self, grpc_error_handle error) {
auto* pc = static_cast<PendingCompletion*>(self);
auto* party = pc->batch->party.get();
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sFinish batch-component %s: status=%s",
pc->batch->DebugPrefix(party).c_str(),
std::string(pc->name()).c_str(), error.ToString().c_str());
}
party->Spawn(
"batch-completion",
[pc, error = std::move(error)]() mutable {
RefCountedPtr<Batch> batch = std::exchange(pc->batch, nullptr);
pc->done_latch.Set(std::move(error));
return Empty{};
},
[](Empty) {});
}
BatchBuilder::PendingCompletion::PendingCompletion(RefCountedPtr<Batch> batch)
: batch(std::move(batch)) {
GRPC_CLOSURE_INIT(&on_done_closure, CompletionCallback, this, nullptr);
}
BatchBuilder::Batch::Batch(grpc_transport_stream_op_batch_payload* payload,
grpc_stream_refcount* stream_refcount)
: party(GetContext<Party>()->Ref()), stream_refcount(stream_refcount) {
batch.payload = payload;
batch.is_traced = GetContext<CallContext>()->traced();
#ifndef NDEBUG
grpc_stream_ref(stream_refcount, "pending-batch");
#else
grpc_stream_ref(stream_refcount);
#endif
}
BatchBuilder::Batch::~Batch() {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] [batch %p] Destroy",
GetContext<Activity>()->DebugTag().c_str(), this);
}
delete pending_receive_message;
delete pending_receive_initial_metadata;
delete pending_receive_trailing_metadata;
delete pending_sends;
if (batch.cancel_stream) {
delete batch.payload;
}
#ifndef NDEBUG
grpc_stream_unref(stream_refcount, "pending-batch");
#else
grpc_stream_unref(stream_refcount);
#endif
}
BatchBuilder::Batch* BatchBuilder::GetBatch(Target target) {
if (target_.has_value() &&
(target_->stream != target.stream ||
target.transport->filter_stack_transport()
->HackyDisableStreamOpBatchCoalescingInConnectedChannel())) {
FlushBatch();
}
if (!target_.has_value()) {
target_ = target;
batch_ = GetContext<Arena>()->NewPooled<Batch>(payload_,
target_->stream_refcount);
}
CHECK_NE(batch_, nullptr);
return batch_;
}
void BatchBuilder::FlushBatch() {
CHECK_NE(batch_, nullptr);
CHECK(target_.has_value());
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG, "%sPerform transport stream op batch: %p %s",
batch_->DebugPrefix().c_str(), &batch_->batch,
grpc_transport_stream_op_batch_string(&batch_->batch, false).c_str());
}
std::exchange(batch_, nullptr)->PerformWith(*target_);
target_.reset();
}
void BatchBuilder::Batch::PerformWith(Target target) {
target.transport->filter_stack_transport()->PerformStreamOp(target.stream,
&batch);
}
ServerMetadataHandle BatchBuilder::CompleteSendServerTrailingMetadata(
Batch* batch, ServerMetadataHandle sent_metadata, absl::Status send_result,
bool actually_sent) {
if (!send_result.ok()) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG,
"%sSend metadata failed with error: %s, fabricating trailing "
"metadata",
batch->DebugPrefix().c_str(), send_result.ToString().c_str());
}
sent_metadata->Clear();
sent_metadata->Set(GrpcStatusMetadata(),
static_cast<grpc_status_code>(send_result.code()));
sent_metadata->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(send_result.message()));
sent_metadata->Set(GrpcCallWasCancelled(), true);
}
if (!sent_metadata->get(GrpcCallWasCancelled()).has_value()) {
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"%sTagging trailing metadata with cancellation status from "
"transport: %s",
batch->DebugPrefix().c_str(),
actually_sent ? "sent => not-cancelled" : "not-sent => cancelled");
}
sent_metadata->Set(GrpcCallWasCancelled(), !actually_sent);
}
return sent_metadata;
}
BatchBuilder::Batch* BatchBuilder::MakeCancel(
grpc_stream_refcount* stream_refcount, absl::Status status) {
auto* arena = GetContext<Arena>();
auto* payload = arena->NewPooled<grpc_transport_stream_op_batch_payload>();
auto* batch = arena->NewPooled<Batch>(payload, stream_refcount);
batch->batch.cancel_stream = true;
payload->cancel_stream.cancel_error = std::move(status);
return batch;
}
void BatchBuilder::Cancel(Target target, absl::Status status) {
auto* batch = MakeCancel(target.stream_refcount, std::move(status));
batch->batch.on_complete =
NewClosure([batch](absl::Status) { delete batch; });
batch->PerformWith(target);
}
} // namespace grpc_core

@ -1,474 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include <grpc/status.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/party.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
// Build up a transport stream op batch for a stream for a promise based
// connected channel.
// Offered as a context from Call, so that it can collect ALL the updates during
// a single party round, and then push them down to the transport as a single
// transaction.
class BatchBuilder {
public:
explicit BatchBuilder(grpc_transport_stream_op_batch_payload* payload);
~BatchBuilder() {
if (batch_ != nullptr) FlushBatch();
}
struct Target {
Transport* transport;
grpc_stream* stream;
grpc_stream_refcount* stream_refcount;
};
BatchBuilder(const BatchBuilder&) = delete;
BatchBuilder& operator=(const BatchBuilder&) = delete;
// Returns a promise that will resolve to a Status when the send is completed.
auto SendMessage(Target target, MessageHandle message);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendClientInitialMetadata(Target target, ClientMetadataHandle metadata);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendClientTrailingMetadata(Target target);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendServerInitialMetadata(Target target, ServerMetadataHandle metadata);
// Returns a promise that will resolve to a ServerMetadataHandle when the send
// is completed.
//
// If convert_to_cancellation is true, then the status will be converted to a
// cancellation batch instead of a trailing metadata op in a coalesced batch.
//
  // This quirk exists because, in the filter-based stack upon which our
  // transports were written, a trailing metadata op always needed to be
  // paired with an initial metadata op, and the transports would wait for
  // the initial metadata batch to arrive (in case of reordering up the
  // stack).
auto SendServerTrailingMetadata(Target target, ServerMetadataHandle metadata,
bool convert_to_cancellation);
// Returns a promise that will resolve to a StatusOr<optional<MessageHandle>>
// when a message is received.
// Error => non-ok status
// End of stream => Ok, nullopt (no message)
// Message => Ok, message
auto ReceiveMessage(Target target);
// Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
// when the receive is complete.
auto ReceiveClientInitialMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
// when the receive is complete.
auto ReceiveClientTrailingMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
// when the receive is complete.
auto ReceiveServerInitialMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
// when the receive is complete.
auto ReceiveServerTrailingMetadata(Target target);
// Send a cancellation: does not occupy the same payload, nor does it
// coalesce with other ops.
void Cancel(Target target, absl::Status status);
private:
struct Batch;
// Base pending operation
struct PendingCompletion {
explicit PendingCompletion(RefCountedPtr<Batch> batch);
virtual absl::string_view name() const = 0;
static void CompletionCallback(void* self, grpc_error_handle error);
grpc_closure on_done_closure;
Latch<absl::Status> done_latch;
RefCountedPtr<Batch> batch;
protected:
~PendingCompletion() = default;
};
// A pending receive message.
struct PendingReceiveMessage final : public PendingCompletion {
using PendingCompletion::PendingCompletion;
absl::string_view name() const override { return "receive_message"; }
MessageHandle IntoMessageHandle() {
return Arena::MakePooled<Message>(std::move(*payload), flags);
}
absl::optional<SliceBuffer> payload;
uint32_t flags;
bool call_failed_before_recv_message = false;
};
// A pending receive metadata.
struct PendingReceiveMetadata : public PendingCompletion {
using PendingCompletion::PendingCompletion;
Arena::PoolPtr<grpc_metadata_batch> metadata =
Arena::MakePooled<grpc_metadata_batch>();
protected:
~PendingReceiveMetadata() = default;
};
struct PendingReceiveInitialMetadata final : public PendingReceiveMetadata {
using PendingReceiveMetadata::PendingReceiveMetadata;
absl::string_view name() const override {
return "receive_initial_metadata";
}
};
struct PendingReceiveTrailingMetadata final : public PendingReceiveMetadata {
using PendingReceiveMetadata::PendingReceiveMetadata;
absl::string_view name() const override {
return "receive_trailing_metadata";
}
};
// Pending sends in a batch
struct PendingSends final : public PendingCompletion {
using PendingCompletion::PendingCompletion;
absl::string_view name() const override { return "sends"; }
MessageHandle send_message;
Arena::PoolPtr<grpc_metadata_batch> send_initial_metadata;
Arena::PoolPtr<grpc_metadata_batch> send_trailing_metadata;
bool trailing_metadata_sent = false;
};
// One outstanding batch.
struct Batch final {
Batch(grpc_transport_stream_op_batch_payload* payload,
grpc_stream_refcount* stream_refcount);
~Batch();
Batch(const Batch&) = delete;
Batch& operator=(const Batch&) = delete;
std::string DebugPrefix(Activity* activity = GetContext<Activity>()) const {
return absl::StrFormat("%s[connected] [batch %p] ", activity->DebugTag(),
this);
}
void IncrementRefCount() { ++refs; }
void Unref() {
if (--refs == 0) delete this;
}
RefCountedPtr<Batch> Ref() {
IncrementRefCount();
return RefCountedPtr<Batch>(this);
}
// Get an initialized pending completion.
// There are four pending completions potentially contained within a batch.
// They can be rather large so we don't create all of them always. Instead,
// we dynamically create them on the arena as needed.
// This method either returns the existing completion in a batch if that
// completion has already been initialized, or it creates a new completion
// and returns that.
template <typename T>
T* GetInitializedCompletion(T*(Batch::*field)) {
if (this->*field != nullptr) return this->*field;
this->*field = new T(Ref());
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sAdd batch closure for %s @ %s",
DebugPrefix().c_str(),
std::string((this->*field)->name()).c_str(),
(this->*field)->on_done_closure.DebugString().c_str());
}
return this->*field;
}
// grpc_transport_perform_stream_op on target.stream
void PerformWith(Target target);
// Take a promise, and return a promise that holds a ref on this batch until
// the promise completes or is cancelled.
template <typename P>
auto RefUntil(P promise) {
return [self = Ref(), promise = std::move(promise)]() mutable {
return promise();
};
}
grpc_transport_stream_op_batch batch;
PendingReceiveMessage* pending_receive_message = nullptr;
PendingReceiveInitialMetadata* pending_receive_initial_metadata = nullptr;
PendingReceiveTrailingMetadata* pending_receive_trailing_metadata = nullptr;
PendingSends* pending_sends = nullptr;
const RefCountedPtr<Party> party;
grpc_stream_refcount* const stream_refcount;
uint8_t refs = 0;
};
// Get a batch for the given target.
// Currently: if the current batch is for this target, return it - otherwise
// flush the batch and start a new one (and return that).
// This function may change in the future to allow multiple batches to be
// building at once (if that turns out to be useful for hedging).
Batch* GetBatch(Target target);
// Flush the current batch down to the transport.
void FlushBatch();
// Create a cancel batch with its own payload.
Batch* MakeCancel(grpc_stream_refcount* stream_refcount, absl::Status status);
// Note: we don't distinguish between client and server metadata here.
// At the time of writing they're both the same thing - and it's unclear
// whether we'll get to separate them prior to batches going away or not.
// So for now we claim YAGNI and just do the simplest possible implementation.
auto SendInitialMetadata(Target target,
Arena::PoolPtr<grpc_metadata_batch> md);
auto ReceiveInitialMetadata(Target target);
auto ReceiveTrailingMetadata(Target target);
// Combine send status and server metadata into a final status to report back
// to the containing call.
static ServerMetadataHandle CompleteSendServerTrailingMetadata(
Batch* batch, ServerMetadataHandle sent_metadata,
absl::Status send_result, bool actually_sent);
grpc_transport_stream_op_batch_payload* const payload_;
absl::optional<Target> target_;
Batch* batch_ = nullptr;
};
inline auto BatchBuilder::SendMessage(Target target, MessageHandle message) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue send message: %s", batch->DebugPrefix().c_str(),
message->DebugString().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_message = true;
payload_->send_message.send_message = message->payload();
payload_->send_message.flags = message->flags();
pc->send_message = std::move(message);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendInitialMetadata(
Target target, Arena::PoolPtr<grpc_metadata_batch> md) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue send initial metadata: %s",
batch->DebugPrefix().c_str(), md->DebugString().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_initial_metadata = true;
payload_->send_initial_metadata.send_initial_metadata = md.get();
pc->send_initial_metadata = std::move(md);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendClientInitialMetadata(
Target target, ClientMetadataHandle metadata) {
return SendInitialMetadata(target, std::move(metadata));
}
inline auto BatchBuilder::SendClientTrailingMetadata(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue send trailing metadata",
batch->DebugPrefix().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_trailing_metadata = true;
auto metadata = Arena::MakePooled<grpc_metadata_batch>();
payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
payload_->send_trailing_metadata.sent = nullptr;
pc->send_trailing_metadata = std::move(metadata);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendServerInitialMetadata(
Target target, ServerMetadataHandle metadata) {
return SendInitialMetadata(target, std::move(metadata));
}
inline auto BatchBuilder::SendServerTrailingMetadata(
Target target, ServerMetadataHandle metadata,
bool convert_to_cancellation) {
Batch* batch;
PendingSends* pc;
if (convert_to_cancellation) {
const auto status_code =
metadata->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN);
auto status = grpc_error_set_int(
absl::Status(static_cast<absl::StatusCode>(status_code),
metadata->GetOrCreatePointer(GrpcMessageMetadata())
->as_string_view()),
StatusIntProperty::kRpcStatus, status_code);
batch = MakeCancel(target.stream_refcount, std::move(status));
pc = batch->GetInitializedCompletion(&Batch::pending_sends);
} else {
batch = GetBatch(target);
pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.send_trailing_metadata = true;
payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
payload_->send_trailing_metadata.sent = &pc->trailing_metadata_sent;
}
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s%s: %s", batch->DebugPrefix().c_str(),
convert_to_cancellation ? "Send trailing metadata as cancellation"
: "Queue send trailing metadata",
metadata->DebugString().c_str());
}
batch->batch.on_complete = &pc->on_done_closure;
pc->send_trailing_metadata = std::move(metadata);
auto promise = Map(pc->done_latch.WaitAndCopy(),
[pc, batch = batch->Ref()](absl::Status status) {
return CompleteSendServerTrailingMetadata(
batch.get(), std::move(pc->send_trailing_metadata),
std::move(status), pc->trailing_metadata_sent);
});
if (convert_to_cancellation) {
batch->PerformWith(target);
}
return promise;
}
inline auto BatchBuilder::ReceiveMessage(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue receive message", batch->DebugPrefix().c_str());
}
auto* pc = batch->GetInitializedCompletion(&Batch::pending_receive_message);
batch->batch.recv_message = true;
payload_->recv_message.recv_message_ready = &pc->on_done_closure;
payload_->recv_message.recv_message = &pc->payload;
payload_->recv_message.flags = &pc->flags;
payload_->recv_message.call_failed_before_recv_message =
&pc->call_failed_before_recv_message;
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status)
-> absl::StatusOr<absl::optional<MessageHandle>> {
if (!status.ok()) return status;
if (!pc->payload.has_value()) {
if (pc->call_failed_before_recv_message) {
return absl::CancelledError();
}
return absl::nullopt;
}
return pc->IntoMessageHandle();
}));
}
inline auto BatchBuilder::ReceiveInitialMetadata(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue receive initial metadata",
batch->DebugPrefix().c_str());
}
auto* pc =
batch->GetInitializedCompletion(&Batch::pending_receive_initial_metadata);
batch->batch.recv_initial_metadata = true;
payload_->recv_initial_metadata.recv_initial_metadata_ready =
&pc->on_done_closure;
payload_->recv_initial_metadata.recv_initial_metadata = pc->metadata.get();
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status) -> absl::StatusOr<ClientMetadataHandle> {
if (!status.ok()) return status;
return std::move(pc->metadata);
}));
}
inline auto BatchBuilder::ReceiveClientInitialMetadata(Target target) {
return ReceiveInitialMetadata(target);
}
inline auto BatchBuilder::ReceiveServerInitialMetadata(Target target) {
return ReceiveInitialMetadata(target);
}
inline auto BatchBuilder::ReceiveTrailingMetadata(Target target) {
auto* batch = GetBatch(target);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%sQueue receive trailing metadata",
batch->DebugPrefix().c_str());
}
auto* pc = batch->GetInitializedCompletion(
&Batch::pending_receive_trailing_metadata);
batch->batch.recv_trailing_metadata = true;
payload_->recv_trailing_metadata.recv_trailing_metadata_ready =
&pc->on_done_closure;
payload_->recv_trailing_metadata.recv_trailing_metadata = pc->metadata.get();
payload_->recv_trailing_metadata.collect_stats =
&GetContext<CallContext>()->call_stats()->transport_stream_stats;
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status) -> absl::StatusOr<ServerMetadataHandle> {
if (!status.ok()) return status;
return std::move(pc->metadata);
}));
}
inline auto BatchBuilder::ReceiveClientTrailingMetadata(Target target) {
return ReceiveTrailingMetadata(target);
}
inline auto BatchBuilder::ReceiveServerTrailingMetadata(Target target) {
return ReceiveTrailingMetadata(target);
}
template <>
struct ContextType<BatchBuilder> {};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H

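Side note (not part of the diff): the deleted BatchBuilder above coalesces several stream ops aimed at the same target into one transport batch, flushing whenever the target changes or the builder is destroyed (see its class comment and GetBatch). A much-simplified standalone sketch of that coalescing pattern; Target, Op, and the printed "perform batch" are placeholders, not gRPC types or behavior:

#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Placeholder types; in gRPC these are streams, transports, and op batches.
using Target = std::string;
using Op = std::string;

class CoalescingBatchBuilder {
 public:
  ~CoalescingBatchBuilder() {
    if (!ops_.empty()) Flush();  // flush anything still pending
  }

  // Queue an op; if it is for a different target than the pending batch,
  // flush the pending batch first and start a new one.
  void Queue(const Target& target, Op op) {
    if (target_ && *target_ != target) Flush();
    target_ = target;
    ops_.push_back(std::move(op));
  }

 private:
  void Flush() {
    std::cout << "perform batch on " << *target_ << ":";
    for (const auto& op : ops_) std::cout << " " << op;
    std::cout << "\n";
    ops_.clear();
    target_.reset();
  }

  std::optional<Target> target_;
  std::vector<Op> ops_;
};

int main() {
  CoalescingBatchBuilder builder;
  builder.Queue("stream-1", "send_initial_metadata");
  builder.Queue("stream-1", "send_message");  // coalesced with the previous op
  builder.Queue("stream-2", "recv_message");  // different target => flush stream-1
}  // destructor flushes the remaining stream-2 batch
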
@ -230,8 +230,8 @@ void CallFilters::CancelDueToFailedPipeOperation(SourceLocation but_where) {
"Cancelling due to failed pipe operation: %s",
DebugString().c_str());
}
server_trailing_metadata_ =
ServerMetadataFromStatus(absl::CancelledError("Failed pipe operation"));
PushServerTrailingMetadata(
ServerMetadataFromStatus(absl::CancelledError("Failed pipe operation")));
server_trailing_metadata_waiter_.Wake();
}

@ -40,13 +40,23 @@ namespace grpc_core {
// The common middle part of a call - a reference is held by each of
// CallInitiator and CallHandler - which provide interfaces that are appropriate
// for each side of a call.
// The spine will ultimately host the pipes, filters, and context for one part
// of a call: ie top-half client channel, sub channel call, server call.
// TODO(ctiller): eventually drop this when we don't need to reference into
// legacy promise calls anymore
class CallSpineInterface {
// Hosts context, call filters, and the arena.
class CallSpine final : public Party {
public:
virtual ~CallSpineInterface() = default;
static RefCountedPtr<CallSpine> Create(
ClientMetadataHandle client_initial_metadata,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena) {
Arena* arena_ptr = arena.get();
return RefCountedPtr<CallSpine>(arena_ptr->New<CallSpine>(
std::move(client_initial_metadata), event_engine, std::move(arena)));
}
~CallSpine() override {}
CallFilters& call_filters() { return call_filters_; }
Arena* arena() { return arena_.get(); }
// Add a callback to be called when server trailing metadata is received.
void OnDone(absl::AnyInvocable<void()> fn) {
if (on_done_ == nullptr) {
@ -61,38 +71,70 @@ class CallSpineInterface {
void CallOnDone() {
if (on_done_ != nullptr) std::exchange(on_done_, nullptr)();
}
virtual Party& party() = 0;
virtual Arena* arena() = 0;
virtual void IncrementRefCount() = 0;
virtual void Unref() = 0;
virtual Promise<ValueOrFailure<absl::optional<ServerMetadataHandle>>>
PullServerInitialMetadata() = 0;
virtual Promise<ServerMetadataHandle> PullServerTrailingMetadata() = 0;
virtual Promise<StatusFlag> PushClientToServerMessage(
MessageHandle message) = 0;
virtual Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullClientToServerMessage() = 0;
virtual Promise<StatusFlag> PushServerToClientMessage(
MessageHandle message) = 0;
virtual Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullServerToClientMessage() = 0;
virtual void PushServerTrailingMetadata(ServerMetadataHandle md) = 0;
virtual void FinishSends() = 0;
virtual Promise<ValueOrFailure<ClientMetadataHandle>>
PullClientInitialMetadata() = 0;
virtual Promise<StatusFlag> PushServerInitialMetadata(
absl::optional<ServerMetadataHandle> md) = 0;
virtual Promise<bool> WasCancelled() = 0;
virtual ClientMetadata& UnprocessedClientInitialMetadata() = 0;
virtual void V2HackToStartCallWithoutACallFilterStack() = 0;
auto PullServerInitialMetadata() {
return call_filters().PullServerInitialMetadata();
}
auto PullServerTrailingMetadata() {
return call_filters().PullServerTrailingMetadata();
}
auto PushClientToServerMessage(MessageHandle message) {
return call_filters().PushClientToServerMessage(std::move(message));
}
auto PullClientToServerMessage() {
return call_filters().PullClientToServerMessage();
}
auto PushServerToClientMessage(MessageHandle message) {
return call_filters().PushServerToClientMessage(std::move(message));
}
auto PullServerToClientMessage() {
return call_filters().PullServerToClientMessage();
}
void PushServerTrailingMetadata(ServerMetadataHandle md) {
call_filters().PushServerTrailingMetadata(std::move(md));
}
void FinishSends() { call_filters().FinishClientToServerSends(); }
auto PullClientInitialMetadata() {
return call_filters().PullClientInitialMetadata();
}
auto PushServerInitialMetadata(absl::optional<ServerMetadataHandle> md) {
bool has_md = md.has_value();
return If(
has_md,
[this, md = std::move(md)]() mutable {
return call_filters().PushServerInitialMetadata(std::move(*md));
},
[this]() {
call_filters().NoServerInitialMetadata();
return Immediate<StatusFlag>(Success{});
});
}
auto WasCancelled() { return call_filters().WasCancelled(); }
ClientMetadata& UnprocessedClientInitialMetadata() {
return *call_filters().unprocessed_client_initial_metadata();
}
grpc_event_engine::experimental::EventEngine* event_engine() const override {
return event_engine_;
}
// Wrap a promise so that if it returns failure it automatically cancels
// the rest of the call.
// The resulting (returned) promise will resolve to Empty.
template <typename Promise>
auto CancelIfFails(Promise promise) {
DCHECK(GetContext<Activity>() == &party());
DCHECK(GetContext<Activity>() == this);
using P = promise_detail::PromiseLike<Promise>;
using ResultType = typename P::Result;
return Map(std::move(promise), [this](ResultType r) {
@ -107,7 +149,7 @@ class CallSpineInterface {
// that detail.
template <typename PromiseFactory>
void SpawnInfallible(absl::string_view name, PromiseFactory promise_factory) {
party().Spawn(name, std::move(promise_factory), [](Empty) {});
Spawn(name, std::move(promise_factory), [](Empty) {});
}
// Spawn a promise that returns some status-like type; if the status
@ -123,18 +165,17 @@ class CallSpineInterface {
std::is_same<bool,
decltype(IsStatusOk(std::declval<ResultType>()))>::value,
"SpawnGuarded promise must return a status-like object");
party().Spawn(
name, std::move(promise_factory), [this, whence](ResultType r) {
if (!IsStatusOk(r)) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "SpawnGuarded sees failure: %s (source: %s:%d)",
r.ToString().c_str(), whence.file(), whence.line());
}
auto status = StatusCast<ServerMetadataHandle>(std::move(r));
status->Set(GrpcCallWasCancelled(), true);
PushServerTrailingMetadata(std::move(status));
}
});
Spawn(name, std::move(promise_factory), [this, whence](ResultType r) {
if (!IsStatusOk(r)) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "SpawnGuarded sees failure: %s (source: %s:%d)",
r.ToString().c_str(), whence.file(), whence.line());
}
auto status = StatusCast<ServerMetadataHandle>(std::move(r));
status->Set(GrpcCallWasCancelled(), true);
PushServerTrailingMetadata(std::move(status));
}
});
}
// Wrap a promise so that if the call completes that promise is cancelled.
@ -154,217 +195,6 @@ class CallSpineInterface {
});
}
private:
absl::AnyInvocable<void()> on_done_{nullptr};
};
// Implementation of CallSpine atop the v2 Pipe based arrangement.
// This implementation will go away in favor of an implementation atop
// CallFilters by the time v3 lands.
class PipeBasedCallSpine : public CallSpineInterface {
public:
virtual Pipe<ClientMetadataHandle>& client_initial_metadata() = 0;
virtual Pipe<ServerMetadataHandle>& server_initial_metadata() = 0;
virtual Pipe<MessageHandle>& client_to_server_messages() = 0;
virtual Pipe<MessageHandle>& server_to_client_messages() = 0;
virtual Latch<ServerMetadataHandle>& cancel_latch() = 0;
virtual Latch<bool>& was_cancelled_latch() = 0;
Promise<ValueOrFailure<absl::optional<ServerMetadataHandle>>>
PullServerInitialMetadata() final {
DCHECK(GetContext<Activity>() == &party());
return Map(server_initial_metadata().receiver.Next(),
[](NextResult<ServerMetadataHandle> md)
-> ValueOrFailure<absl::optional<ServerMetadataHandle>> {
if (!md.has_value()) {
if (md.cancelled()) return Failure{};
return absl::optional<ServerMetadataHandle>();
}
return absl::optional<ServerMetadataHandle>(std::move(*md));
});
}
Promise<ServerMetadataHandle> PullServerTrailingMetadata() final {
DCHECK(GetContext<Activity>() == &party());
return cancel_latch().Wait();
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullServerToClientMessage() final {
DCHECK(GetContext<Activity>() == &party());
return Map(server_to_client_messages().receiver.Next(), MapNextMessage);
}
Promise<StatusFlag> PushClientToServerMessage(MessageHandle message) final {
DCHECK(GetContext<Activity>() == &party());
return Map(client_to_server_messages().sender.Push(std::move(message)),
[](bool r) { return StatusFlag(r); });
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullClientToServerMessage() final {
DCHECK(GetContext<Activity>() == &party());
return Map(client_to_server_messages().receiver.Next(), MapNextMessage);
}
Promise<StatusFlag> PushServerToClientMessage(MessageHandle message) final {
DCHECK(GetContext<Activity>() == &party());
return Map(server_to_client_messages().sender.Push(std::move(message)),
[](bool r) { return StatusFlag(r); });
}
void FinishSends() final {
DCHECK(GetContext<Activity>() == &party());
client_to_server_messages().sender.Close();
}
void PushServerTrailingMetadata(ServerMetadataHandle metadata) final {
DCHECK(GetContext<Activity>() == &party());
auto& c = cancel_latch();
if (c.is_set()) return;
const bool was_cancelled =
metadata->get(GrpcCallWasCancelled()).value_or(false);
c.Set(std::move(metadata));
CallOnDone();
was_cancelled_latch().Set(was_cancelled);
client_initial_metadata().sender.CloseWithError();
server_initial_metadata().sender.Close();
client_to_server_messages().sender.CloseWithError();
server_to_client_messages().sender.Close();
}
Promise<bool> WasCancelled() final {
DCHECK(GetContext<Activity>() == &party());
return was_cancelled_latch().Wait();
}
Promise<ValueOrFailure<ClientMetadataHandle>> PullClientInitialMetadata()
final {
DCHECK(GetContext<Activity>() == &party());
return Map(client_initial_metadata().receiver.Next(),
[](NextResult<ClientMetadataHandle> md)
-> ValueOrFailure<ClientMetadataHandle> {
if (!md.has_value()) return Failure{};
return std::move(*md);
});
}
Promise<StatusFlag> PushServerInitialMetadata(
absl::optional<ServerMetadataHandle> md) final {
DCHECK(GetContext<Activity>() == &party());
return If(
md.has_value(),
[&md, this]() {
return Map(server_initial_metadata().sender.Push(std::move(*md)),
[](bool ok) { return StatusFlag(ok); });
},
[this]() {
server_initial_metadata().sender.Close();
return []() -> StatusFlag { return Success{}; };
});
}
private:
static ValueOrFailure<absl::optional<MessageHandle>> MapNextMessage(
NextResult<MessageHandle> r) {
if (!r.has_value()) {
if (r.cancelled()) return Failure{};
return absl::optional<MessageHandle>();
}
return absl::optional<MessageHandle>(std::move(*r));
}
};
class CallSpine final : public CallSpineInterface, public Party {
public:
static RefCountedPtr<CallSpine> Create(
ClientMetadataHandle client_initial_metadata,
grpc_event_engine::experimental::EventEngine* event_engine,
RefCountedPtr<Arena> arena) {
auto* arena_ptr = arena.get();
return RefCountedPtr<CallSpine>(arena_ptr->New<CallSpine>(
std::move(client_initial_metadata), event_engine, std::move(arena)));
}
~CallSpine() override {}
CallFilters& call_filters() { return call_filters_; }
Party& party() override { return *this; }
Arena* arena() override { return arena_.get(); }
void IncrementRefCount() override { Party::IncrementRefCount(); }
void Unref() override { Party::Unref(); }
Promise<ValueOrFailure<absl::optional<ServerMetadataHandle>>>
PullServerInitialMetadata() override {
return call_filters().PullServerInitialMetadata();
}
Promise<ServerMetadataHandle> PullServerTrailingMetadata() override {
return call_filters().PullServerTrailingMetadata();
}
Promise<StatusFlag> PushClientToServerMessage(
MessageHandle message) override {
return call_filters().PushClientToServerMessage(std::move(message));
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullClientToServerMessage() override {
return call_filters().PullClientToServerMessage();
}
Promise<StatusFlag> PushServerToClientMessage(
MessageHandle message) override {
return call_filters().PushServerToClientMessage(std::move(message));
}
Promise<ValueOrFailure<absl::optional<MessageHandle>>>
PullServerToClientMessage() override {
return call_filters().PullServerToClientMessage();
}
void PushServerTrailingMetadata(ServerMetadataHandle md) override {
call_filters().PushServerTrailingMetadata(std::move(md));
}
void FinishSends() override { call_filters().FinishClientToServerSends(); }
Promise<ValueOrFailure<ClientMetadataHandle>> PullClientInitialMetadata()
override {
return call_filters().PullClientInitialMetadata();
}
Promise<StatusFlag> PushServerInitialMetadata(
absl::optional<ServerMetadataHandle> md) override {
if (md.has_value()) {
return call_filters().PushServerInitialMetadata(std::move(*md));
} else {
call_filters().NoServerInitialMetadata();
return Immediate<StatusFlag>(Success{});
}
}
Promise<bool> WasCancelled() override {
return call_filters().WasCancelled();
}
ClientMetadata& UnprocessedClientInitialMetadata() override {
return *call_filters().unprocessed_client_initial_metadata();
}
grpc_event_engine::experimental::EventEngine* event_engine() const override {
return event_engine_;
}
void V2HackToStartCallWithoutACallFilterStack() override {
CallFilters::StackBuilder empty_stack_builder;
call_filters().SetStack(empty_stack_builder.Build());
}
private:
friend class Arena;
CallSpine(ClientMetadataHandle client_initial_metadata,
@ -407,11 +237,13 @@ class CallSpine final : public CallSpineInterface, public Party {
CallFilters call_filters_;
// Event engine associated with this call
grpc_event_engine::experimental::EventEngine* const event_engine_;
absl::AnyInvocable<void()> on_done_{nullptr};
};
class CallInitiator {
public:
explicit CallInitiator(RefCountedPtr<CallSpineInterface> spine)
CallInitiator() = default;
explicit CallInitiator(RefCountedPtr<CallSpine> spine)
: spine_(std::move(spine)) {}
template <typename Promise>
@ -435,8 +267,9 @@ class CallInitiator {
return spine_->PullServerTrailingMetadata();
}
void Cancel() {
auto status = ServerMetadataFromStatus(absl::CancelledError());
void Cancel(absl::Status error = absl::CancelledError()) {
CHECK(!error.ok());
auto status = ServerMetadataFromStatus(error);
status->Set(GrpcCallWasCancelled(), true);
spine_->PushServerTrailingMetadata(std::move(status));
}
@ -461,18 +294,22 @@ class CallInitiator {
template <typename PromiseFactory>
auto SpawnWaitable(absl::string_view name, PromiseFactory promise_factory) {
return spine_->party().SpawnWaitable(name, std::move(promise_factory));
return spine_->SpawnWaitable(name, std::move(promise_factory));
}
Arena* arena() { return spine_->arena(); }
grpc_event_engine::experimental::EventEngine* event_engine() const {
return spine_->event_engine();
}
private:
RefCountedPtr<CallSpineInterface> spine_;
RefCountedPtr<CallSpine> spine_;
};
class CallHandler {
public:
explicit CallHandler(RefCountedPtr<CallSpineInterface> spine)
explicit CallHandler(RefCountedPtr<CallSpine> spine)
: spine_(std::move(spine)) {}
auto PullClientInitialMetadata() {
@ -521,22 +358,22 @@ class CallHandler {
template <typename PromiseFactory>
auto SpawnWaitable(absl::string_view name, PromiseFactory promise_factory) {
return spine_->party().SpawnWaitable(name, std::move(promise_factory));
return spine_->SpawnWaitable(name, std::move(promise_factory));
}
Arena* arena() { return spine_->arena(); }
grpc_event_engine::experimental::EventEngine* event_engine() const {
return DownCast<CallSpine*>(spine_.get())->event_engine();
return spine_->event_engine();
}
private:
RefCountedPtr<CallSpineInterface> spine_;
RefCountedPtr<CallSpine> spine_;
};
class UnstartedCallHandler {
public:
explicit UnstartedCallHandler(RefCountedPtr<CallSpineInterface> spine)
explicit UnstartedCallHandler(RefCountedPtr<CallSpine> spine)
: spine_(std::move(spine)) {}
void PushServerTrailingMetadata(ServerMetadataHandle status) {
@ -569,29 +406,28 @@ class UnstartedCallHandler {
template <typename PromiseFactory>
auto SpawnWaitable(absl::string_view name, PromiseFactory promise_factory) {
return spine_->party().SpawnWaitable(name, std::move(promise_factory));
return spine_->SpawnWaitable(name, std::move(promise_factory));
}
ClientMetadata& UnprocessedClientInitialMetadata() {
return spine_->UnprocessedClientInitialMetadata();
}
CallHandler V2HackToStartCallWithoutACallFilterStack() {
spine_->V2HackToStartCallWithoutACallFilterStack();
return CallHandler(std::move(spine_));
// Helper for the very common situation in tests where we want to start a call
// with an empty filter stack.
CallHandler StartWithEmptyFilterStack() {
return StartCall(CallFilters::StackBuilder().Build());
}
CallHandler StartCall(RefCountedPtr<CallFilters::Stack> call_filters) {
DownCast<CallSpine*>(spine_.get())
->call_filters()
.SetStack(std::move(call_filters));
spine_->call_filters().SetStack(std::move(call_filters));
return CallHandler(std::move(spine_));
}
Arena* arena() { return spine_->arena(); }
private:
RefCountedPtr<CallSpineInterface> spine_;
RefCountedPtr<CallSpine> spine_;
};
struct CallInitiatorAndHandler {

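Side note (not part of the diff): after this change both CallInitiator and CallHandler hold a RefCountedPtr<CallSpine> directly; each is a thin handle exposing only its side of the shared spine. A minimal standalone analogue of that handle-pair-over-shared-core shape, using std::shared_ptr and a trivial message queue (all names below are illustrative, not gRPC's API):

#include <iostream>
#include <memory>
#include <queue>
#include <string>

// Shared core holding the call's state; both handles keep it alive.
struct Spine {
  std::queue<std::string> client_to_server;
};

class Initiator {  // client-side view
 public:
  explicit Initiator(std::shared_ptr<Spine> spine) : spine_(std::move(spine)) {}
  void PushMessage(std::string msg) {
    spine_->client_to_server.push(std::move(msg));
  }
 private:
  std::shared_ptr<Spine> spine_;
};

class Handler {  // server-side view
 public:
  explicit Handler(std::shared_ptr<Spine> spine) : spine_(std::move(spine)) {}
  bool PullMessage(std::string* msg) {
    if (spine_->client_to_server.empty()) return false;
    *msg = std::move(spine_->client_to_server.front());
    spine_->client_to_server.pop();
    return true;
  }
 private:
  std::shared_ptr<Spine> spine_;
};

int main() {
  auto spine = std::make_shared<Spine>();
  Initiator initiator{spine};
  Handler handler{spine};
  initiator.PushMessage("hello");
  std::string msg;
  if (handler.PullMessage(&msg)) std::cout << "server got: " << msg << "\n";
}
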
@ -550,7 +550,7 @@ class GrpcLb final : public LoadBalancingPolicy {
bool shutting_down_ = false;
// The channel for communicating with the LB server.
OrphanablePtr<Channel> lb_channel_;
RefCountedPtr<Channel> lb_channel_;
StateWatcher* watcher_ = nullptr;
// Response generator to inject address updates into lb_channel_.
RefCountedPtr<FakeResolverResponseGenerator> response_generator_;

@ -456,6 +456,19 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
absl::Status status_;
};
// A picker that returns PickResult::Drop for all picks.
class DropPicker final : public SubchannelPicker {
public:
explicit DropPicker(absl::Status status) : status_(status) {}
PickResult Pick(PickArgs /*args*/) override {
return PickResult::Drop(status_);
}
private:
absl::Status status_;
};
protected:
std::shared_ptr<WorkSerializer> work_serializer() const {
return work_serializer_;

Some files were not shown because too many files have changed in this diff.