[promises] Convert call to a party (#32359)


---------

Co-authored-by: ctiller <ctiller@users.noreply.github.com>
Branch: create-pull-request/patch-93f0266
Author: Craig Tiller (committed by GitHub)
parent 5029af9578
commit a9873e8357
85 changed files:

  1. BUILD (11)
  2. CMakeLists.txt (78)
  3. Makefile (4)
  4. build_autogenerated.yaml (90)
  5. config.m4 (2)
  6. config.w32 (2)
  7. gRPC-C++.podspec (8)
  8. gRPC-Core.podspec (10)
  9. grpc.gemspec (6)
  10. grpc.gyp (10)
  11. package.xml (6)
  12. src/core/BUILD (60)
  13. src/core/ext/filters/client_channel/client_channel.h (2)
  14. src/core/ext/filters/client_channel/retry_filter.cc (4)
  15. src/core/ext/filters/http/client/http_client_filter.cc (6)
  16. src/core/ext/filters/http/message_compress/compression_filter.cc (19)
  17. src/core/ext/filters/message_size/message_size_filter.cc (365)
  18. src/core/ext/filters/message_size/message_size_filter.h (51)
  19. src/core/ext/transport/binder/transport/binder_transport.cc (1)
  20. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (8)
  21. src/core/ext/transport/chttp2/transport/internal.h (3)
  22. src/core/ext/transport/cronet/transport/cronet_transport.cc (1)
  23. src/core/ext/transport/inproc/inproc_transport.cc (34)
  24. src/core/lib/channel/connected_channel.cc (1530)
  25. src/core/lib/channel/promise_based_filter.cc (133)
  26. src/core/lib/channel/promise_based_filter.h (31)
  27. src/core/lib/gprpp/orphanable.h (7)
  28. src/core/lib/gprpp/ref_counted.h (67)
  29. src/core/lib/gprpp/thd.h (16)
  30. src/core/lib/iomgr/call_combiner.h (4)
  31. src/core/lib/promise/activity.cc (28)
  32. src/core/lib/promise/activity.h (85)
  33. src/core/lib/promise/context.h (10)
  34. src/core/lib/promise/detail/promise_factory.h (4)
  35. src/core/lib/promise/if.h (9)
  36. src/core/lib/promise/interceptor_list.h (25)
  37. src/core/lib/promise/intra_activity_waiter.h (55)
  38. src/core/lib/promise/latch.h (92)
  39. src/core/lib/promise/loop.h (22)
  40. src/core/lib/promise/map.h (7)
  41. src/core/lib/promise/observable.h (295)
  42. src/core/lib/promise/party.cc (306)
  43. src/core/lib/promise/party.h (173)
  44. src/core/lib/promise/pipe.h (254)
  45. src/core/lib/promise/promise.h (4)
  46. src/core/lib/resource_quota/arena.cc (22)
  47. src/core/lib/resource_quota/arena.h (124)
  48. src/core/lib/security/transport/server_auth_filter.cc (22)
  49. src/core/lib/slice/slice.cc (2)
  50. src/core/lib/surface/call.cc (1799)
  51. src/core/lib/surface/call.h (5)
  52. src/core/lib/surface/lame_client.cc (1)
  53. src/core/lib/transport/batch_builder.cc (179)
  54. src/core/lib/transport/batch_builder.h (468)
  55. src/core/lib/transport/metadata_batch.h (12)
  56. src/core/lib/transport/transport.cc (32)
  57. src/core/lib/transport/transport.h (69)
  58. src/core/lib/transport/transport_impl.h (7)
  59. src/python/grpcio/grpc_core_dependencies.py (2)
  60. test/core/end2end/cq_verifier.cc (32)
  61. test/core/end2end/fixtures/proxy.cc (2)
  62. test/core/end2end/tests/filter_init_fails.cc (26)
  63. test/core/end2end/tests/max_message_length.cc (20)
  64. test/core/filters/client_auth_filter_test.cc (6)
  65. test/core/filters/client_authority_filter_test.cc (6)
  66. test/core/filters/filter_fuzzer.cc (13)
  67. test/core/gprpp/ref_counted_test.cc (4)
  68. test/core/gprpp/thd_test.cc (28)
  69. test/core/promise/BUILD (29)
  70. test/core/promise/if_test.cc (2)
  71. test/core/promise/latch_test.cc (44)
  72. test/core/promise/loop_test.cc (16)
  73. test/core/promise/map_test.cc (3)
  74. test/core/promise/mpsc_test.cc (12)
  75. test/core/promise/observable_test.cc (134)
  76. test/core/promise/party_test.cc (109)
  77. test/core/promise/pipe_test.cc (54)
  78. test/core/promise/promise_factory_test.cc (14)
  79. test/core/promise/promise_fuzzer.cc (1)
  80. test/core/resource_quota/arena_test.cc (1)
  81. test/cpp/microbenchmarks/bm_call_create.cc (15)
  82. tools/codegen/core/optimize_arena_pool_sizes.py (132)
  83. tools/doxygen/Doxyfile.c++.internal (6)
  84. tools/doxygen/Doxyfile.core.internal (6)
  85. tools/run_tests/generated/tests.json (24)

11
BUILD

@ -699,6 +699,7 @@ grpc_cc_library(
external_deps = [
"absl/base",
"absl/base:core_headers",
"absl/functional:any_invocable",
"absl/memory",
"absl/random",
"absl/status",
@ -1309,6 +1310,7 @@ grpc_cc_library(
"//src/core:lib/transport/timeout_encoding.cc",
"//src/core:lib/transport/transport.cc",
"//src/core:lib/transport/transport_op_string.cc",
"//src/core:lib/transport/batch_builder.cc",
] +
# TODO(vigneshbabu): remove these
# These headers used to be vended by this target, but they have to be
@ -1400,6 +1402,7 @@ grpc_cc_library(
"//src/core:lib/transport/timeout_encoding.h",
"//src/core:lib/transport/transport.h",
"//src/core:lib/transport/transport_impl.h",
"//src/core:lib/transport/batch_builder.h",
] +
# TODO(vigneshbabu): remove these
# These headers used to be vended by this target, but they have to be
@ -1456,6 +1459,7 @@ grpc_cc_library(
"stats",
"uri_parser",
"work_serializer",
"//src/core:1999",
"//src/core:activity",
"//src/core:arena",
"//src/core:arena_promise",
@ -1483,15 +1487,19 @@ grpc_cc_library(
"//src/core:event_engine_trace",
"//src/core:event_log",
"//src/core:experiments",
"//src/core:for_each",
"//src/core:gpr_atm",
"//src/core:gpr_manual_constructor",
"//src/core:gpr_spinlock",
"//src/core:grpc_sockaddr",
"//src/core:http2_errors",
"//src/core:if",
"//src/core:init_internally",
"//src/core:iomgr_fwd",
"//src/core:iomgr_port",
"//src/core:json",
"//src/core:latch",
"//src/core:loop",
"//src/core:map",
"//src/core:match",
"//src/core:memory_quota",
@ -1503,10 +1511,12 @@ grpc_cc_library(
"//src/core:pollset_set",
"//src/core:posix_event_engine_base_hdrs",
"//src/core:promise_status",
"//src/core:race",
"//src/core:ref_counted",
"//src/core:resolved_address",
"//src/core:resource_quota",
"//src/core:resource_quota_trace",
"//src/core:seq",
"//src/core:slice",
"//src/core:slice_buffer",
"//src/core:slice_cast",
@ -2339,6 +2349,7 @@ grpc_cc_library(
grpc_cc_library(
name = "promise",
external_deps = [
"absl/functional:any_invocable",
"absl/status",
"absl/types:optional",
],

78
CMakeLists.txt generated

@ -1069,7 +1069,6 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_cxx nonblocking_test)
add_dependencies(buildtests_cxx notification_test)
add_dependencies(buildtests_cxx num_external_connectivity_watchers_test)
add_dependencies(buildtests_cxx observable_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx oracle_event_engine_posix_test)
endif()
@ -1639,6 +1638,7 @@ target_link_libraries(gpr
${_gRPC_ALLTARGETS_LIBRARIES}
absl::base
absl::core_headers
absl::any_invocable
absl::memory
absl::random_random
absl::status
@ -2312,6 +2312,7 @@ add_library(grpc
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/matchers/matchers.cc
src/core/lib/promise/activity.cc
src/core/lib/promise/party.cc
src/core/lib/promise/sleep.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
@ -2415,6 +2416,7 @@ add_library(grpc
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
src/core/lib/transport/batch_builder.cc
src/core/lib/transport/bdp_estimator.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
@ -2506,7 +2508,6 @@ target_link_libraries(grpc
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
absl::any_invocable
absl::bind_front
absl::function_ref
absl::hash
@ -2999,6 +3000,7 @@ add_library(grpc_unsecure
src/core/lib/load_balancing/lb_policy.cc
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/promise/activity.cc
src/core/lib/promise/party.cc
src/core/lib/promise/sleep.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
@ -3071,6 +3073,7 @@ add_library(grpc_unsecure
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
src/core/lib/transport/batch_builder.cc
src/core/lib/transport/bdp_estimator.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
@ -3138,7 +3141,6 @@ target_link_libraries(grpc_unsecure
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
absl::any_invocable
absl::bind_front
absl::function_ref
absl::hash
@ -4516,6 +4518,7 @@ add_library(grpc_authorization_provider
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/matchers/matchers.cc
src/core/lib/promise/activity.cc
src/core/lib/promise/party.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
src/core/lib/resolver/resolver_registry.cc
@ -4586,6 +4589,7 @@ add_library(grpc_authorization_provider
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
src/core/lib/transport/batch_builder.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/handshaker.cc
@ -4644,7 +4648,6 @@ target_link_libraries(grpc_authorization_provider
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@ -5396,7 +5399,6 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_POSIX)
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::algorithm_container
absl::any_invocable
absl::span
${_gRPC_BENCHMARK_LIBRARIES}
gpr
@ -8342,7 +8344,6 @@ target_link_libraries(chunked_vector_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@ -9091,7 +9092,6 @@ target_link_libraries(common_closures_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::statusor
gpr
)
@ -10119,7 +10119,6 @@ target_link_libraries(endpoint_config_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::type_traits
absl::statusor
gpr
@ -10629,7 +10628,6 @@ target_link_libraries(exec_ctx_wakeup_scheduler_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::hash
absl::type_traits
absl::statusor
@ -11111,7 +11109,6 @@ target_link_libraries(flow_control_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@ -11182,7 +11179,6 @@ target_link_libraries(for_each_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@ -11267,7 +11263,6 @@ target_link_libraries(forkable_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::statusor
gpr
)
@ -11577,6 +11572,7 @@ add_executable(frame_test
src/core/lib/load_balancing/lb_policy.cc
src/core/lib/load_balancing/lb_policy_registry.cc
src/core/lib/promise/activity.cc
src/core/lib/promise/party.cc
src/core/lib/promise/trace.cc
src/core/lib/resolver/resolver.cc
src/core/lib/resolver/resolver_registry.cc
@ -11624,6 +11620,7 @@ add_executable(frame_test
src/core/lib/surface/server.cc
src/core/lib/surface/validate_metadata.cc
src/core/lib/surface/version.cc
src/core/lib/transport/batch_builder.cc
src/core/lib/transport/connectivity_state.cc
src/core/lib/transport/error_utils.cc
src/core/lib/transport/handshaker_registry.cc
@ -11668,7 +11665,6 @@ target_link_libraries(frame_test
absl::flat_hash_map
absl::flat_hash_set
absl::inlined_vector
absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@ -14084,7 +14080,6 @@ target_link_libraries(interceptor_list_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@ -14882,7 +14877,6 @@ target_link_libraries(map_pipe_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::function_ref
absl::hash
absl::type_traits
@ -15673,49 +15667,6 @@ target_link_libraries(num_external_connectivity_watchers_test
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(observable_test
src/core/lib/promise/activity.cc
test/core/promise/observable_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_compile_features(observable_test PUBLIC cxx_std_14)
target_include_directories(observable_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(observable_test
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::hash
absl::type_traits
absl::statusor
absl::utility
gpr
)
endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -16163,7 +16114,6 @@ endif()
if(gRPC_BUILD_TESTS)
add_executable(party_test
src/core/lib/promise/party.cc
test/core/promise/party_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
@ -16283,7 +16233,6 @@ target_link_libraries(periodic_update_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::function_ref
absl::hash
absl::statusor
@ -19075,7 +19024,6 @@ target_link_libraries(slice_string_helpers_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::hash
absl::statusor
gpr
@ -19555,7 +19503,6 @@ target_link_libraries(static_stride_scheduler_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::span
gpr
)
@ -20385,7 +20332,6 @@ target_link_libraries(test_core_event_engine_posix_timer_heap_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::statusor
gpr
)
@ -20428,7 +20374,6 @@ target_link_libraries(test_core_event_engine_posix_timer_list_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::statusor
gpr
)
@ -20477,7 +20422,6 @@ target_link_libraries(test_core_event_engine_slice_buffer_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::hash
absl::statusor
absl::utility
@ -20593,7 +20537,6 @@ target_link_libraries(test_core_gprpp_time_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::any_invocable
absl::statusor
gpr
)
@ -21085,7 +21028,6 @@ target_link_libraries(thread_pool_test
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::any_invocable
absl::statusor
gpr
)
@ -26046,7 +25988,7 @@ generate_pkgconfig(
"gpr"
"gRPC platform support library"
"${gRPC_CORE_VERSION}"
"absl_base absl_cord absl_core_headers absl_memory absl_optional absl_random_random absl_status absl_str_format absl_strings absl_synchronization absl_time absl_variant"
"absl_any_invocable absl_base absl_cord absl_core_headers absl_memory absl_optional absl_random_random absl_status absl_str_format absl_strings absl_synchronization absl_time absl_variant"
""
"-lgpr"
""

4
Makefile generated

@ -1561,6 +1561,7 @@ LIBGRPC_SRC = \
src/core/lib/load_balancing/lb_policy_registry.cc \
src/core/lib/matchers/matchers.cc \
src/core/lib/promise/activity.cc \
src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resolver/resolver.cc \
@ -1664,6 +1665,7 @@ LIBGRPC_SRC = \
src/core/lib/surface/server.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \
@ -2101,6 +2103,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/load_balancing/lb_policy.cc \
src/core/lib/load_balancing/lb_policy_registry.cc \
src/core/lib/promise/activity.cc \
src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resolver/resolver.cc \
@ -2173,6 +2176,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/surface/server.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \

90
build_autogenerated.yaml

@ -251,6 +251,7 @@ libs:
deps:
- absl/base:base
- absl/base:core_headers
- absl/functional:any_invocable
- absl/memory:memory
- absl/random:random
- absl/status:status
@ -950,12 +951,13 @@ libs:
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@ -1059,6 +1061,7 @@ libs:
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- src/core/lib/transport/batch_builder.h
- src/core/lib/transport/bdp_estimator.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
@ -1710,6 +1713,7 @@ libs:
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/matchers/matchers.cc
- src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
- src/core/lib/promise/sleep.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
@ -1813,6 +1817,7 @@ libs:
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/bdp_estimator.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
@ -1864,7 +1869,6 @@ libs:
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
- absl/functional:any_invocable
- absl/functional:bind_front
- absl/functional:function_ref
- absl/hash:hash
@ -2290,12 +2294,13 @@ libs:
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@ -2370,6 +2375,7 @@ libs:
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- src/core/lib/transport/batch_builder.h
- src/core/lib/transport/bdp_estimator.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
@ -2663,6 +2669,7 @@ libs:
- src/core/lib/load_balancing/lb_policy.cc
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
- src/core/lib/promise/sleep.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
@ -2735,6 +2742,7 @@ libs:
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/bdp_estimator.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
@ -2762,7 +2770,6 @@ libs:
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
- absl/functional:any_invocable
- absl/functional:bind_front
- absl/functional:function_ref
- absl/hash:hash
@ -3748,11 +3755,13 @@ libs:
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@ -3826,6 +3835,7 @@ libs:
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- src/core/lib/transport/batch_builder.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
- src/core/lib/transport/handshaker.h
@ -4003,6 +4013,7 @@ libs:
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/matchers/matchers.cc
- src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
- src/core/lib/resolver/resolver_registry.cc
@ -4073,6 +4084,7 @@ libs:
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/handshaker.cc
@ -4092,7 +4104,6 @@ libs:
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@ -4374,7 +4385,6 @@ targets:
- test/core/client_channel/lb_policy/static_stride_scheduler_benchmark.cc
deps:
- absl/algorithm:container
- absl/functional:any_invocable
- absl/types:span
- benchmark
- gpr
@ -5857,7 +5867,6 @@ targets:
- test/core/gprpp/chunked_vector_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@ -6145,7 +6154,6 @@ targets:
src:
- test/core/event_engine/common_closures_test.cc
deps:
- absl/functional:any_invocable
- absl/status:statusor
- gpr
- name: completion_queue_threading_test
@ -6574,7 +6582,6 @@ targets:
- src/core/lib/surface/channel_stack_type.cc
- test/core/event_engine/endpoint_config_test.cc
deps:
- absl/functional:any_invocable
- absl/meta:type_traits
- absl/status:statusor
- gpr
@ -6850,7 +6857,6 @@ targets:
- src/core/lib/slice/slice_string_helpers.cc
- test/core/promise/exec_ctx_wakeup_scheduler_test.cc
deps:
- absl/functional:any_invocable
- absl/hash:hash
- absl/meta:type_traits
- absl/status:statusor
@ -7155,7 +7161,6 @@ targets:
- test/core/transport/chttp2/flow_control_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@ -7202,7 +7207,6 @@ targets:
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/join.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
@ -7254,7 +7258,6 @@ targets:
- test/core/promise/for_each_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@ -7288,7 +7291,6 @@ targets:
- test/core/event_engine/forkable_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/status:statusor
- gpr
- name: format_request_test
@ -7557,11 +7559,13 @@ targets:
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/exec_ctx_wakeup_scheduler.h
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
- src/core/lib/promise/party.h
- src/core/lib/promise/pipe.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
@ -7612,6 +7616,7 @@ targets:
- src/core/lib/surface/lame_client.h
- src/core/lib/surface/server.h
- src/core/lib/surface/validate_metadata.h
- src/core/lib/transport/batch_builder.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/error_utils.h
- src/core/lib/transport/handshaker_factory.h
@ -7795,6 +7800,7 @@ targets:
- src/core/lib/load_balancing/lb_policy.cc
- src/core/lib/load_balancing/lb_policy_registry.cc
- src/core/lib/promise/activity.cc
- src/core/lib/promise/party.cc
- src/core/lib/promise/trace.cc
- src/core/lib/resolver/resolver.cc
- src/core/lib/resolver/resolver_registry.cc
@ -7842,6 +7848,7 @@ targets:
- src/core/lib/surface/server.cc
- src/core/lib/surface/validate_metadata.cc
- src/core/lib/surface/version.cc
- src/core/lib/transport/batch_builder.cc
- src/core/lib/transport/connectivity_state.cc
- src/core/lib/transport/error_utils.cc
- src/core/lib/transport/handshaker_registry.cc
@ -7859,7 +7866,6 @@ targets:
- absl/container:flat_hash_map
- absl/container:flat_hash_set
- absl/container:inlined_vector
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@ -8950,7 +8956,6 @@ targets:
- test/core/promise/interceptor_list_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@ -9166,7 +9171,6 @@ targets:
- src/core/lib/promise/detail/promise_like.h
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/join.h
- src/core/lib/promise/latch.h
- src/core/lib/promise/poll.h
@ -9309,7 +9313,6 @@ targets:
- src/core/lib/promise/for_each.h
- src/core/lib/promise/if.h
- src/core/lib/promise/interceptor_list.h
- src/core/lib/promise/intra_activity_waiter.h
- src/core/lib/promise/join.h
- src/core/lib/promise/loop.h
- src/core/lib/promise/map.h
@ -9362,7 +9365,6 @@ targets:
- test/core/promise/map_pipe_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/meta:type_traits
@ -9668,39 +9670,6 @@ targets:
- test/core/surface/num_external_connectivity_watchers_test.cc
deps:
- grpc_test_util
- name: observable_test
gtest: true
build: test
language: c++
headers:
- src/core/lib/gprpp/atomic_utils.h
- src/core/lib/gprpp/orphanable.h
- src/core/lib/gprpp/ref_counted.h
- src/core/lib/gprpp/ref_counted_ptr.h
- src/core/lib/promise/activity.h
- src/core/lib/promise/context.h
- src/core/lib/promise/detail/basic_seq.h
- src/core/lib/promise/detail/promise_factory.h
- src/core/lib/promise/detail/promise_like.h
- src/core/lib/promise/detail/status.h
- src/core/lib/promise/detail/switch.h
- src/core/lib/promise/observable.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
- src/core/lib/promise/seq.h
- src/core/lib/promise/wait_set.h
- test/core/promise/test_wakeup_schedulers.h
src:
- src/core/lib/promise/activity.cc
- test/core/promise/observable_test.cc
deps:
- absl/container:flat_hash_set
- absl/hash:hash
- absl/meta:type_traits
- absl/status:statusor
- absl/utility:utility
- gpr
uses_polling: false
- name: oracle_event_engine_posix_test
gtest: true
build: test
@ -9867,10 +9836,8 @@ targets:
gtest: true
build: test
language: c++
headers:
- src/core/lib/promise/party.h
headers: []
src:
- src/core/lib/promise/party.cc
- test/core/promise/party_test.cc
deps:
- grpc_unsecure
@ -9930,7 +9897,6 @@ targets:
- src/core/lib/slice/slice_string_helpers.cc
- test/core/resource_quota/periodic_update_test.cc
deps:
- absl/functional:any_invocable
- absl/functional:function_ref
- absl/hash:hash
- absl/status:statusor
@ -10109,7 +10075,6 @@ targets:
- src/core/lib/promise/detail/promise_factory.h
- src/core/lib/promise/detail/promise_like.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
src:
- test/core/promise/promise_factory_test.cc
deps:
@ -11094,7 +11059,6 @@ targets:
- src/core/lib/slice/slice_string_helpers.cc
- test/core/slice/slice_string_helpers_test.cc
deps:
- absl/functional:any_invocable
- absl/hash:hash
- absl/status:statusor
- gpr
@ -11280,7 +11244,6 @@ targets:
- src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc
- test/core/client_channel/lb_policy/static_stride_scheduler_test.cc
deps:
- absl/functional:any_invocable
- absl/types:span
- gpr
uses_polling: false
@ -11685,7 +11648,6 @@ targets:
- src/core/lib/gprpp/time_averaged_stats.cc
- test/core/event_engine/posix/timer_heap_test.cc
deps:
- absl/functional:any_invocable
- absl/status:statusor
- gpr
uses_polling: false
@ -11705,7 +11667,6 @@ targets:
- src/core/lib/gprpp/time_averaged_stats.cc
- test/core/event_engine/posix/timer_list_test.cc
deps:
- absl/functional:any_invocable
- absl/status:statusor
- gpr
uses_polling: false
@ -11737,7 +11698,6 @@ targets:
- test/core/event_engine/slice_buffer_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/hash:hash
- absl/status:statusor
- absl/utility:utility
@ -11772,7 +11732,6 @@ targets:
- src/core/lib/gprpp/time.cc
- test/core/gprpp/time_test.cc
deps:
- absl/functional:any_invocable
- absl/status:statusor
- gpr
uses_polling: false
@ -11983,7 +11942,6 @@ targets:
- test/core/event_engine/thread_pool_test.cc
deps:
- absl/container:flat_hash_set
- absl/functional:any_invocable
- absl/status:statusor
- gpr
- name: thread_quota_test

2
config.m4 generated

@ -686,6 +686,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/load_balancing/lb_policy_registry.cc \
src/core/lib/matchers/matchers.cc \
src/core/lib/promise/activity.cc \
src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resolver/resolver.cc \
@ -789,6 +790,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/surface/server.cc \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/version.cc \
src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/connectivity_state.cc \
src/core/lib/transport/error_utils.cc \

2
config.w32 generated

@ -652,6 +652,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\load_balancing\\lb_policy_registry.cc " +
"src\\core\\lib\\matchers\\matchers.cc " +
"src\\core\\lib\\promise\\activity.cc " +
"src\\core\\lib\\promise\\party.cc " +
"src\\core\\lib\\promise\\sleep.cc " +
"src\\core\\lib\\promise\\trace.cc " +
"src\\core\\lib\\resolver\\resolver.cc " +
@ -755,6 +756,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\surface\\server.cc " +
"src\\core\\lib\\surface\\validate_metadata.cc " +
"src\\core\\lib\\surface\\version.cc " +
"src\\core\\lib\\transport\\batch_builder.cc " +
"src\\core\\lib\\transport\\bdp_estimator.cc " +
"src\\core\\lib\\transport\\connectivity_state.cc " +
"src\\core\\lib\\transport\\error_utils.cc " +

8
gRPC-C++.podspec generated

@ -920,12 +920,13 @@ Pod::Spec.new do |s|
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@ -1029,6 +1030,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/server.h',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/error_utils.h',
@ -1855,12 +1857,13 @@ Pod::Spec.new do |s|
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@ -1964,6 +1967,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/server.h',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/error_utils.h',

10
gRPC-Core.podspec generated

@ -1495,12 +1495,14 @@ Pod::Spec.new do |s|
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
'src/core/lib/promise/party.cc',
'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@ -1707,6 +1709,8 @@ Pod::Spec.new do |s|
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.cc',
@ -2544,12 +2548,13 @@ Pod::Spec.new do |s|
'src/core/lib/promise/detail/status.h',
'src/core/lib/promise/detail/switch.h',
'src/core/lib/promise/exec_ctx_wakeup_scheduler.h',
'src/core/lib/promise/for_each.h',
'src/core/lib/promise/if.h',
'src/core/lib/promise/interceptor_list.h',
'src/core/lib/promise/intra_activity_waiter.h',
'src/core/lib/promise/latch.h',
'src/core/lib/promise/loop.h',
'src/core/lib/promise/map.h',
'src/core/lib/promise/party.h',
'src/core/lib/promise/pipe.h',
'src/core/lib/promise/poll.h',
'src/core/lib/promise/promise.h',
@ -2653,6 +2658,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/lame_client.h',
'src/core/lib/surface/server.h',
'src/core/lib/surface/validate_metadata.h',
'src/core/lib/transport/batch_builder.h',
'src/core/lib/transport/bdp_estimator.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/error_utils.h',

6
grpc.gemspec generated

@ -1404,12 +1404,14 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/promise/detail/status.h )
s.files += %w( src/core/lib/promise/detail/switch.h )
s.files += %w( src/core/lib/promise/exec_ctx_wakeup_scheduler.h )
s.files += %w( src/core/lib/promise/for_each.h )
s.files += %w( src/core/lib/promise/if.h )
s.files += %w( src/core/lib/promise/interceptor_list.h )
s.files += %w( src/core/lib/promise/intra_activity_waiter.h )
s.files += %w( src/core/lib/promise/latch.h )
s.files += %w( src/core/lib/promise/loop.h )
s.files += %w( src/core/lib/promise/map.h )
s.files += %w( src/core/lib/promise/party.cc )
s.files += %w( src/core/lib/promise/party.h )
s.files += %w( src/core/lib/promise/pipe.h )
s.files += %w( src/core/lib/promise/poll.h )
s.files += %w( src/core/lib/promise/promise.h )
@ -1616,6 +1618,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/validate_metadata.cc )
s.files += %w( src/core/lib/surface/validate_metadata.h )
s.files += %w( src/core/lib/surface/version.cc )
s.files += %w( src/core/lib/transport/batch_builder.cc )
s.files += %w( src/core/lib/transport/batch_builder.h )
s.files += %w( src/core/lib/transport/bdp_estimator.cc )
s.files += %w( src/core/lib/transport/bdp_estimator.h )
s.files += %w( src/core/lib/transport/connectivity_state.cc )

10
grpc.gyp generated

@ -293,6 +293,7 @@
'dependencies': [
'absl/base:base',
'absl/base:core_headers',
'absl/functional:any_invocable',
'absl/memory:memory',
'absl/random:random',
'absl/status:status',
@ -359,7 +360,6 @@
'absl/container:flat_hash_map',
'absl/container:flat_hash_set',
'absl/container:inlined_vector',
'absl/functional:any_invocable',
'absl/functional:bind_front',
'absl/functional:function_ref',
'absl/hash:hash',
@ -974,6 +974,7 @@
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/party.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
@ -1077,6 +1078,7 @@
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
@ -1176,7 +1178,6 @@
'absl/container:flat_hash_map',
'absl/container:flat_hash_set',
'absl/container:inlined_vector',
'absl/functional:any_invocable',
'absl/functional:bind_front',
'absl/functional:function_ref',
'absl/hash:hash',
@ -1456,6 +1457,7 @@
'src/core/lib/load_balancing/lb_policy.cc',
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/party.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
@ -1528,6 +1530,7 @@
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
@ -1793,7 +1796,6 @@
'absl/container:flat_hash_map',
'absl/container:flat_hash_set',
'absl/container:inlined_vector',
'absl/functional:any_invocable',
'absl/functional:function_ref',
'absl/hash:hash',
'absl/meta:type_traits',
@ -1962,6 +1964,7 @@
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/party.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
@ -2032,6 +2035,7 @@
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',

6
package.xml generated

@ -1386,12 +1386,14 @@
<file baseinstalldir="/" name="src/core/lib/promise/detail/status.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/detail/switch.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/exec_ctx_wakeup_scheduler.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/for_each.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/if.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/interceptor_list.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/intra_activity_waiter.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/latch.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/loop.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/map.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/party.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/party.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/pipe.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/poll.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/promise.h" role="src" />
@ -1598,6 +1600,8 @@
<file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/validate_metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/version.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/batch_builder.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/batch_builder.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/bdp_estimator.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.cc" role="src" />

60
src/core/BUILD

@ -413,7 +413,6 @@ grpc_cc_library(
],
external_deps = [
"absl/base:core_headers",
"absl/container:inlined_vector",
"absl/strings",
"absl/strings:str_format",
],
@ -421,9 +420,15 @@ grpc_cc_library(
deps = [
"activity",
"arena",
"construct_destruct",
"context",
"promise_factory",
"promise_trace",
"//:debug_location",
"//:exec_ctx",
"//:gpr",
"//:grpc_trace",
"//:ref_counted_ptr",
],
)
@ -571,6 +576,7 @@ grpc_cc_library(
"lib/promise/loop.h",
],
deps = [
"construct_destruct",
"poll",
"promise_factory",
"//:gpr_platform",
@ -696,6 +702,7 @@ grpc_cc_library(
external_deps = [
"absl/base:core_headers",
"absl/status",
"absl/strings",
"absl/strings:str_format",
"absl/types:optional",
],
@ -708,6 +715,7 @@ grpc_cc_library(
"construct_destruct",
"context",
"no_destruct",
"poll",
"promise_factory",
"promise_status",
"//:gpr",
@ -761,19 +769,6 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "intra_activity_waiter",
language = "c++",
public_hdrs = [
"lib/promise/intra_activity_waiter.h",
],
deps = [
"activity",
"poll",
"//:gpr_platform",
],
)
grpc_cc_library(
name = "latch",
external_deps = ["absl/strings"],
@ -783,7 +778,6 @@ grpc_cc_library(
],
deps = [
"activity",
"intra_activity_waiter",
"poll",
"promise_trace",
"//:gpr",
@ -791,25 +785,6 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "observable",
external_deps = [
"absl/base:core_headers",
"absl/types:optional",
],
language = "c++",
public_hdrs = [
"lib/promise/observable.h",
],
deps = [
"activity",
"poll",
"promise_like",
"wait_set",
"//:gpr",
],
)
grpc_cc_library(
name = "interceptor_list",
hdrs = [
@ -839,7 +814,6 @@ grpc_cc_library(
"lib/promise/pipe.h",
],
external_deps = [
"absl/base:core_headers",
"absl/strings",
"absl/types:optional",
"absl/types:variant",
@ -851,7 +825,6 @@ grpc_cc_library(
"context",
"if",
"interceptor_list",
"intra_activity_waiter",
"map",
"poll",
"promise_trace",
@ -3479,33 +3452,38 @@ grpc_cc_library(
"ext/filters/message_size/message_size_filter.h",
],
external_deps = [
"absl/status",
"absl/status:statusor",
"absl/strings",
"absl/strings:str_format",
"absl/types:optional",
],
language = "c++",
deps = [
"activity",
"arena",
"arena_promise",
"channel_args",
"channel_fwd",
"channel_init",
"channel_stack_type",
"closure",
"error",
"context",
"grpc_service_config",
"json",
"json_args",
"json_object_loader",
"latch",
"poll",
"race",
"service_config_parser",
"slice",
"slice_buffer",
"status_helper",
"validation_errors",
"//:channel_stack_builder",
"//:config",
"//:debug_location",
"//:gpr",
"//:grpc_base",
"//:grpc_public_hdrs",
"//:grpc_trace",
],
)

2
src/core/ext/filters/client_channel/client_channel.h

@ -363,7 +363,7 @@ class ClientChannel {
// TODO(roth): As part of simplifying cancellation in the filter stack,
// this should no longer need to be ref-counted.
class ClientChannel::LoadBalancedCall
: public InternallyRefCounted<LoadBalancedCall, kUnrefCallDtor> {
: public InternallyRefCounted<LoadBalancedCall, UnrefCallDtor> {
public:
LoadBalancedCall(
ClientChannel* chand, grpc_call_context_element* call_context,

4
src/core/ext/filters/client_channel/retry_filter.cc

@ -269,7 +269,7 @@ class RetryFilter::CallData {
// We allocate one struct on the arena for each attempt at starting a
// batch on a given LB call.
class BatchData
: public RefCounted<BatchData, PolymorphicRefCount, kUnrefCallDtor> {
: public RefCounted<BatchData, PolymorphicRefCount, UnrefCallDtor> {
public:
BatchData(RefCountedPtr<CallAttempt> call_attempt, int refcount,
bool set_on_complete);
@ -648,7 +648,7 @@ class RetryFilter::CallData {
// on_call_stack_destruction closure from the surface.
class RetryFilter::CallData::CallStackDestructionBarrier
: public RefCounted<CallStackDestructionBarrier, PolymorphicRefCount,
kUnrefCallDtor> {
UnrefCallDtor> {
public:
CallStackDestructionBarrier() {}

6
src/core/ext/filters/http/client/http_client_filter.cc

@ -133,13 +133,13 @@ ArenaPromise<ServerMetadataHandle> HttpClientFilter::MakeCallPromise(
return std::move(md);
});
return Race(Map(next_promise_factory(std::move(call_args)),
return Race(initial_metadata_err->Wait(),
Map(next_promise_factory(std::move(call_args)),
[](ServerMetadataHandle md) -> ServerMetadataHandle {
auto r = CheckServerMetadata(md.get());
if (!r.ok()) return ServerMetadataFromStatus(r);
return md;
}),
initial_metadata_err->Wait());
}));
}
HttpClientFilter::HttpClientFilter(HttpSchemeMetadata::ValueType scheme,

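Note on the Race reordering above: assuming Race polls its arguments in order and returns the first ready result (which the reordering here suggests), putting initial_metadata_err->Wait() first means a latched error wins even when the rest of the filter stack is ready in the same poll. A minimal, self-contained sketch of that shape (illustrative stand-ins, not gRPC's actual Poll/Latch/Race types):

    #include <cassert>
    #include <optional>
    #include <string>

    // Pending-or-ready result of polling a promise (stand-in for a Poll type).
    template <typename T> using Poll = std::optional<T>;

    // One-shot value cell: Wait() yields a promise that stays pending until Set().
    template <typename T> struct Latch {
      std::optional<T> value;
      void Set(T v) { value = std::move(v); }
      auto Wait() {
        return [this]() -> Poll<T> { return value; };
      }
    };

    // Race two promises: poll in argument order, first ready result wins.
    template <typename A, typename B> auto Race(A a, B b) {
      return [a, b]() mutable -> Poll<std::string> {
        if (auto r = a()) return r;  // polled first, so it wins ties
        return b();
      };
    }

    int main() {
      Latch<std::string> error;
      auto next_filter = []() -> Poll<std::string> { return "ok"; };
      // With the error latch polled first, a set error preempts the
      // otherwise-ready downstream result.
      auto call = Race(error.Wait(), next_filter);
      error.Set("RESOURCE_EXHAUSTED");
      assert(*call() == "RESOURCE_EXHAUSTED");
    }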
19
src/core/ext/filters/http/message_compress/compression_filter.cc

@ -252,7 +252,7 @@ ArenaPromise<ServerMetadataHandle> ClientCompressionFilter::MakeCallPromise(
return CompressMessage(std::move(message), compression_algorithm);
});
auto* decompress_args = GetContext<Arena>()->New<DecompressArgs>(
DecompressArgs{GRPC_COMPRESS_NONE, absl::nullopt});
DecompressArgs{GRPC_COMPRESS_ALGORITHMS_COUNT, absl::nullopt});
auto* decompress_err =
GetContext<Arena>()->New<Latch<ServerMetadataHandle>>();
call_args.server_initial_metadata->InterceptAndMap(
@ -273,8 +273,8 @@ ArenaPromise<ServerMetadataHandle> ClientCompressionFilter::MakeCallPromise(
return std::move(*r);
});
// Run the next filter, and race it with getting an error from decompression.
return Race(next_promise_factory(std::move(call_args)),
decompress_err->Wait());
return Race(decompress_err->Wait(),
next_promise_factory(std::move(call_args)));
}
ArenaPromise<ServerMetadataHandle> ServerCompressionFilter::MakeCallPromise(
@ -288,7 +288,8 @@ ArenaPromise<ServerMetadataHandle> ServerCompressionFilter::MakeCallPromise(
this](MessageHandle message) -> absl::optional<MessageHandle> {
auto r = DecompressMessage(std::move(message), decompress_args);
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "DecompressMessage returned %s",
gpr_log(GPR_DEBUG, "%s[compression] DecompressMessage returned %s",
Activity::current()->DebugTag().c_str(),
r.status().ToString().c_str());
}
if (!r.ok()) {
@ -314,13 +315,9 @@ ArenaPromise<ServerMetadataHandle> ServerCompressionFilter::MakeCallPromise(
this](MessageHandle message) -> absl::optional<MessageHandle> {
return CompressMessage(std::move(message), *compression_algorithm);
});
// Concurrently:
// - call the next filter
// - decompress incoming messages
// - wait for initial metadata to be sent, and then commence compression of
// outgoing messages
return Race(next_promise_factory(std::move(call_args)),
decompress_err->Wait());
// Run the next filter, and race it with getting an error from decompression.
return Race(decompress_err->Wait(),
next_promise_factory(std::move(call_args)));
}
} // namespace grpc_core

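The message_size_filter.cc rewrite below uses the same two pieces as the compression filter: a per-message interceptor installed on the pipe (InterceptAndMap), plus the error latch raced against the downstream promise. A self-contained sketch of the interceptor shape, using plain std::optional and std::string as stand-ins for MessageHandle and ServerMetadata (not the real types):

    #include <cstddef>
    #include <cstdio>
    #include <optional>
    #include <string>

    using Message = std::string;

    struct ErrorLatch {
      std::optional<std::string> error;  // e.g. "RESOURCE_EXHAUSTED: ..."
      bool is_set() const { return error.has_value(); }
      void Set(std::string e) { error = std::move(e); }
    };

    // Shape of the per-message interceptor: return the message to forward it,
    // or nullopt to drop it after latching an error for the Race to surface.
    auto MakeLengthInterceptor(std::size_t max_length, ErrorLatch* err) {
      return [max_length, err](Message msg) -> std::optional<Message> {
        if (msg.size() <= max_length) return msg;  // within limit: pass through
        if (!err->is_set()) {
          err->Set("RESOURCE_EXHAUSTED: message larger than max (" +
                   std::to_string(msg.size()) + " vs. " +
                   std::to_string(max_length) + ")");
        }
        return std::nullopt;  // swallow the oversized message
      };
    }

    int main() {
      ErrorLatch err;
      auto intercept = MakeLengthInterceptor(8, &err);
      auto ok = intercept("short");                       // forwarded
      auto dropped = intercept("way too long message");   // latched + dropped
      std::printf("ok=%d dropped=%d err=%s\n", ok.has_value(),
                  dropped.has_value(), err.error.value_or("none").c_str());
    }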
365
src/core/ext/filters/message_size/message_size_filter.cc

@ -18,10 +18,13 @@
#include "src/core/ext/filters/message_size/message_size_filter.h"
#include <inttypes.h>
#include <functional>
#include <initializer_list>
#include <new>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <grpc/grpc.h>
@ -32,21 +35,22 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/race.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/service_config/service_config_call_data.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
static void recv_message_ready(void* user_data, grpc_error_handle error);
static void recv_trailing_metadata_ready(void* user_data,
grpc_error_handle error);
namespace grpc_core {
//
@ -124,251 +128,164 @@ size_t MessageSizeParser::ParserIndex() {
parser_name());
}
} // namespace grpc_core
namespace {
struct channel_data {
grpc_core::MessageSizeParsedConfig limits;
const size_t service_config_parser_index{
grpc_core::MessageSizeParser::ParserIndex()};
};
//
// MessageSizeFilter
//
struct call_data {
call_data(grpc_call_element* elem, const channel_data& chand,
const grpc_call_element_args& args)
: call_combiner(args.call_combiner), limits(chand.limits) {
GRPC_CLOSURE_INIT(&recv_message_ready, ::recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
::recv_trailing_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
// Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we
// apply the max request size to the send limit and the max response
// size to the receive limit.
const grpc_core::MessageSizeParsedConfig* config_from_call_context =
grpc_core::MessageSizeParsedConfig::GetFromCallContext(
args.context, chand.service_config_parser_index);
if (config_from_call_context != nullptr) {
absl::optional<uint32_t> max_send_size = limits.max_send_size();
absl::optional<uint32_t> max_recv_size = limits.max_recv_size();
if (config_from_call_context->max_send_size().has_value() &&
(!max_send_size.has_value() ||
*config_from_call_context->max_send_size() < *max_send_size)) {
max_send_size = *config_from_call_context->max_send_size();
const grpc_channel_filter ClientMessageSizeFilter::kFilter =
MakePromiseBasedFilter<ClientMessageSizeFilter, FilterEndpoint::kClient,
kFilterExaminesOutboundMessages |
kFilterExaminesInboundMessages>("message_size");
const grpc_channel_filter ServerMessageSizeFilter::kFilter =
MakePromiseBasedFilter<ServerMessageSizeFilter, FilterEndpoint::kServer,
kFilterExaminesOutboundMessages |
kFilterExaminesInboundMessages>("message_size");
class MessageSizeFilter::CallBuilder {
private:
auto Interceptor(uint32_t max_length, bool is_send) {
return [max_length, is_send,
err = err_](MessageHandle msg) -> absl::optional<MessageHandle> {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_INFO, "%s[message_size] %s len:%" PRIdPTR " max:%d",
Activity::current()->DebugTag().c_str(),
is_send ? "send" : "recv", msg->payload()->Length(),
max_length);
}
if (config_from_call_context->max_recv_size().has_value() &&
(!max_recv_size.has_value() ||
*config_from_call_context->max_recv_size() < *max_recv_size)) {
max_recv_size = *config_from_call_context->max_recv_size();
if (msg->payload()->Length() > max_length) {
if (err->is_set()) return std::move(msg);
auto r = GetContext<Arena>()->MakePooled<ServerMetadata>(
GetContext<Arena>());
r->Set(GrpcStatusMetadata(), GRPC_STATUS_RESOURCE_EXHAUSTED);
r->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(
absl::StrFormat("%s message larger than max (%u vs. %d)",
is_send ? "Sent" : "Received",
msg->payload()->Length(), max_length)));
err->Set(std::move(r));
return absl::nullopt;
}
limits = grpc_core::MessageSizeParsedConfig(max_send_size, max_recv_size);
}
return std::move(msg);
};
}
~call_data() {}
grpc_core::CallCombiner* call_combiner;
grpc_core::MessageSizeParsedConfig limits;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
// call our next_recv_message_ready member after handling it.
grpc_closure recv_message_ready;
grpc_closure recv_trailing_metadata_ready;
// The error caused by a message that is too large, or absl::OkStatus()
grpc_error_handle error;
// Used by recv_message_ready.
absl::optional<grpc_core::SliceBuffer>* recv_message = nullptr;
// Original recv_message_ready callback, invoked after our own.
grpc_closure* next_recv_message_ready = nullptr;
// Original recv_trailing_metadata callback, invoked after our own.
grpc_closure* original_recv_trailing_metadata_ready;
bool seen_recv_trailing_metadata = false;
grpc_error_handle recv_trailing_metadata_error;
};
} // namespace
public:
explicit CallBuilder(const MessageSizeParsedConfig& limits)
: limits_(limits) {}
// Callback invoked when we receive a message. Here we check the max
// receive message size.
static void recv_message_ready(void* user_data, grpc_error_handle error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->recv_message->has_value() &&
calld->limits.max_recv_size().has_value() &&
(*calld->recv_message)->Length() >
static_cast<size_t>(*calld->limits.max_recv_size())) {
grpc_error_handle new_error = grpc_error_set_int(
GRPC_ERROR_CREATE(absl::StrFormat(
"Received message larger than max (%u vs. %d)",
(*calld->recv_message)->Length(), *calld->limits.max_recv_size())),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_RESOURCE_EXHAUSTED);
error = grpc_error_add_child(error, new_error);
calld->error = error;
template <typename T>
void AddSend(T* pipe_end) {
if (!limits_.max_send_size().has_value()) return;
pipe_end->InterceptAndMap(Interceptor(*limits_.max_send_size(), true));
}
// Invoke the next callback.
grpc_closure* closure = calld->next_recv_message_ready;
calld->next_recv_message_ready = nullptr;
if (calld->seen_recv_trailing_metadata) {
// We might potentially see another RECV_MESSAGE op. In that case, we do not
// want to run the recv_trailing_metadata_ready closure again. The newer
// RECV_MESSAGE op cannot cause any errors since the transport has already
// invoked the recv_trailing_metadata_ready closure and all further
// RECV_MESSAGE ops will get null payloads.
calld->seen_recv_trailing_metadata = false;
GRPC_CALL_COMBINER_START(calld->call_combiner,
&calld->recv_trailing_metadata_ready,
calld->recv_trailing_metadata_error,
"continue recv_trailing_metadata_ready");
template <typename T>
void AddRecv(T* pipe_end) {
if (!limits_.max_recv_size().has_value()) return;
pipe_end->InterceptAndMap(Interceptor(*limits_.max_recv_size(), false));
}
grpc_core::Closure::Run(DEBUG_LOCATION, closure, error);
}
// Callback invoked on completion of recv_trailing_metadata
// Notifies the recv_trailing_metadata batch of any message size failures
static void recv_trailing_metadata_ready(void* user_data,
grpc_error_handle error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->next_recv_message_ready != nullptr) {
calld->seen_recv_trailing_metadata = true;
calld->recv_trailing_metadata_error = error;
GRPC_CALL_COMBINER_STOP(calld->call_combiner,
"deferring recv_trailing_metadata_ready until "
"after recv_message_ready");
return;
ArenaPromise<ServerMetadataHandle> Run(
CallArgs call_args, NextPromiseFactory next_promise_factory) {
return Race(err_->Wait(), next_promise_factory(std::move(call_args)));
}
error = grpc_error_add_child(error, calld->error);
// Invoke the next callback.
grpc_core::Closure::Run(DEBUG_LOCATION,
calld->original_recv_trailing_metadata_ready, error);
}
// Start transport stream op.
static void message_size_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
call_data* calld = static_cast<call_data*>(elem->call_data);
// Check max send message size.
if (op->send_message && calld->limits.max_send_size().has_value() &&
op->payload->send_message.send_message->Length() >
static_cast<size_t>(*calld->limits.max_send_size())) {
grpc_transport_stream_op_batch_finish_with_failure(
op,
grpc_error_set_int(GRPC_ERROR_CREATE(absl::StrFormat(
"Sent message larger than max (%u vs. %d)",
op->payload->send_message.send_message->Length(),
*calld->limits.max_send_size())),
grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_RESOURCE_EXHAUSTED),
calld->call_combiner);
return;
}
// Inject callback for receiving a message.
if (op->recv_message) {
calld->next_recv_message_ready =
op->payload->recv_message.recv_message_ready;
calld->recv_message = op->payload->recv_message.recv_message;
op->payload->recv_message.recv_message_ready = &calld->recv_message_ready;
}
// Inject callback for receiving trailing metadata.
if (op->recv_trailing_metadata) {
calld->original_recv_trailing_metadata_ready =
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
&calld->recv_trailing_metadata_ready;
}
// Chain to the next filter.
grpc_call_next_op(elem, op);
}
private:
Latch<ServerMetadataHandle>* const err_ =
GetContext<Arena>()->ManagedNew<Latch<ServerMetadataHandle>>();
MessageSizeParsedConfig limits_;
};
// Constructor for call_data.
static grpc_error_handle message_size_init_call_elem(
grpc_call_element* elem, const grpc_call_element_args* args) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
new (elem->call_data) call_data(elem, *chand, *args);
return absl::OkStatus();
absl::StatusOr<ClientMessageSizeFilter> ClientMessageSizeFilter::Create(
const ChannelArgs& args, ChannelFilter::Args) {
return ClientMessageSizeFilter(args);
}
// Destructor for call_data.
static void message_size_destroy_call_elem(
grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
grpc_closure* /*ignored*/) {
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->~call_data();
absl::StatusOr<ServerMessageSizeFilter> ServerMessageSizeFilter::Create(
const ChannelArgs& args, ChannelFilter::Args) {
return ServerMessageSizeFilter(args);
}
// Constructor for channel_data.
static grpc_error_handle message_size_init_channel_elem(
grpc_channel_element* elem, grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
new (chand) channel_data();
chand->limits = grpc_core::MessageSizeParsedConfig::GetFromChannelArgs(
args->channel_args);
return absl::OkStatus();
}
ArenaPromise<ServerMetadataHandle> ClientMessageSizeFilter::MakeCallPromise(
CallArgs call_args, NextPromiseFactory next_promise_factory) {
// Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we
// apply the max request size to the send limit and the max response
// size to the receive limit.
MessageSizeParsedConfig limits = this->limits();
const MessageSizeParsedConfig* config_from_call_context =
MessageSizeParsedConfig::GetFromCallContext(
GetContext<grpc_call_context_element>(),
service_config_parser_index_);
if (config_from_call_context != nullptr) {
absl::optional<uint32_t> max_send_size = limits.max_send_size();
absl::optional<uint32_t> max_recv_size = limits.max_recv_size();
if (config_from_call_context->max_send_size().has_value() &&
(!max_send_size.has_value() ||
*config_from_call_context->max_send_size() < *max_send_size)) {
max_send_size = *config_from_call_context->max_send_size();
}
if (config_from_call_context->max_recv_size().has_value() &&
(!max_recv_size.has_value() ||
*config_from_call_context->max_recv_size() < *max_recv_size)) {
max_recv_size = *config_from_call_context->max_recv_size();
}
limits = MessageSizeParsedConfig(max_send_size, max_recv_size);
}
// Destructor for channel_data.
static void message_size_destroy_channel_elem(grpc_channel_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
chand->~channel_data();
CallBuilder b(limits);
b.AddSend(call_args.client_to_server_messages);
b.AddRecv(call_args.server_to_client_messages);
return b.Run(std::move(call_args), std::move(next_promise_factory));
}
const grpc_channel_filter grpc_message_size_filter = {
message_size_start_transport_stream_op_batch,
nullptr,
grpc_channel_next_op,
sizeof(call_data),
message_size_init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
message_size_destroy_call_elem,
sizeof(channel_data),
message_size_init_channel_elem,
grpc_channel_stack_no_post_init,
message_size_destroy_channel_elem,
grpc_channel_next_get_info,
"message_size"};
ArenaPromise<ServerMetadataHandle> ServerMessageSizeFilter::MakeCallPromise(
CallArgs call_args, NextPromiseFactory next_promise_factory) {
CallBuilder b(limits());
b.AddSend(call_args.server_to_client_messages);
b.AddRecv(call_args.client_to_server_messages);
return b.Run(std::move(call_args), std::move(next_promise_factory));
}
namespace {
// Used for GRPC_CLIENT_SUBCHANNEL
static bool maybe_add_message_size_filter_subchannel(
grpc_core::ChannelStackBuilder* builder) {
bool MaybeAddMessageSizeFilterToSubchannel(ChannelStackBuilder* builder) {
if (builder->channel_args().WantMinimalStack()) {
return true;
}
builder->PrependFilter(&grpc_message_size_filter);
builder->PrependFilter(&ClientMessageSizeFilter::kFilter);
return true;
}
// Used for GRPC_CLIENT_DIRECT_CHANNEL and GRPC_SERVER_CHANNEL. Adds the filter
// only if message size limits or service config is specified.
static bool maybe_add_message_size_filter(
grpc_core::ChannelStackBuilder* builder) {
auto channel_args = builder->channel_args();
if (channel_args.WantMinimalStack()) {
// Used for GRPC_CLIENT_DIRECT_CHANNEL and GRPC_SERVER_CHANNEL. Adds the
// filter only if message size limits or service config is specified.
auto MaybeAddMessageSizeFilter(const grpc_channel_filter* filter) {
return [filter](ChannelStackBuilder* builder) {
auto channel_args = builder->channel_args();
if (channel_args.WantMinimalStack()) {
return true;
}
MessageSizeParsedConfig limits =
MessageSizeParsedConfig::GetFromChannelArgs(channel_args);
const bool enable =
limits.max_send_size().has_value() ||
limits.max_recv_size().has_value() ||
channel_args.GetString(GRPC_ARG_SERVICE_CONFIG).has_value();
if (enable) builder->PrependFilter(filter);
return true;
}
grpc_core::MessageSizeParsedConfig limits =
grpc_core::MessageSizeParsedConfig::GetFromChannelArgs(channel_args);
const bool enable =
limits.max_send_size().has_value() ||
limits.max_recv_size().has_value() ||
channel_args.GetString(GRPC_ARG_SERVICE_CONFIG).has_value();
if (enable) builder->PrependFilter(&grpc_message_size_filter);
return true;
};
}
namespace grpc_core {
} // namespace
void RegisterMessageSizeFilter(CoreConfiguration::Builder* builder) {
MessageSizeParser::Register(builder);
  builder->channel_init()->RegisterStage(
      GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      maybe_add_message_size_filter_subchannel);
  builder->channel_init()->RegisterStage(GRPC_CLIENT_DIRECT_CHANNEL,
                                         GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                         maybe_add_message_size_filter);
  builder->channel_init()->RegisterStage(GRPC_SERVER_CHANNEL,
                                         GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                         maybe_add_message_size_filter);
  builder->channel_init()->RegisterStage(GRPC_CLIENT_SUBCHANNEL,
                                         GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                         MaybeAddMessageSizeFilterToSubchannel);
  builder->channel_init()->RegisterStage(
      GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      MaybeAddMessageSizeFilter(&ClientMessageSizeFilter::kFilter));
  builder->channel_init()->RegisterStage(
      GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
      MaybeAddMessageSizeFilter(&ServerMessageSizeFilter::kFilter));
}
} // namespace grpc_core

@ -24,21 +24,22 @@
#include <memory>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_fwd.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/channel/promise_based_filter.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/gprpp/validation_errors.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/json/json_args.h"
#include "src/core/lib/json/json_object_loader.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/service_config/service_config_parser.h"
extern const grpc_channel_filter grpc_message_size_filter;
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
@ -85,6 +86,50 @@ class MessageSizeParser : public ServiceConfigParser::Parser {
absl::optional<uint32_t> GetMaxRecvSizeFromChannelArgs(const ChannelArgs& args);
absl::optional<uint32_t> GetMaxSendSizeFromChannelArgs(const ChannelArgs& args);
class MessageSizeFilter : public ChannelFilter {
protected:
explicit MessageSizeFilter(const ChannelArgs& args)
: limits_(MessageSizeParsedConfig::GetFromChannelArgs(args)) {}
class CallBuilder;
const MessageSizeParsedConfig& limits() const { return limits_; }
private:
MessageSizeParsedConfig limits_;
};
class ServerMessageSizeFilter final : public MessageSizeFilter {
public:
static const grpc_channel_filter kFilter;
static absl::StatusOr<ServerMessageSizeFilter> Create(
const ChannelArgs& args, ChannelFilter::Args filter_args);
// Construct a promise for one call.
ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args, NextPromiseFactory next_promise_factory) override;
private:
using MessageSizeFilter::MessageSizeFilter;
};
class ClientMessageSizeFilter final : public MessageSizeFilter {
public:
static const grpc_channel_filter kFilter;
static absl::StatusOr<ClientMessageSizeFilter> Create(
const ChannelArgs& args, ChannelFilter::Args filter_args);
// Construct a promise for one call.
ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args, NextPromiseFactory next_promise_factory) override;
private:
const size_t service_config_parser_index_{MessageSizeParser::ParserIndex()};
using MessageSizeFilter::MessageSizeFilter;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_EXT_FILTERS_MESSAGE_SIZE_MESSAGE_SIZE_FILTER_H

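Note on the message-size change: the new header replaces the `grpc_message_size_filter` vtable with promise-based `ClientMessageSizeFilter`/`ServerMessageSizeFilter` types, and in the .cc a `CallBuilder` installs `InterceptAndMap` hooks on the send/receive pipes and `Race()`s the call against an error `Latch`. The standalone sketch below illustrates only the core interception idea, not the gRPC API: `MakeSizeInterceptor` and `FakeMessage` are invented names, and `std::optional` stands in for a pipe message slot.

```cpp
// Standalone sketch of the "intercept and map" size check the new filter
// installs on each message pipe. All names here (FakeMessage,
// MakeSizeInterceptor) are illustrative only and are not part of the gRPC API.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

struct FakeMessage {
  std::string payload;
  size_t Length() const { return payload.size(); }
};

// Returns a callable that mimics pipe_end->InterceptAndMap(...): it either
// forwards the message or drops it and records an error, which the real
// filter surfaces by completing an error latch that is Race()d against the
// rest of the call.
auto MakeSizeInterceptor(uint32_t max_size, std::optional<std::string>& error) {
  return [max_size, &error](FakeMessage msg) -> std::optional<FakeMessage> {
    if (msg.Length() > max_size) {
      error = "Sent message larger than max (" + std::to_string(msg.Length()) +
              " vs. " + std::to_string(max_size) + ")";
      return std::nullopt;  // message is swallowed; the call will fail
    }
    return msg;  // under the limit: pass through unchanged
  };
}

int main() {
  std::optional<std::string> error;
  auto intercept = MakeSizeInterceptor(8, error);
  auto ok = intercept({"short"});
  auto too_big = intercept({"this one is far too long"});
  std::cout << "ok forwarded: " << ok.has_value() << "\n";
  std::cout << "too_big forwarded: " << too_big.has_value()
            << " error: " << error.value_or("<none>") << "\n";
}
```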
@ -694,6 +694,7 @@ static grpc_endpoint* get_endpoint(grpc_transport*) {
// See grpc_transport_vtable declaration for meaning of each field
static const grpc_transport_vtable vtable = {sizeof(grpc_binder_stream),
false,
"binder",
init_stream,
nullptr,

@ -1210,7 +1210,8 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_closure** pclosure,
grpc_error_handle error,
const char* desc) {
const char* desc,
grpc_core::DebugLocation whence) {
grpc_closure* closure = *pclosure;
*pclosure = nullptr;
if (closure == nullptr) {
@ -1221,14 +1222,14 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
gpr_log(
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s",
"write_state=%s whence=%s:%d",
t, closure,
static_cast<int>(closure->next_data.scratch /
CLOSURE_BARRIER_FIRST_REF_BIT),
static_cast<int>(closure->next_data.scratch %
CLOSURE_BARRIER_FIRST_REF_BIT),
desc, grpc_core::StatusToString(error).c_str(),
write_state_name(t->write_state));
write_state_name(t->write_state), whence.file(), whence.line());
}
if (s->context != nullptr) {
@ -3078,6 +3079,7 @@ static grpc_endpoint* chttp2_get_endpoint(grpc_transport* t) {
}
static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
false,
"chttp2",
init_stream,
nullptr,

@ -709,7 +709,8 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_closure** pclosure,
grpc_error_handle error,
const char* desc);
const char* desc,
grpc_core::DebugLocation whence = {});
#define GRPC_HEADER_SIZE_IN_BYTES 5
#define MAX_SIZE_T (~(size_t)0)

@ -1462,6 +1462,7 @@ static void perform_op(grpc_transport* /*gt*/, grpc_transport_op* /*op*/) {}
static const grpc_transport_vtable grpc_cronet_vtable = {
sizeof(stream_obj),
false,
"cronet_http",
init_stream,
nullptr,

@ -408,7 +408,7 @@ void complete_if_batch_end_locked(inproc_stream* s, grpc_error_handle error,
int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
INPROC_LOG(GPR_INFO, "%s %p %p %s", msg, s, op,
INPROC_LOG(GPR_INFO, "%s %p %p %p %s", msg, s, op, op->on_complete,
grpc_core::StatusToString(error).c_str());
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_complete, error);
}
@ -697,8 +697,9 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
s->to_read_initial_md_filled = false;
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
s->recv_initial_md_op->payload->recv_initial_metadata
.recv_initial_metadata_ready,
std::exchange(s->recv_initial_md_op->payload->recv_initial_metadata
.recv_initial_metadata_ready,
nullptr),
absl::OkStatus());
complete_if_batch_end_locked(
s, absl::OkStatus(), s->recv_initial_md_op,
@ -766,6 +767,8 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
nullptr);
s->to_read_trailing_md.Clear();
s->to_read_trailing_md_filled = false;
s->recv_trailing_md_op->payload->recv_trailing_metadata
.recv_trailing_metadata->Set(grpc_core::GrpcStatusFromWire(), true);
// We should schedule the recv_trailing_md_op completion if
// 1. this stream is the client-side
@ -906,8 +909,6 @@ bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
return ret;
}
void do_nothing(void* /*arg*/, grpc_error_handle /*error*/) {}
void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
@ -933,8 +934,8 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
// completed). This can go away once we move to a new C++ closure API
// that provides the ability to create a barrier closure.
if (on_complete == nullptr) {
on_complete = GRPC_CLOSURE_INIT(&op->handler_private.closure, do_nothing,
nullptr, grpc_schedule_on_exec_ctx);
on_complete = op->on_complete =
grpc_core::NewClosure([](grpc_error_handle) {});
}
if (op->cancel_stream) {
@ -1177,13 +1178,18 @@ void set_pollset_set(grpc_transport* /*gt*/, grpc_stream* /*gs*/,
grpc_endpoint* get_endpoint(grpc_transport* /*t*/) { return nullptr; }
const grpc_transport_vtable inproc_vtable = {
sizeof(inproc_stream), "inproc",
init_stream, nullptr,
set_pollset, set_pollset_set,
perform_stream_op, perform_transport_op,
destroy_stream, destroy_transport,
get_endpoint};
const grpc_transport_vtable inproc_vtable = {sizeof(inproc_stream),
true,
"inproc",
init_stream,
nullptr,
set_pollset,
set_pollset_set,
perform_stream_op,
perform_transport_op,
destroy_stream,
destroy_transport,
get_endpoint};
//******************************************************************************
// Main inproc transport functions

File diff suppressed because it is too large

@ -16,6 +16,8 @@
#include "src/core/lib/channel/promise_based_filter.h"
#include <inttypes.h>
#include <algorithm>
#include <initializer_list>
#include <memory>
@ -52,7 +54,7 @@ class FakeActivity final : public Activity {
explicit FakeActivity(Activity* wake_activity)
: wake_activity_(wake_activity) {}
void Orphan() override {}
void ForceImmediateRepoll() override {}
void ForceImmediateRepoll(WakeupMask) override {}
Waker MakeOwningWaker() override { return wake_activity_->MakeOwningWaker(); }
Waker MakeNonOwningWaker() override {
return wake_activity_->MakeNonOwningWaker();
@ -136,20 +138,22 @@ Waker BaseCallData::MakeNonOwningWaker() { return MakeOwningWaker(); }
Waker BaseCallData::MakeOwningWaker() {
GRPC_CALL_STACK_REF(call_stack_, "waker");
return Waker(this, nullptr);
return Waker(this, 0);
}
void BaseCallData::Wakeup(void*) {
void BaseCallData::Wakeup(WakeupMask) {
auto wakeup = [](void* p, grpc_error_handle) {
auto* self = static_cast<BaseCallData*>(p);
self->OnWakeup();
self->Drop(nullptr);
self->Drop(0);
};
auto* closure = GRPC_CLOSURE_CREATE(wakeup, this, nullptr);
GRPC_CALL_COMBINER_START(call_combiner_, closure, absl::OkStatus(), "wakeup");
}
void BaseCallData::Drop(void*) { GRPC_CALL_STACK_UNREF(call_stack_, "waker"); }
void BaseCallData::Drop(WakeupMask) {
GRPC_CALL_STACK_UNREF(call_stack_, "waker");
}
std::string BaseCallData::LogTag() const {
return absl::StrCat(
@ -217,7 +221,7 @@ void BaseCallData::CapturedBatch::ResumeWith(Flusher* releaser) {
// refcnt==0 ==> cancelled
if (grpc_trace_channel.enabled()) {
gpr_log(GPR_INFO, "%sRESUME BATCH REQUEST CANCELLED",
Activity::current()->DebugTag().c_str());
releaser->call()->DebugTag().c_str());
}
return;
}
@ -241,6 +245,10 @@ void BaseCallData::CapturedBatch::CancelWith(grpc_error_handle error,
auto* batch = std::exchange(batch_, nullptr);
GPR_ASSERT(batch != nullptr);
uintptr_t& refcnt = *RefCountField(batch);
gpr_log(GPR_DEBUG, "%sCancelWith: %p refs=%" PRIdPTR " err=%s [%s]",
releaser->call()->DebugTag().c_str(), batch, refcnt,
error.ToString().c_str(),
grpc_transport_stream_op_batch_string(batch, false).c_str());
if (refcnt == 0) {
// refcnt==0 ==> cancelled
if (grpc_trace_channel.enabled()) {
@ -331,6 +339,8 @@ const char* BaseCallData::SendMessage::StateString(State state) {
return "CANCELLED";
case State::kCancelledButNotYetPolled:
return "CANCELLED_BUT_NOT_YET_POLLED";
case State::kCancelledButNoStatus:
return "CANCELLED_BUT_NO_STATUS";
}
return "UNKNOWN";
}
@ -355,6 +365,7 @@ void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
Crash(absl::StrFormat("ILLEGAL STATE: %s", StateString(state_)));
case State::kCancelled:
case State::kCancelledButNotYetPolled:
case State::kCancelledButNoStatus:
return;
}
batch_ = batch;
@ -382,6 +393,7 @@ void BaseCallData::SendMessage::GotPipe(T* pipe_end) {
case State::kForwardedBatch:
case State::kBatchCompleted:
case State::kPushedToPipe:
case State::kCancelledButNoStatus:
Crash(absl::StrFormat("ILLEGAL STATE: %s", StateString(state_)));
case State::kCancelled:
case State::kCancelledButNotYetPolled:
@ -397,6 +409,7 @@ bool BaseCallData::SendMessage::IsIdle() const {
case State::kForwardedBatch:
case State::kCancelled:
case State::kCancelledButNotYetPolled:
case State::kCancelledButNoStatus:
return true;
case State::kGotBatchNoPipe:
case State::kGotBatch:
@ -425,6 +438,7 @@ void BaseCallData::SendMessage::OnComplete(absl::Status status) {
break;
case State::kCancelled:
case State::kCancelledButNotYetPolled:
case State::kCancelledButNoStatus:
flusher.AddClosure(intercepted_on_complete_, status,
"forward after cancel");
break;
@ -449,10 +463,14 @@ void BaseCallData::SendMessage::Done(const ServerMetadata& metadata,
case State::kCancelledButNotYetPolled:
break;
case State::kInitial:
state_ = State::kCancelled;
break;
case State::kIdle:
case State::kForwardedBatch:
state_ = State::kCancelledButNotYetPolled;
if (base_->is_current()) base_->ForceImmediateRepoll();
break;
case State::kCancelledButNoStatus:
case State::kGotBatchNoPipe:
case State::kGotBatch: {
std::string temp;
@ -471,6 +489,7 @@ void BaseCallData::SendMessage::Done(const ServerMetadata& metadata,
push_.reset();
next_.reset();
state_ = State::kCancelledButNotYetPolled;
if (base_->is_current()) base_->ForceImmediateRepoll();
break;
}
}
@ -489,6 +508,7 @@ void BaseCallData::SendMessage::WakeInsideCombiner(Flusher* flusher,
case State::kIdle:
case State::kGotBatchNoPipe:
case State::kCancelled:
case State::kCancelledButNoStatus:
break;
case State::kCancelledButNotYetPolled:
interceptor()->Push()->Close();
@ -530,13 +550,18 @@ void BaseCallData::SendMessage::WakeInsideCombiner(Flusher* flusher,
"result.has_value=%s",
base_->LogTag().c_str(), p->has_value() ? "true" : "false");
}
GPR_ASSERT(p->has_value());
batch_->payload->send_message.send_message->Swap((**p)->payload());
batch_->payload->send_message.flags = (**p)->flags();
state_ = State::kForwardedBatch;
batch_.ResumeWith(flusher);
next_.reset();
if ((*push_)().ready()) push_.reset();
if (p->has_value()) {
batch_->payload->send_message.send_message->Swap((**p)->payload());
batch_->payload->send_message.flags = (**p)->flags();
state_ = State::kForwardedBatch;
batch_.ResumeWith(flusher);
next_.reset();
if ((*push_)().ready()) push_.reset();
} else {
state_ = State::kCancelledButNoStatus;
next_.reset();
push_.reset();
}
}
} break;
case State::kForwardedBatch:
@ -1094,11 +1119,14 @@ class ClientCallData::PollContext {
// Poll the promise once since we're waiting for it.
Poll<ServerMetadataHandle> poll = self_->promise_();
if (grpc_trace_channel.enabled()) {
gpr_log(GPR_INFO, "%s ClientCallData.PollContext.Run: poll=%s",
gpr_log(GPR_INFO, "%s ClientCallData.PollContext.Run: poll=%s; %s",
self_->LogTag().c_str(),
PollToString(poll, [](const ServerMetadataHandle& h) {
return h->DebugString();
}).c_str());
PollToString(poll,
[](const ServerMetadataHandle& h) {
return h->DebugString();
})
.c_str(),
self_->DebugString().c_str());
}
if (auto* r = poll.value_if_ready()) {
auto md = std::move(*r);
@ -1278,7 +1306,11 @@ ClientCallData::ClientCallData(grpc_call_element* elem,
[args]() {
return args->arena->New<ReceiveInterceptor>(args->arena);
},
[args]() { return args->arena->New<SendInterceptor>(args->arena); }) {
[args]() { return args->arena->New<SendInterceptor>(args->arena); }),
initial_metadata_outstanding_token_(
(flags & kFilterIsLast) != 0
? ClientInitialMetadataOutstandingToken::New(arena())
: ClientInitialMetadataOutstandingToken::Empty()) {
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
RecvTrailingMetadataReadyCallback, this,
grpc_schedule_on_exec_ctx);
@ -1294,8 +1326,12 @@ ClientCallData::~ClientCallData() {
}
}
std::string ClientCallData::DebugTag() const {
return absl::StrFormat("PBF_CLIENT[%p]: [%s] ", this, elem()->filter->name);
}
// Activity implementation.
void ClientCallData::ForceImmediateRepoll() {
void ClientCallData::ForceImmediateRepoll(WakeupMask) {
GPR_ASSERT(poll_ctx_ != nullptr);
poll_ctx_->Repoll();
}
@ -1547,6 +1583,7 @@ void ClientCallData::StartPromise(Flusher* flusher) {
promise_ = filter->MakeCallPromise(
CallArgs{WrapMetadata(send_initial_metadata_batch_->payload
->send_initial_metadata.send_initial_metadata),
std::move(initial_metadata_outstanding_token_),
server_initial_metadata_pipe() == nullptr
? nullptr
: &server_initial_metadata_pipe()->sender,
@ -1654,8 +1691,7 @@ ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
GPR_ASSERT(poll_ctx_ != nullptr);
GPR_ASSERT(send_initial_state_ == SendInitialState::kQueued);
send_initial_metadata_batch_->payload->send_initial_metadata
.send_initial_metadata =
UnwrapMetadata(std::move(call_args.client_initial_metadata));
.send_initial_metadata = call_args.client_initial_metadata.get();
if (recv_initial_metadata_ != nullptr) {
// Call args should contain a latch for receiving initial metadata.
// It might be the one we passed in - in which case we know this filter
@ -1867,8 +1903,15 @@ struct ServerCallData::SendInitialMetadata {
class ServerCallData::PollContext {
public:
explicit PollContext(ServerCallData* self, Flusher* flusher)
: self_(self), flusher_(flusher) {
explicit PollContext(ServerCallData* self, Flusher* flusher,
DebugLocation created = DebugLocation())
: self_(self), flusher_(flusher), created_(created) {
if (self_->poll_ctx_ != nullptr) {
Crash(absl::StrCat(
"PollContext: disallowed recursion. New: ", created_.file(), ":",
created_.line(), "; Old: ", self_->poll_ctx_->created_.file(), ":",
self_->poll_ctx_->created_.line()));
}
GPR_ASSERT(self_->poll_ctx_ == nullptr);
self_->poll_ctx_ = this;
scoped_activity_.Init(self_);
@ -1914,6 +1957,7 @@ class ServerCallData::PollContext {
Flusher* const flusher_;
bool repoll_ = false;
bool have_scoped_activity_;
GPR_NO_UNIQUE_ADDRESS DebugLocation created_;
};
const char* ServerCallData::StateString(RecvInitialState state) {
@ -1973,11 +2017,18 @@ ServerCallData::~ServerCallData() {
gpr_log(GPR_INFO, "%s ~ServerCallData %s", LogTag().c_str(),
DebugString().c_str());
}
if (send_initial_metadata_ != nullptr) {
send_initial_metadata_->~SendInitialMetadata();
}
GPR_ASSERT(poll_ctx_ == nullptr);
}
std::string ServerCallData::DebugTag() const {
return absl::StrFormat("PBF_SERVER[%p]: [%s] ", this, elem()->filter->name);
}
// Activity implementation.
void ServerCallData::ForceImmediateRepoll() {
void ServerCallData::ForceImmediateRepoll(WakeupMask) {
GPR_ASSERT(poll_ctx_ != nullptr);
poll_ctx_->Repoll();
}
@ -2083,7 +2134,10 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
switch (send_trailing_state_) {
case SendTrailingState::kInitial:
send_trailing_metadata_batch_ = batch;
if (receive_message() != nullptr) {
if (receive_message() != nullptr &&
batch->payload->send_trailing_metadata.send_trailing_metadata
->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
receive_message()->Done(
*batch->payload->send_trailing_metadata.send_trailing_metadata,
&flusher);
@ -2140,9 +2194,12 @@ void ServerCallData::Completed(grpc_error_handle error, Flusher* flusher) {
case SendTrailingState::kForwarded:
send_trailing_state_ = SendTrailingState::kCancelled;
if (!error.ok()) {
call_stack()->IncrementRefCount();
auto* batch = grpc_make_transport_stream_op(
NewClosure([call_combiner = call_combiner()](absl::Status) {
NewClosure([call_combiner = call_combiner(),
call_stack = call_stack()](absl::Status) {
GRPC_CALL_COMBINER_STOP(call_combiner, "done-cancel");
call_stack->Unref();
}));
batch->cancel_stream = true;
batch->payload->cancel_stream.cancel_error = error;
@ -2194,7 +2251,7 @@ void ServerCallData::Completed(grpc_error_handle error, Flusher* flusher) {
ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
CallArgs call_args) {
GPR_ASSERT(recv_initial_state_ == RecvInitialState::kComplete);
GPR_ASSERT(UnwrapMetadata(std::move(call_args.client_initial_metadata)) ==
GPR_ASSERT(std::move(call_args.client_initial_metadata).get() ==
recv_initial_metadata_);
forward_recv_initial_metadata_callback_ = true;
if (send_initial_metadata_ != nullptr) {
@ -2316,6 +2373,7 @@ void ServerCallData::RecvInitialMetadataReady(grpc_error_handle error) {
FakeActivity(this).Run([this, filter] {
promise_ = filter->MakeCallPromise(
CallArgs{WrapMetadata(recv_initial_metadata_),
ClientInitialMetadataOutstandingToken::Empty(),
server_initial_metadata_pipe() == nullptr
? nullptr
: &server_initial_metadata_pipe()->sender,
@ -2416,9 +2474,14 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
(send_trailing_metadata_batch_->send_message &&
send_message()->IsForwarded()))) {
send_trailing_state_ = SendTrailingState::kQueued;
send_message()->Done(*send_trailing_metadata_batch_->payload
->send_trailing_metadata.send_trailing_metadata,
flusher);
if (send_trailing_metadata_batch_->payload->send_trailing_metadata
.send_trailing_metadata->get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN) != GRPC_STATUS_OK) {
send_message()->Done(
*send_trailing_metadata_batch_->payload->send_trailing_metadata
.send_trailing_metadata,
flusher);
}
}
}
if (receive_message() != nullptr) {
@ -2469,8 +2532,7 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
}
if (auto* r = poll.value_if_ready()) {
promise_ = ArenaPromise<ServerMetadataHandle>();
auto* md = UnwrapMetadata(std::move(*r));
bool destroy_md = true;
auto md = std::move(*r);
if (send_message() != nullptr) {
send_message()->Done(*md, flusher);
}
@ -2482,11 +2544,9 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
case SendTrailingState::kQueuedButHaventClosedSends:
case SendTrailingState::kQueued: {
if (send_trailing_metadata_batch_->payload->send_trailing_metadata
.send_trailing_metadata != md) {
.send_trailing_metadata != md.get()) {
*send_trailing_metadata_batch_->payload->send_trailing_metadata
.send_trailing_metadata = std::move(*md);
} else {
destroy_md = false;
}
send_trailing_metadata_batch_.ResumeWith(flusher);
send_trailing_state_ = SendTrailingState::kForwarded;
@ -2504,9 +2564,6 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
// Nothing to do.
break;
}
if (destroy_md) {
md->~grpc_metadata_batch();
}
}
}
if (std::exchange(forward_recv_initial_metadata_callback_, false)) {

@ -184,7 +184,7 @@ class BaseCallData : public Activity, private Wakeable {
Waker MakeNonOwningWaker() final;
Waker MakeOwningWaker() final;
std::string ActivityDebugTag(void*) const override { return DebugTag(); }
std::string ActivityDebugTag(WakeupMask) const override { return DebugTag(); }
void Finalize(const grpc_call_final_info* final_info) {
finalization_.Run(final_info);
@ -222,7 +222,11 @@ class BaseCallData : public Activity, private Wakeable {
void Resume(grpc_transport_stream_op_batch* batch) {
GPR_ASSERT(!call_->is_last());
release_.push_back(batch);
if (batch->HasOp()) {
release_.push_back(batch);
} else if (batch->on_complete != nullptr) {
Complete(batch);
}
}
void Cancel(grpc_transport_stream_op_batch* batch,
@ -241,6 +245,8 @@ class BaseCallData : public Activity, private Wakeable {
call_closures_.Add(closure, error, reason);
}
BaseCallData* call() const { return call_; }
private:
absl::InlinedVector<grpc_transport_stream_op_batch*, 1> release_;
CallCombinerClosureList call_closures_;
@ -284,11 +290,6 @@ class BaseCallData : public Activity, private Wakeable {
Arena::PooledDeleter(nullptr));
}
static grpc_metadata_batch* UnwrapMetadata(
Arena::PoolPtr<grpc_metadata_batch> p) {
return p.release();
}
class ReceiveInterceptor final : public Interceptor {
public:
explicit ReceiveInterceptor(Arena* arena) : pipe_{arena} {}
@ -402,6 +403,8 @@ class BaseCallData : public Activity, private Wakeable {
kCancelledButNotYetPolled,
// We're done.
kCancelled,
// We're done, but we haven't gotten a status yet
kCancelledButNoStatus,
};
static const char* StateString(State);
@ -542,8 +545,8 @@ class BaseCallData : public Activity, private Wakeable {
private:
// Wakeable implementation.
void Wakeup(void*) final;
void Drop(void*) final;
void Wakeup(WakeupMask) final;
void Drop(WakeupMask) final;
virtual void OnWakeup() = 0;
@ -569,10 +572,12 @@ class ClientCallData : public BaseCallData {
~ClientCallData() override;
// Activity implementation.
void ForceImmediateRepoll() final;
void ForceImmediateRepoll(WakeupMask) final;
// Handle one grpc_transport_stream_op_batch
void StartBatch(grpc_transport_stream_op_batch* batch) override;
std::string DebugTag() const override;
private:
// At what stage is our handling of send initial metadata?
enum class SendInitialState {
@ -669,6 +674,8 @@ class ClientCallData : public BaseCallData {
RecvTrailingState recv_trailing_state_ = RecvTrailingState::kInitial;
// Polling related data. Non-null if we're actively polling
PollContext* poll_ctx_ = nullptr;
// Initial metadata outstanding token
ClientInitialMetadataOutstandingToken initial_metadata_outstanding_token_;
};
class ServerCallData : public BaseCallData {
@ -678,10 +685,12 @@ class ServerCallData : public BaseCallData {
~ServerCallData() override;
// Activity implementation.
void ForceImmediateRepoll() final;
void ForceImmediateRepoll(WakeupMask) final;
// Handle one grpc_transport_stream_op_batch
void StartBatch(grpc_transport_stream_op_batch* batch) override;
std::string DebugTag() const override;
protected:
absl::string_view ClientOrServerString() const override { return "SVR"; }

@ -69,7 +69,7 @@ inline OrphanablePtr<T> MakeOrphanable(Args&&... args) {
}
// A type of Orphanable with internal ref-counting.
template <typename Child, UnrefBehavior UnrefBehaviorArg = kUnrefDelete>
template <typename Child, typename UnrefBehavior = UnrefDelete>
class InternallyRefCounted : public Orphanable {
public:
// Not copyable nor movable.
@ -99,12 +99,12 @@ class InternallyRefCounted : public Orphanable {
void Unref() {
if (GPR_UNLIKELY(refs_.Unref())) {
internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
unref_behavior_(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (GPR_UNLIKELY(refs_.Unref(location, reason))) {
internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
unref_behavior_(static_cast<Child*>(this));
}
}
@ -115,6 +115,7 @@ class InternallyRefCounted : public Orphanable {
}
RefCount refs_;
GPR_NO_UNIQUE_ADDRESS UnrefBehavior unref_behavior_;
};
} // namespace grpc_core

@ -213,41 +213,34 @@ class NonPolymorphicRefCount {
};
// Behavior of RefCounted<> upon ref count reaching 0.
enum UnrefBehavior {
// Default behavior: Delete the object.
kUnrefDelete,
// Do not delete the object upon unref. This is useful in cases where all
// existing objects must be tracked in a registry but the object's entry in
// the registry cannot be removed from the object's dtor due to
// synchronization issues. In this case, the registry can be cleaned up
// later by identifying entries for which RefIfNonZero() returns null.
kUnrefNoDelete,
// Call the object's dtor but do not delete it. This is useful for cases
// where the object is stored in memory allocated elsewhere (e.g., the call
// arena).
kUnrefCallDtor,
};
namespace internal {
template <typename T, UnrefBehavior UnrefBehaviorArg>
class Delete;
template <typename T>
class Delete<T, kUnrefDelete> {
public:
explicit Delete(T* t) { delete t; }
// Default behavior: Delete the object.
struct UnrefDelete {
template <typename T>
void operator()(T* p) {
delete p;
}
};
template <typename T>
class Delete<T, kUnrefNoDelete> {
public:
explicit Delete(T* /*t*/) {}
// Do not delete the object upon unref. This is useful in cases where all
// existing objects must be tracked in a registry but the object's entry in
// the registry cannot be removed from the object's dtor due to
// synchronization issues. In this case, the registry can be cleaned up
// later by identifying entries for which RefIfNonZero() returns null.
struct UnrefNoDelete {
template <typename T>
void operator()(T* /*p*/) {}
};
template <typename T>
class Delete<T, kUnrefCallDtor> {
public:
explicit Delete(T* t) { t->~T(); }
// Call the object's dtor but do not delete it. This is useful for cases
// where the object is stored in memory allocated elsewhere (e.g., the call
// arena).
struct UnrefCallDtor {
template <typename T>
void operator()(T* p) {
p->~T();
}
};
} // namespace internal
// A base class for reference-counted objects.
// New objects should be created via new and start with a refcount of 1.
@ -276,7 +269,7 @@ class Delete<T, kUnrefCallDtor> {
// ch->Unref();
//
template <typename Child, typename Impl = PolymorphicRefCount,
UnrefBehavior UnrefBehaviorArg = kUnrefDelete>
typename UnrefBehavior = UnrefDelete>
class RefCounted : public Impl {
public:
using RefCountedChildType = Child;
@ -301,12 +294,12 @@ class RefCounted : public Impl {
// friend of this class.
void Unref() {
if (GPR_UNLIKELY(refs_.Unref())) {
internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
unref_behavior_(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (GPR_UNLIKELY(refs_.Unref(location, reason))) {
internal::Delete<Child, UnrefBehaviorArg>(static_cast<Child*>(this));
unref_behavior_(static_cast<Child*>(this));
}
}
@ -331,6 +324,11 @@ class RefCounted : public Impl {
intptr_t initial_refcount = 1)
: refs_(initial_refcount, trace) {}
// Note: Tracing is a no-op on non-debug builds.
explicit RefCounted(UnrefBehavior b, const char* trace = nullptr,
intptr_t initial_refcount = 1)
: refs_(initial_refcount, trace), unref_behavior_(b) {}
private:
// Allow RefCountedPtr<> to access IncrementRefCount().
template <typename T>
@ -342,6 +340,7 @@ class RefCounted : public Impl {
}
RefCount refs_;
GPR_NO_UNIQUE_ADDRESS UnrefBehavior unref_behavior_;
};
} // namespace grpc_core

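Note on the ref-counting change: the `UnrefBehavior` enum and the `internal::Delete<>` specializations become empty callable policy structs (`UnrefDelete`, `UnrefNoDelete`, `UnrefCallDtor`) stored as a `GPR_NO_UNIQUE_ADDRESS` member, which also allows choosing the behavior at construction time. A minimal standalone sketch of the same policy-struct pattern follows; `MiniRefCounted` and `Foo` are illustrative stand-ins, not the gRPC classes, and the ref count is a plain int rather than an atomic.

```cpp
// Standalone sketch of replacing an UnrefBehavior enum + template
// specializations with callable policy structs.
#include <cstdio>

struct UnrefDelete {
  template <typename T>
  void operator()(T* p) { delete p; }
};

struct UnrefCallDtor {
  template <typename T>
  void operator()(T* p) { p->~T(); }  // storage is owned elsewhere (e.g. arena)
};

template <typename Child, typename UnrefBehavior = UnrefDelete>
class MiniRefCounted {
 public:
  void Ref() { ++refs_; }
  void Unref() {
    if (--refs_ == 0) unref_behavior_(static_cast<Child*>(this));
  }

 private:
  int refs_ = 1;  // non-atomic for brevity
  [[no_unique_address]] UnrefBehavior unref_behavior_;  // stateless: zero size
};

struct Foo : MiniRefCounted<Foo> {
  ~Foo() { std::puts("~Foo"); }
};

int main() {
  Foo* f = new Foo;  // starts with one ref
  f->Ref();
  f->Unref();
  f->Unref();  // reaches zero: UnrefDelete deletes the object
}
```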
@ -25,6 +25,11 @@
#include <stddef.h>
#include <memory>
#include <utility>
#include "absl/functional/any_invocable.h"
#include <grpc/support/log.h>
namespace grpc_core {
@ -86,6 +91,17 @@ class Thread {
Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg,
bool* success = nullptr, const Options& options = Options());
Thread(const char* thd_name, absl::AnyInvocable<void()> fn,
bool* success = nullptr, const Options& options = Options())
: Thread(
thd_name,
[](void* p) {
std::unique_ptr<absl::AnyInvocable<void()>> fn_from_p(
static_cast<absl::AnyInvocable<void()>*>(p));
(*fn_from_p)();
},
new absl::AnyInvocable<void()>(std::move(fn)), success, options) {}
/// Move constructor for thread. After this is called, the other thread
/// no longer represents a living thread object
Thread(Thread&& other) noexcept

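Note on the threading change: `Thread` gains an overload taking `absl::AnyInvocable<void()>`, which it boxes on the heap and runs through the existing `void (*)(void*)` entry point. The sketch below shows the same trampoline over a faked C-style thread API; `StartCThread`/`StartThread` are invented names, and `std::function` is used instead of `absl::AnyInvocable` to keep the example dependency-free.

```cpp
// Standalone sketch of the trampoline used by the new Thread(name,
// AnyInvocable) constructor: box the callable on the heap, pass it through a
// void* entry point, and delete it inside the thread.
#include <functional>
#include <iostream>
#include <memory>
#include <thread>

// Pretend this is a legacy interface that only understands void(*)(void*).
std::thread StartCThread(void (*body)(void*), void* arg) {
  return std::thread(body, arg);
}

std::thread StartThread(std::function<void()> fn) {
  // Move the callable onto the heap so it outlives this stack frame.
  auto* boxed = new std::function<void()>(std::move(fn));
  return StartCThread(
      [](void* p) {
        // Take ownership back, run it, and free it when done.
        std::unique_ptr<std::function<void()>> f(
            static_cast<std::function<void()>*>(p));
        (*f)();
      },
      boxed);
}

int main() {
  std::thread t = StartThread([] { std::cout << "hello from thread\n"; });
  t.join();
}
```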
@ -171,8 +171,8 @@ class CallCombinerClosureList {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"CallCombinerClosureList executing closure while already "
"holding call_combiner %p: closure=%p error=%s reason=%s",
call_combiner, closures_[0].closure,
"holding call_combiner %p: closure=%s error=%s reason=%s",
call_combiner, closures_[0].closure->DebugString().c_str(),
StatusToString(closures_[0].error).c_str(), closures_[0].reason);
}
// This will release the call combiner.

@ -19,8 +19,11 @@
#include <stddef.h>
#include <initializer_list>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "src/core/lib/gprpp/atomic_utils.h"
@ -36,7 +39,9 @@ namespace promise_detail {
///////////////////////////////////////////////////////////////////////////////
// HELPER TYPES
std::string Unwakeable::ActivityDebugTag(void*) const { return "<unknown>"; }
std::string Unwakeable::ActivityDebugTag(WakeupMask) const {
return "<unknown>";
}
// Weak handle to an Activity.
// Handle can persist while Activity goes away.
@ -58,7 +63,7 @@ class FreestandingActivity::Handle final : public Wakeable {
// Activity needs to wake up (if it still exists!) - wake it up, and drop the
// ref that was kept for this handle.
void Wakeup(void*) override ABSL_LOCKS_EXCLUDED(mu_) {
void Wakeup(WakeupMask) override ABSL_LOCKS_EXCLUDED(mu_) {
mu_.Lock();
// Note that activity refcount can drop to zero, but we could win the lock
// against DropActivity, so we need to only increase activities refcount if
@ -68,7 +73,7 @@ class FreestandingActivity::Handle final : public Wakeable {
mu_.Unlock();
// Activity still exists and we have a reference: wake it up, which will
// drop the ref.
activity->Wakeup(nullptr);
activity->Wakeup(0);
} else {
// Could not get the activity - it's either gone or going. No need to wake
// it up!
@ -78,9 +83,9 @@ class FreestandingActivity::Handle final : public Wakeable {
Unref();
}
void Drop(void*) override { Unref(); }
void Drop(WakeupMask) override { Unref(); }
std::string ActivityDebugTag(void*) const override {
std::string ActivityDebugTag(WakeupMask) const override {
MutexLock lock(&mu_);
return activity_ == nullptr ? "<unknown>" : activity_->DebugTag();
}
@ -124,7 +129,7 @@ void FreestandingActivity::DropHandle() {
Waker FreestandingActivity::MakeNonOwningWaker() {
mu_.AssertHeld();
return Waker(RefHandle(), nullptr);
return Waker(RefHandle(), 0);
}
} // namespace promise_detail
@ -133,4 +138,15 @@ std::string Activity::DebugTag() const {
return absl::StrFormat("ACTIVITY[%p]", this);
}
///////////////////////////////////////////////////////////////////////////////
// INTRA ACTIVITY WAKER IMPLEMENTATION
std::string IntraActivityWaiter::DebugString() const {
std::vector<int> bits;
for (size_t i = 0; i < 8 * sizeof(WakeupMask); i++) {
if (wakeups_ & (1 << i)) bits.push_back(i);
}
return absl::StrCat("{", absl::StrJoin(bits, ","), "}");
}
} // namespace grpc_core

@ -38,24 +38,29 @@
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/detail/promise_factory.h"
#include "src/core/lib/promise/detail/status.h"
#include "src/core/lib/promise/poll.h"
namespace grpc_core {
class Activity;
// WakeupMask is a bitfield representing which parts of an activity should be
// woken up.
using WakeupMask = uint16_t;
// A Wakeable object is used by queues to wake activities.
class Wakeable {
public:
// Wake up the underlying activity.
// After calling, this Wakeable cannot be used again.
// arg comes from the Waker object and allows one Wakeable instance to be used
// for multiple disjoint subparts of an Activity.
virtual void Wakeup(void* arg) = 0;
// WakeupMask comes from the activity that created this Wakeable and specifies
// the set of promises that should be awoken.
virtual void Wakeup(WakeupMask wakeup_mask) = 0;
// Drop this wakeable without waking up the underlying activity.
virtual void Drop(void* arg) = 0;
virtual void Drop(WakeupMask wakeup_mask) = 0;
// Return the underlying activity debug tag, or "<unknown>" if not available.
virtual std::string ActivityDebugTag(void* arg) const = 0;
virtual std::string ActivityDebugTag(WakeupMask wakeup_mask) const = 0;
protected:
inline ~Wakeable() {}
@ -63,9 +68,9 @@ class Wakeable {
namespace promise_detail {
struct Unwakeable final : public Wakeable {
void Wakeup(void*) override {}
void Drop(void*) override {}
std::string ActivityDebugTag(void*) const override;
void Wakeup(WakeupMask) override {}
void Drop(WakeupMask) override {}
std::string ActivityDebugTag(WakeupMask) const override;
};
static Unwakeable* unwakeable() {
return NoDestructSingleton<Unwakeable>::Get();
@ -76,8 +81,9 @@ static Unwakeable* unwakeable() {
// This type is non-copyable but movable.
class Waker {
public:
Waker(Wakeable* wakeable, void* arg) : wakeable_and_arg_{wakeable, arg} {}
Waker() : Waker(promise_detail::unwakeable(), nullptr) {}
Waker(Wakeable* wakeable, WakeupMask wakeup_mask)
: wakeable_and_arg_{wakeable, wakeup_mask} {}
Waker() : Waker(promise_detail::unwakeable(), 0) {}
~Waker() { wakeable_and_arg_.Drop(); }
Waker(const Waker&) = delete;
Waker& operator=(const Waker&) = delete;
@ -93,7 +99,7 @@ class Waker {
template <typename H>
friend H AbslHashValue(H h, const Waker& w) {
return H::combine(H::combine(std::move(h), w.wakeable_and_arg_.wakeable),
w.wakeable_and_arg_.arg);
w.wakeable_and_arg_.wakeup_mask);
}
bool operator==(const Waker& other) const noexcept {
@ -116,27 +122,42 @@ class Waker {
private:
struct WakeableAndArg {
Wakeable* wakeable;
void* arg;
WakeupMask wakeup_mask;
void Wakeup() { wakeable->Wakeup(arg); }
void Drop() { wakeable->Drop(arg); }
void Wakeup() { wakeable->Wakeup(wakeup_mask); }
void Drop() { wakeable->Drop(wakeup_mask); }
std::string ActivityDebugTag() const {
return wakeable == nullptr ? "<unknown>"
: wakeable->ActivityDebugTag(arg);
: wakeable->ActivityDebugTag(wakeup_mask);
}
bool operator==(const WakeableAndArg& other) const noexcept {
return wakeable == other.wakeable && arg == other.arg;
return wakeable == other.wakeable && wakeup_mask == other.wakeup_mask;
}
};
WakeableAndArg Take() {
return std::exchange(wakeable_and_arg_,
{promise_detail::unwakeable(), nullptr});
return std::exchange(wakeable_and_arg_, {promise_detail::unwakeable(), 0});
}
WakeableAndArg wakeable_and_arg_;
};
// Helper type to track wakeups between objects in the same activity.
// Can be fairly fast as no ref counting or locking needs to occur.
class IntraActivityWaiter {
public:
// Register for wakeup, return Pending(). If state is not ready to proceed,
// Promises should bottom out here.
Pending pending();
// Wake the activity
void Wake();
std::string DebugString() const;
private:
WakeupMask wakeups_ = 0;
};
// An Activity tracks execution of a single promise.
// It executes the promise under a mutex.
// When the promise stalls, it registers the containing activity to be woken up
@ -156,7 +177,13 @@ class Activity : public Orphanable {
void ForceWakeup() { MakeOwningWaker().Wakeup(); }
// Force the current activity to immediately repoll if it doesn't complete.
virtual void ForceImmediateRepoll() = 0;
virtual void ForceImmediateRepoll(WakeupMask mask) = 0;
// Legacy version of ForceImmediateRepoll() that uses the current participant.
// Will go away once Party gets merged with Activity. New usage is banned.
void ForceImmediateRepoll() { ForceImmediateRepoll(CurrentParticipant()); }
// Return the current part of the activity as a bitmask
virtual WakeupMask CurrentParticipant() const { return 1; }
// Return the current activity.
// Additionally:
@ -284,7 +311,7 @@ class FreestandingActivity : public Activity, private Wakeable {
public:
Waker MakeOwningWaker() final {
Ref();
return Waker(this, nullptr);
return Waker(this, 0);
}
Waker MakeNonOwningWaker() final;
@ -293,7 +320,7 @@ class FreestandingActivity : public Activity, private Wakeable {
Unref();
}
void ForceImmediateRepoll() final {
void ForceImmediateRepoll(WakeupMask) final {
mu_.AssertHeld();
SetActionDuringRun(ActionDuringRun::kWakeup);
}
@ -333,7 +360,7 @@ class FreestandingActivity : public Activity, private Wakeable {
Mutex* mu() ABSL_LOCK_RETURNED(mu_) { return &mu_; }
std::string ActivityDebugTag(void*) const override { return DebugTag(); }
std::string ActivityDebugTag(WakeupMask) const override { return DebugTag(); }
private:
class Handle;
@ -467,7 +494,7 @@ class PromiseActivity final
// the activity to an external threadpool to run. If the activity is already
// running on this thread, a note is taken of such and the activity is
// repolled if it doesn't complete.
void Wakeup(void*) final {
void Wakeup(WakeupMask) final {
// If there is an active activity, but hey it's us, flag that and we'll loop
// in RunLoop (that's calling from above here!).
if (Activity::is_current()) {
@ -486,7 +513,7 @@ class PromiseActivity final
}
// Drop a wakeup
void Drop(void*) final { this->WakeupComplete(); }
void Drop(WakeupMask) final { this->WakeupComplete(); }
// Notification that we're no longer executing - it's ok to destruct the
// promise.
@ -593,6 +620,16 @@ ActivityPtr MakeActivity(Factory promise_factory,
std::move(on_done), std::forward<Contexts>(contexts)...));
}
inline Pending IntraActivityWaiter::pending() {
wakeups_ |= Activity::current()->CurrentParticipant();
return Pending();
}
inline void IntraActivityWaiter::Wake() {
if (wakeups_ == 0) return;
Activity::current()->ForceImmediateRepoll(std::exchange(wakeups_, 0));
}
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_PROMISE_ACTIVITY_H

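Note on the activity change: `Wakeable` now takes a `WakeupMask` (a `uint16_t` bitfield) instead of a `void*`, `ForceImmediateRepoll()` accepts a mask, and the relocated `IntraActivityWaiter` accumulates the bits of the participants that returned `Pending()` so that a wake repolls exactly those. A tiny standalone model of that bookkeeping follows; `MiniActivity`/`MiniWaiter` are invented types, not the gRPC ones.

```cpp
// Standalone model of the WakeupMask bookkeeping: each participant in an
// activity owns one bit; waiters record the bits of whoever saw Pending and a
// wake repolls exactly those participants.
#include <cstdint>
#include <iostream>

using WakeupMask = uint16_t;

struct MiniActivity {
  WakeupMask current_participant = 1;  // bit of the promise being polled
  WakeupMask pending_repolls = 0;
  void ForceImmediateRepoll(WakeupMask mask) { pending_repolls |= mask; }
};

class MiniWaiter {
 public:
  // Called by a participant that cannot make progress yet.
  void Pending(MiniActivity& a) { wakeups_ |= a.current_participant; }
  // Called when the awaited state changes: repoll only the recorded bits.
  void Wake(MiniActivity& a) {
    if (wakeups_ == 0) return;
    a.ForceImmediateRepoll(wakeups_);
    wakeups_ = 0;
  }

 private:
  WakeupMask wakeups_ = 0;
};

int main() {
  MiniActivity activity;
  MiniWaiter waiter;
  activity.current_participant = 1 << 3;  // participant #3 polls and stalls
  waiter.Pending(activity);
  waiter.Wake(activity);
  std::cout << "repoll mask: 0x" << std::hex << activity.pending_repolls
            << "\n";  // 0x8: only participant #3 is repolled
}
```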
@ -33,10 +33,20 @@ struct ContextType; // IWYU pragma: keep
namespace promise_detail {
struct KeepExistingIfPresent {};
template <typename T>
class Context : public ContextType<T> {
public:
explicit Context(T* p) : old_(current_) { current_ = p; }
// HACKY, try to remove.
// If a context is present, then don't override it during context
// initialization.
// Currently used to keep BatchBuilder across multiple ops in Call StartBatch,
// but we should be able to drop this once we have promise based transports.
Context(KeepExistingIfPresent, T* p) : old_(current_) {
if (current_ == nullptr) current_ = p;
}
~Context() { current_ = old_; }
Context(const Context&) = delete;
Context& operator=(const Context&) = delete;

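Note on the context change: `Context<T>` gains a `KeepExistingIfPresent` tag constructor so an inner scope does not clobber a context an outer frame already installed (used to keep a `BatchBuilder` alive across ops in `StartBatch`). A minimal standalone sketch of the RAII pattern follows; `MiniContext` and `Batch` are invented for illustration.

```cpp
// Standalone sketch of the KeepExistingIfPresent context constructor: the
// inner scope installs a context only if none is already set, and always
// restores the previous pointer on exit.
#include <cassert>

struct KeepExistingIfPresent {};

template <typename T>
class MiniContext {
 public:
  explicit MiniContext(T* p) : old_(current_) { current_ = p; }
  // Only take over if nothing is currently installed.
  MiniContext(KeepExistingIfPresent, T* p) : old_(current_) {
    if (current_ == nullptr) current_ = p;
  }
  ~MiniContext() { current_ = old_; }
  static T* get() { return current_; }

 private:
  T* const old_;
  static thread_local T* current_;
};

template <typename T>
thread_local T* MiniContext<T>::current_ = nullptr;

struct Batch { int ops = 0; };

int main() {
  Batch outer;
  MiniContext<Batch> a(&outer);
  {
    Batch inner;
    MiniContext<Batch> b(KeepExistingIfPresent{}, &inner);
    assert(MiniContext<Batch>::get() == &outer);  // outer batch kept
  }
  assert(MiniContext<Batch>::get() == &outer);  // restored on scope exit
}
```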
@ -17,6 +17,7 @@
#include <grpc/support/port_platform.h>
#include <memory>
#include <type_traits>
#include <utility>
@ -106,6 +107,9 @@ class Curried {
private:
GPR_NO_UNIQUE_ADDRESS F f_;
GPR_NO_UNIQUE_ADDRESS Arg arg_;
#ifndef NDEBUG
std::unique_ptr<int> asan_canary_ = std::make_unique<int>(0);
#endif
};
// Promote a callable(A) -> T | Poll<T> to a PromiseFactory(A) -> Promise<T> by

@ -17,7 +17,9 @@
#include <grpc/support/port_platform.h>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/types/variant.h"
@ -162,6 +164,9 @@ class If<bool, T, F> {
}
Poll<Result> operator()() {
#ifndef NDEBUG
asan_canary_ = std::make_unique<int>(1 + *asan_canary_);
#endif
if (condition_) {
return if_true_();
} else {
@ -175,6 +180,10 @@ class If<bool, T, F> {
TruePromise if_true_;
FalsePromise if_false_;
};
// Make failure to destruct show up in ASAN builds.
#ifndef NDEBUG
std::unique_ptr<int> asan_canary_ = std::make_unique<int>(0);
#endif
};
} // namespace promise_detail

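Note on the debug canary: `If<>` (and `Curried` in promise_factory.h) pick up a debug-only heap-allocated canary; touching it on every poll and freeing it in the destructor makes a promise that is never destructed show up as a leak under AddressSanitizer, and incidentally catches polling a destroyed object. A standalone sketch of the idiom, with an invented `MyPromise`:

```cpp
// Standalone sketch of the debug-only ASAN canary added to If<> and Curried:
// a heap allocation owned by the object makes failure to destruct (and
// use-after-free of the containing promise) visible to AddressSanitizer.
#include <memory>

class MyPromise {
 public:
  int operator()() {
#ifndef NDEBUG
    // Touch the canary on every poll; under ASAN a dangling `this` trips here.
    asan_canary_ = std::make_unique<int>(1 + *asan_canary_);
#endif
    return 42;
  }

 private:
#ifndef NDEBUG
  // Freed in the destructor; if the object leaks, ASAN reports the allocation.
  std::unique_ptr<int> asan_canary_ = std::make_unique<int>(0);
#endif
};

int main() {
  MyPromise p;
  return p() == 42 ? 0 : 1;
}
```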
@ -89,6 +89,10 @@ class InterceptorList {
public:
RunPromise(size_t memory_required, Map* factory, absl::optional<T> value) {
if (!value.has_value() || factory == nullptr) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG,
"InterceptorList::RunPromise[%p]: create immediate", this);
}
is_immediately_resolved_ = true;
Construct(&result_, std::move(value));
} else {
@ -96,10 +100,18 @@ class InterceptorList {
Construct(&async_resolution_, memory_required);
factory->MakePromise(std::move(*value), async_resolution_.space.get());
async_resolution_.current_factory = factory;
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG,
"InterceptorList::RunPromise[%p]: create async; mem=%p", this,
async_resolution_.space.get());
}
}
}
~RunPromise() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: destroy", this);
}
if (is_immediately_resolved_) {
Destruct(&result_);
} else {
@ -116,6 +128,10 @@ class InterceptorList {
RunPromise(RunPromise&& other) noexcept
: is_immediately_resolved_(other.is_immediately_resolved_) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: move from %p",
this, &other);
}
if (is_immediately_resolved_) {
Construct(&result_, std::move(other.result_));
} else {
@ -127,7 +143,7 @@ class InterceptorList {
Poll<absl::optional<T>> operator()() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "InterceptorList::RunPromise: %s",
gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: %s", this,
DebugString().c_str());
}
if (is_immediately_resolved_) return std::move(result_);
@ -139,7 +155,12 @@ class InterceptorList {
async_resolution_.space.get());
async_resolution_.current_factory =
async_resolution_.current_factory->next();
if (async_resolution_.current_factory == nullptr || !p->has_value()) {
if (!p->has_value()) async_resolution_.current_factory = nullptr;
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "InterceptorList::RunPromise[%p]: %s", this,
DebugString().c_str());
}
if (async_resolution_.current_factory == nullptr) {
return std::move(*p);
}
async_resolution_.current_factory->MakePromise(

@ -1,55 +0,0 @@
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_PROMISE_INTRA_ACTIVITY_WAITER_H
#define GRPC_SRC_CORE_LIB_PROMISE_INTRA_ACTIVITY_WAITER_H
#include <grpc/support/port_platform.h>
#include <string>
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/poll.h"
namespace grpc_core {
// Helper type to track wakeups between objects in the same activity.
// Can be fairly fast as no ref counting or locking needs to occur.
class IntraActivityWaiter {
public:
// Register for wakeup, return Pending(). If state is not ready to proceed,
// Promises should bottom out here.
Pending pending() {
waiting_ = true;
return Pending();
}
// Wake the activity
void Wake() {
if (waiting_) {
waiting_ = false;
Activity::current()->ForceImmediateRepoll();
}
}
std::string DebugString() const {
return waiting_ ? "WAITING" : "NOT_WAITING";
}
private:
bool waiting_ = false;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_PROMISE_INTRA_ACTIVITY_WAITER_H

@ -19,6 +19,7 @@
#include <stdint.h>
#include <atomic>
#include <string>
#include <type_traits>
#include <utility>
@ -29,7 +30,6 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/intra_activity_waiter.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/trace.h"
@ -61,13 +61,14 @@ class Latch {
}
// Produce a promise to wait for a value from this latch.
// Moves the result out of the latch.
auto Wait() {
#ifndef NDEBUG
has_had_waiters_ = true;
#endif
return [this]() -> Poll<T> {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sPollWait %s", DebugTag().c_str(),
gpr_log(GPR_INFO, "%sWait %s", DebugTag().c_str(),
StateString().c_str());
}
if (has_value_) {
@ -78,6 +79,25 @@ class Latch {
};
}
// Produce a promise to wait for a value from this latch.
// Copies the result out of the latch.
auto WaitAndCopy() {
#ifndef NDEBUG
has_had_waiters_ = true;
#endif
return [this]() -> Poll<T> {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sWaitAndCopy %s", DebugTag().c_str(),
StateString().c_str());
}
if (has_value_) {
return value_;
} else {
return waiter_.pending();
}
};
}
// Set the value of the latch. Can only be called once.
void Set(T value) {
if (grpc_trace_promise_primitives.enabled()) {
@ -89,6 +109,8 @@ class Latch {
waiter_.Wake();
}
bool is_set() const { return has_value_; }
private:
std::string DebugTag() {
return absl::StrCat(Activity::current()->DebugTag(), " LATCH[0x",
@ -165,7 +187,7 @@ class Latch<void> {
private:
std::string DebugTag() {
return absl::StrCat(Activity::current()->DebugTag(), " LATCH[0x",
return absl::StrCat(Activity::current()->DebugTag(), " LATCH(void)[0x",
reinterpret_cast<uintptr_t>(this), "]: ");
}
@ -183,6 +205,70 @@ class Latch<void> {
IntraActivityWaiter waiter_;
};
// A Latch that can have its value observed by outside threads, but only waited
// upon from inside a single activity.
template <typename T>
class ExternallyObservableLatch;
template <>
class ExternallyObservableLatch<void> {
public:
ExternallyObservableLatch() = default;
ExternallyObservableLatch(const ExternallyObservableLatch&) = delete;
ExternallyObservableLatch& operator=(const ExternallyObservableLatch&) =
delete;
// Produce a promise to wait for this latch.
auto Wait() {
return [this]() -> Poll<Empty> {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sPollWait %s", DebugTag().c_str(),
StateString().c_str());
}
if (IsSet()) {
return Empty{};
} else {
return waiter_.pending();
}
};
}
// Set the latch.
void Set() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sSet %s", DebugTag().c_str(), StateString().c_str());
}
is_set_.store(true, std::memory_order_relaxed);
waiter_.Wake();
}
bool IsSet() const { return is_set_.load(std::memory_order_relaxed); }
void Reset() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%sReset %s", DebugTag().c_str(),
StateString().c_str());
}
is_set_.store(false, std::memory_order_relaxed);
}
private:
std::string DebugTag() {
return absl::StrCat(Activity::current()->DebugTag(), " LATCH(void)[0x",
reinterpret_cast<uintptr_t>(this), "]: ");
}
std::string StateString() {
return absl::StrCat(
"is_set:", is_set_.load(std::memory_order_relaxed) ? "true" : "false",
" waiter:", waiter_.DebugString());
}
// True if we have a value set, false otherwise.
std::atomic<bool> is_set_{false};
IntraActivityWaiter waiter_;
};
template <typename T>
using LatchWaitPromise = decltype(std::declval<Latch<T>>().Wait());

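Note on the latch change: `Latch<T>` gains `WaitAndCopy()` (resolves with a copy, so multiple parts of an activity can observe the value) alongside the moving `Wait()`, plus an `ExternallyObservableLatch<void>` whose `IsSet()`/`Reset()` use a relaxed atomic readable from other threads. The standalone sketch below models only the Wait-versus-WaitAndCopy distinction; `MiniLatch` and the `std::optional`-based poll result are invented stand-ins for the gRPC types.

```cpp
// Standalone sketch of Latch::Wait() (moves the value out, single consumer)
// versus the new Latch::WaitAndCopy() (copies, so several parts of the
// activity can read it). std::optional stands in for Poll<T>.
#include <cassert>
#include <optional>
#include <string>
#include <utility>

template <typename T>
class MiniLatch {
 public:
  void Set(T value) {
    assert(!value_.has_value());  // Set() may only be called once
    value_ = std::move(value);
  }
  // Wait(): resolves by moving the stored value out.
  std::optional<T> PollWait() {
    if (!value_.has_value()) return std::nullopt;  // still pending
    return std::move(*value_);
  }
  // WaitAndCopy(): resolves by copying, leaving the value in place.
  std::optional<T> PollWaitAndCopy() const {
    if (!value_.has_value()) return std::nullopt;  // still pending
    return *value_;
  }

 private:
  std::optional<T> value_;
};

int main() {
  MiniLatch<std::string> latch;
  assert(!latch.PollWaitAndCopy().has_value());  // pending before Set()
  latch.Set("server-metadata");
  auto copy1 = latch.PollWaitAndCopy();
  auto copy2 = latch.PollWaitAndCopy();
  assert(copy1 == copy2);         // copies observe the same value
  auto moved = latch.PollWait();  // moves it out; do this at most once
  assert(moved.has_value());
}
```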
@ -17,14 +17,13 @@
#include <grpc/support/port_platform.h>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/variant.h"
#include "src/core/lib/gprpp/construct_destruct.h"
#include "src/core/lib/promise/detail/promise_factory.h"
#include "src/core/lib/promise/poll.h"
@ -83,17 +82,21 @@ class Loop {
public:
using Result = typename LoopTraits<PromiseResult>::Result;
explicit Loop(F f) : factory_(std::move(f)), promise_(factory_.Make()) {}
~Loop() { promise_.~PromiseType(); }
explicit Loop(F f) : factory_(std::move(f)) {}
~Loop() {
if (started_) Destruct(&promise_);
}
Loop(Loop&& loop) noexcept
: factory_(std::move(loop.factory_)),
promise_(std::move(loop.promise_)) {}
Loop(Loop&& loop) noexcept : factory_(std::move(loop.factory_)) {}
Loop(const Loop& loop) = delete;
Loop& operator=(const Loop& loop) = delete;
Poll<Result> operator()() {
if (!started_) {
started_ = true;
Construct(&promise_, factory_.Make());
}
while (true) {
// Poll the inner promise.
auto promise_result = promise_();
@ -103,8 +106,8 @@ class Loop {
// from our factory.
auto lc = LoopTraits<PromiseResult>::ToLoopCtl(*p);
if (absl::holds_alternative<Continue>(lc)) {
promise_.~PromiseType();
new (&promise_) PromiseType(factory_.Make());
Destruct(&promise_);
Construct(&promise_, factory_.Make());
continue;
}
// - otherwise there's our result... return it out.
@ -121,6 +124,7 @@ class Loop {
GPR_NO_UNIQUE_ADDRESS union {
GPR_NO_UNIQUE_ADDRESS PromiseType promise_;
};
bool started_ = false;
};
} // namespace promise_detail

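Note on the loop change: `Loop<>` now builds its inner promise lazily on the first poll (`started_` flag plus union storage and `Construct`/`Destruct`) rather than in its constructor, so a not-yet-started loop can be moved. A minimal standalone sketch of that lazy-construction pattern follows; `MiniLoop` and the int-returning "promise" are illustrative only.

```cpp
// Standalone sketch of the lazy-construction pattern Loop<> switches to:
// the promise lives in a union and is built from the factory on first poll,
// so the wrapper can be moved around before it has ever run.
#include <iostream>
#include <new>
#include <utility>

template <typename Factory>
class MiniLoop {
 public:
  explicit MiniLoop(Factory f) : factory_(std::move(f)) {}
  MiniLoop(MiniLoop&& other) noexcept : factory_(std::move(other.factory_)) {}
  ~MiniLoop() {
    if (started_) promise_.~Promise();  // only destroy what was constructed
  }

  int Poll() {
    if (!started_) {
      started_ = true;
      new (&promise_) Promise(factory_());  // first poll: build the promise
    }
    return promise_();
  }

 private:
  using Promise = decltype(std::declval<Factory>()());
  Factory factory_;
  union {
    Promise promise_;  // constructed lazily; union suppresses auto ctor/dtor
  };
  bool started_ = false;
};

int main() {
  auto factory = [] {
    return [n = 0]() mutable { return ++n; };
  };
  MiniLoop<decltype(factory)> loop(factory);
  MiniLoop<decltype(factory)> moved = std::move(loop);  // legal before start
  std::cout << moved.Poll() << " " << moved.Poll() << "\n";  // prints "1 2"
}
```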
@ -39,6 +39,13 @@ class Map {
Map(Promise promise, Fn fn)
: promise_(std::move(promise)), fn_(std::move(fn)) {}
Map(const Map&) = delete;
Map& operator=(const Map&) = delete;
// NOLINTNEXTLINE(performance-noexcept-move-constructor): clang6 bug
Map(Map&& other) = default;
// NOLINTNEXTLINE(performance-noexcept-move-constructor): clang6 bug
Map& operator=(Map&& other) = default;
using PromiseResult = typename PromiseLike<Promise>::Result;
using Result =
RemoveCVRef<decltype(std::declval<Fn>()(std::declval<PromiseResult>()))>;
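For context, a minimal sketch (not part of this change) of what Map does with these types; the free Map() helper in this header wraps this class:

// Hypothetical: transform the eventual result of a promise.
auto doubled = Map([]() -> Poll<int> { return 21; },  // inner promise
                   [](int x) { return 2 * x; });      // applied once it is ready
// Polling doubled forwards Pending() until the inner promise resolves,
// then yields Poll<int> holding 42.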

@ -1,295 +0,0 @@
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_PROMISE_OBSERVABLE_H
#define GRPC_SRC_CORE_LIB_PROMISE_OBSERVABLE_H
#include <grpc/support/port_platform.h>
#include <stdint.h>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/types/optional.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/detail/promise_like.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/wait_set.h"
namespace grpc_core {
namespace promise_detail {
using ObservableVersion = uint64_t;
static constexpr ObservableVersion kTombstoneVersion =
std::numeric_limits<ObservableVersion>::max();
} // namespace promise_detail
class WatchCommitter {
public:
void Commit() { version_seen_ = promise_detail::kTombstoneVersion; }
protected:
promise_detail::ObservableVersion version_seen_ = 0;
};
namespace promise_detail {
// Shared state between Observable and Observer.
template <typename T>
class ObservableState {
public:
explicit ObservableState(absl::optional<T> value)
: value_(std::move(value)) {}
// Publish that we're closed.
void Close() {
mu_.Lock();
version_ = kTombstoneVersion;
value_.reset();
auto wakeup = waiters_.TakeWakeupSet();
mu_.Unlock();
wakeup.Wakeup();
}
// Synchronously publish a new value, and wake any waiters.
void Push(T value) {
mu_.Lock();
version_++;
value_ = std::move(value);
auto wakeup = waiters_.TakeWakeupSet();
mu_.Unlock();
wakeup.Wakeup();
}
Poll<absl::optional<T>> PollGet(ObservableVersion* version_seen) {
MutexLock lock(&mu_);
if (!Started()) return Pending();
*version_seen = version_;
return value_;
}
Poll<absl::optional<T>> PollNext(ObservableVersion* version_seen) {
MutexLock lock(&mu_);
if (!NextValueReady(version_seen)) return Pending();
return value_;
}
Poll<absl::optional<T>> PollWatch(ObservableVersion* version_seen) {
if (*version_seen == kTombstoneVersion) return Pending();
MutexLock lock(&mu_);
if (!NextValueReady(version_seen)) return Pending();
// Watch needs to be woken up if the value changes even if it's ready now.
waiters_.AddPending(Activity::current()->MakeNonOwningWaker());
return value_;
}
private:
// Returns true if an initial value is set.
// If one is not set, add ourselves as pending to waiters_, and return false.
bool Started() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!value_.has_value()) {
if (version_ != kTombstoneVersion) {
// We allow initial no-value, which does not indicate closure.
waiters_.AddPending(Activity::current()->MakeNonOwningWaker());
return false;
}
}
return true;
}
// If no value is ready, add ourselves as pending to waiters_ and return
// false.
// If the next value is ready, update the last version seen and return true.
bool NextValueReady(ObservableVersion* version_seen)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!Started()) return false;
if (version_ == *version_seen) {
waiters_.AddPending(Activity::current()->MakeNonOwningWaker());
return false;
}
*version_seen = version_;
return true;
}
Mutex mu_;
WaitSet waiters_ ABSL_GUARDED_BY(mu_);
ObservableVersion version_ ABSL_GUARDED_BY(mu_) = 1;
absl::optional<T> value_ ABSL_GUARDED_BY(mu_);
};
// Promise implementation for Observer::Get.
template <typename T>
class ObservableGet {
public:
ObservableGet(ObservableVersion* version_seen, ObservableState<T>* state)
: version_seen_(version_seen), state_(state) {}
Poll<absl::optional<T>> operator()() {
return state_->PollGet(version_seen_);
}
private:
ObservableVersion* version_seen_;
ObservableState<T>* state_;
};
// Promise implementation for Observer::Next.
template <typename T>
class ObservableNext {
public:
ObservableNext(ObservableVersion* version_seen, ObservableState<T>* state)
: version_seen_(version_seen), state_(state) {}
Poll<absl::optional<T>> operator()() {
return state_->PollNext(version_seen_);
}
private:
ObservableVersion* version_seen_;
ObservableState<T>* state_;
};
template <typename T, typename F>
class ObservableWatch final : private WatchCommitter {
private:
using Promise = PromiseLike<decltype(std::declval<F>()(
std::declval<T>(), std::declval<WatchCommitter*>()))>;
using Result = typename Promise::Result;
public:
explicit ObservableWatch(F factory, std::shared_ptr<ObservableState<T>> state)
: state_(std::move(state)), factory_(std::move(factory)) {}
ObservableWatch(const ObservableWatch&) = delete;
ObservableWatch& operator=(const ObservableWatch&) = delete;
ObservableWatch(ObservableWatch&& other) noexcept
: state_(std::move(other.state_)),
promise_(std::move(other.promise_)),
factory_(std::move(other.factory_)) {}
ObservableWatch& operator=(ObservableWatch&&) noexcept = default;
Poll<Result> operator()() {
auto r = state_->PollWatch(&version_seen_);
if (auto* p = r.value_if_ready()) {
if (p->has_value()) {
promise_ = Promise(factory_(std::move(**p), this));
} else {
promise_ = {};
}
}
if (promise_.has_value()) {
return (*promise_)();
} else {
return Pending();
}
}
private:
std::shared_ptr<ObservableState<T>> state_;
absl::optional<Promise> promise_;
F factory_;
};
} // namespace promise_detail
template <typename T>
class Observable;
// Observer watches an Observable for updates.
// It can see either the latest value or wait for a new value, but is not
// guaranteed to see every value pushed to the Observable.
template <typename T>
class Observer {
public:
Observer(const Observer&) = delete;
Observer& operator=(const Observer&) = delete;
Observer(Observer&& other) noexcept
: version_seen_(other.version_seen_), state_(std::move(other.state_)) {}
Observer& operator=(Observer&& other) noexcept {
version_seen_ = other.version_seen_;
state_ = std::move(other.state_);
return *this;
}
// Return a promise that will produce an optional<T>.
// If the Observable is still present, this will be a value T, but if the
// Observable has been closed, this will be nullopt. Borrows data from the
// Observer, so this value must stay valid until the promise is resolved. Only
// one Next, Get call is allowed to be outstanding at a time.
promise_detail::ObservableGet<T> Get() {
return promise_detail::ObservableGet<T>{&version_seen_, &*state_};
}
// Return a promise that will produce the next unseen value as an optional<T>.
// If the Observable is still present, this will be a value T, but if the
// Observable has been closed, this will be nullopt. Borrows data from the
// Observer, so this value must stay valid until the promise is resolved. Only
// one Next, Get call is allowed to be outstanding at a time.
promise_detail::ObservableNext<T> Next() {
return promise_detail::ObservableNext<T>{&version_seen_, &*state_};
}
private:
using State = promise_detail::ObservableState<T>;
friend class Observable<T>;
explicit Observer(std::shared_ptr<State> state) : state_(state) {}
promise_detail::ObservableVersion version_seen_ = 0;
std::shared_ptr<State> state_;
};
// Observable models a single writer multiple reader broadcast channel.
// Readers can observe the latest value, or await a new latest value, but they
// are not guaranteed to observe every value.
template <typename T>
class Observable {
public:
Observable() : state_(std::make_shared<State>(absl::nullopt)) {}
explicit Observable(T value)
: state_(std::make_shared<State>(std::move(value))) {}
~Observable() { state_->Close(); }
Observable(const Observable&) = delete;
Observable& operator=(const Observable&) = delete;
// Push a new value into the observable.
void Push(T value) { state_->Push(std::move(value)); }
// Create a new Observer - which can pull the current state from this
// Observable.
Observer<T> MakeObserver() { return Observer<T>(state_); }
// Create a new Watch - a promise that pushes state into the passed in promise
// factory. The promise factory takes two parameters - the current value and a
// commit token. If the commit token is used (the Commit function on it is
// called), then no further Watch updates are provided.
template <typename F>
promise_detail::ObservableWatch<T, F> Watch(F f) {
return promise_detail::ObservableWatch<T, F>(std::move(f), state_);
}
private:
using State = promise_detail::ObservableState<T>;
std::shared_ptr<State> state_;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_PROMISE_OBSERVABLE_H
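For readers unfamiliar with the file deleted above, a rough sketch of the API it provided (illustrative only, not part of the change itself):

// Hypothetical usage of the removed Observable/Observer pair.
Observable<int> observable(1);                     // starts with an initial value
Observer<int> observer = observable.MakeObserver();
observable.Push(2);                                // broadcast a new latest value
auto next = observer.Next();                       // promise yielding optional<int>: 2 here,
                                                   // nullopt once the Observable is destroyed
auto watch = observable.Watch([](int value, WatchCommitter* committer) {
  if (value >= 2) committer->Commit();             // stop receiving further updates
  return Immediate(absl::OkStatus());
});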

@ -21,8 +21,6 @@
#include <algorithm>
#include <atomic>
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
@ -37,6 +35,13 @@
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/trace.h"
// #define GRPC_PARTY_MAXIMIZE_THREADS
#ifdef GRPC_PARTY_MAXIMIZE_THREADS
#include "src/core/lib/gprpp/thd.h" // IWYU pragma: keep
#include "src/core/lib/iomgr/exec_ctx.h" // IWYU pragma: keep
#endif
namespace grpc_core {
// Weak handle to a Party.
@ -59,7 +64,7 @@ class Party::Handle final : public Wakeable {
// Activity needs to wake up (if it still exists!) - wake it up, and drop the
// ref that was kept for this handle.
void Wakeup(void* arg) override ABSL_LOCKS_EXCLUDED(mu_) {
void Wakeup(WakeupMask wakeup_mask) override ABSL_LOCKS_EXCLUDED(mu_) {
mu_.Lock();
// Note that activity refcount can drop to zero, but we could win the lock
// against DropActivity, so we need to only increase activities refcount if
@ -69,7 +74,7 @@ class Party::Handle final : public Wakeable {
mu_.Unlock();
// Activity still exists and we have a reference: wake it up, which will
// drop the ref.
party->Wakeup(reinterpret_cast<void*>(arg));
party->Wakeup(wakeup_mask);
} else {
// Could not get the activity - it's either gone or going. No need to wake
// it up!
@ -79,9 +84,9 @@ class Party::Handle final : public Wakeable {
Unref();
}
void Drop(void*) override { Unref(); }
void Drop(WakeupMask) override { Unref(); }
std::string ActivityDebugTag(void*) const override {
std::string ActivityDebugTag(WakeupMask) const override {
MutexLock lock(&mu_);
return party_ == nullptr ? "<unknown>" : party_->DebugTag();
}
@ -116,14 +121,16 @@ Party::Participant::~Participant() {
}
}
Party::~Party() {
participants_.clear();
arena_->Destroy();
}
void Party::Orphan() { Unref(); }
Party::~Party() {}
void Party::Ref() { state_.fetch_add(kOneRef, std::memory_order_relaxed); }
void Party::IncrementRefCount(DebugLocation whence) {
auto prev_state = state_.fetch_add(kOneRef, std::memory_order_relaxed);
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "%s[party] Ref: prev_state=%s from %s:%d",
DebugTag().c_str(), StateToString(prev_state).c_str(),
whence.file(), whence.line());
}
}
bool Party::RefIfNonZero() {
auto count = state_.load(std::memory_order_relaxed);
@ -140,56 +147,120 @@ bool Party::RefIfNonZero() {
return true;
}
void Party::Unref() {
auto prev = state_.fetch_sub(kOneRef, std::memory_order_acq_rel);
if (prev == kOneRef) {
delete this;
void Party::Unref(DebugLocation whence) {
uint64_t prev_state;
auto do_unref = [&prev_state, this]() {
prev_state = state_.fetch_sub(kOneRef, std::memory_order_acq_rel);
};
if (grpc_trace_promise_primitives.enabled()) {
auto debug_tag = DebugTag();
do_unref();
gpr_log(GPR_DEBUG, "%s[party] Unref: prev_state=%s from %s:%d",
debug_tag.c_str(), StateToString(prev_state).c_str(), whence.file(),
whence.line());
} else {
do_unref();
}
if ((prev_state & kRefMask) == kOneRef) {
prev_state =
state_.fetch_or(kDestroying | kLocked, std::memory_order_acq_rel);
if (prev_state & kLocked) {
// Already locked: RunParty will call PartyOver.
} else {
ScopedActivity activity(this);
PartyOver();
}
return;
}
GPR_DEBUG_ASSERT((prev & kRefMask) != 0);
}
std::string Party::ActivityDebugTag(void* arg) const {
return absl::StrFormat("%s/%p", DebugTag(), arg);
void Party::CancelRemainingParticipants() {
ScopedActivity activity(this);
promise_detail::Context<Arena> arena_ctx(arena_);
for (size_t i = 0; i < kMaxParticipants; i++) {
if (auto* p =
participants_[i].exchange(nullptr, std::memory_order_acquire)) {
p->Destroy();
}
}
}
std::string Party::ActivityDebugTag(WakeupMask wakeup_mask) const {
return absl::StrFormat("%s [parts:%x]", DebugTag(), wakeup_mask);
}
Waker Party::MakeOwningWaker() {
GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
Ref();
return Waker(this, reinterpret_cast<void*>(currently_polling_));
IncrementRefCount();
return Waker(this, 1u << currently_polling_);
}
Waker Party::MakeNonOwningWaker() {
GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
return Waker(participants_[currently_polling_]->MakeNonOwningWakeable(this),
reinterpret_cast<void*>(currently_polling_));
return Waker(participants_[currently_polling_]
.load(std::memory_order_relaxed)
->MakeNonOwningWakeable(this),
1u << currently_polling_);
}
void Party::ForceImmediateRepoll() {
GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
void Party::ForceImmediateRepoll(WakeupMask mask) {
GPR_DEBUG_ASSERT(is_current());
// Or in the bit for the currently polling participant.
// Will be grabbed next round to force a repoll of this promise.
state_.fetch_or(1 << currently_polling_, std::memory_order_relaxed);
auto prev_state =
state_.fetch_or(mask & kWakeupMask, std::memory_order_relaxed);
if (grpc_trace_promise_primitives.enabled()) {
std::vector<int> wakeups;
for (size_t i = 0; i < 8 * sizeof(WakeupMask); i++) {
if (mask & (1 << i)) wakeups.push_back(i);
}
gpr_log(GPR_DEBUG, "%s[party] ForceImmediateRepoll({%s}): prev_state=%s",
DebugTag().c_str(), absl::StrJoin(wakeups, ",").c_str(),
StateToString(prev_state).c_str());
}
}
void Party::Run() {
void Party::RunLocked() {
auto body = [this]() {
if (RunParty()) {
ScopedActivity activity(this);
PartyOver();
}
};
#ifdef GRPC_PARTY_MAXIMIZE_THREADS
Thread thd(
"RunParty",
[body]() {
ApplicationCallbackExecCtx app_exec_ctx;
ExecCtx exec_ctx;
body();
},
nullptr, Thread::Options().set_joinable(false));
thd.Start();
#else
body();
#endif
}
bool Party::RunParty() {
ScopedActivity activity(this);
promise_detail::Context<Arena> arena_ctx(arena_);
uint64_t prev_state;
do {
// Grab the current state, and clear the wakeup bits & add flag.
prev_state =
state_.fetch_and(kRefMask | kLocked, std::memory_order_acquire);
prev_state = state_.fetch_and(kRefMask | kLocked | kAllocatedMask,
std::memory_order_acquire);
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "Party::Run(): prev_state=%s",
gpr_log(GPR_DEBUG, "%s[party] Run prev_state=%s", DebugTag().c_str(),
StateToString(prev_state).c_str());
}
GPR_ASSERT(prev_state & kLocked);
if (prev_state & kDestroying) return true;
// From the previous state, extract which participants we're to wakeup.
uint64_t wakeups = prev_state & kWakeupMask;
// If there were adds pending, drain them.
// We pass in wakeups here so that the new participants are polled
// immediately (draining will situate them).
if (prev_state & kAddsPending) DrainAdds(wakeups);
// Now update prev_state to be what we want the CAS to see below.
prev_state &= kRefMask | kLocked;
prev_state &= kRefMask | kLocked | kAllocatedMask;
// For each wakeup bit...
for (size_t i = 0; wakeups != 0; i++, wakeups >>= 1) {
// If the bit is not set, skip.
@ -197,10 +268,35 @@ void Party::Run() {
// If the participant is null, skip.
// This allows participants to complete whilst wakers still exist
// somewhere.
if (participants_[i] == nullptr) continue;
auto* participant = participants_[i].load(std::memory_order_acquire);
if (participant == nullptr) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "%s[party] wakeup %" PRIdPTR " already complete",
DebugTag().c_str(), i);
}
continue;
}
absl::string_view name;
if (grpc_trace_promise_primitives.enabled()) {
name = participant->name();
gpr_log(GPR_DEBUG, "%s[%s] begin job %" PRIdPTR, DebugTag().c_str(),
std::string(name).c_str(), i);
}
// Poll the participant.
currently_polling_ = i;
if (participants_[i]->Poll()) participants_[i].reset();
if (participant->Poll()) {
if (!name.empty()) {
gpr_log(GPR_DEBUG, "%s[%s] end poll and finish job %" PRIdPTR,
DebugTag().c_str(), std::string(name).c_str(), i);
}
participants_[i] = nullptr;
const uint64_t allocated_bit = (1u << i << kAllocatedShift);
prev_state &= ~allocated_bit;
state_.fetch_and(~allocated_bit, std::memory_order_release);
} else if (!name.empty()) {
gpr_log(GPR_DEBUG, "%s[%s] end poll", DebugTag().c_str(),
std::string(name).c_str());
}
currently_polling_ = kNotPolling;
}
// Try to CAS the state we expected to have (with no wakeups or adds)
@ -214,106 +310,100 @@ void Party::Run() {
// TODO(ctiller): consider mitigations for the accidental wakeup on owning
// waker creation case -- I currently expect this will be more expensive
// than this quick loop.
} while (!state_.compare_exchange_weak(prev_state, (prev_state & kRefMask),
std::memory_order_acq_rel,
std::memory_order_acquire));
} while (!state_.compare_exchange_weak(
prev_state, (prev_state & (kRefMask | kAllocatedMask)),
std::memory_order_acq_rel, std::memory_order_acquire));
return false;
}
void Party::DrainAdds(uint64_t& wakeups) {
// Grab the list of adds.
AddingParticipant* adding =
adding_.exchange(nullptr, std::memory_order_acquire);
// For each add, situate it and add it to the wakeup mask.
while (adding != nullptr) {
wakeups |= 1 << SituateNewParticipant(std::move(adding->participant));
// Don't leak the add request.
delete std::exchange(adding, adding->next);
}
}
void Party::AddParticipant(Participant* participant) {
uint64_t state = state_.load(std::memory_order_acquire);
uint64_t allocated;
int slot;
// Find slots for each new participant, ordering them from lowest available
// slot upwards to ensure the same poll ordering as presentation ordering to
// this function.
do {
slot = -1;
allocated = (state & kAllocatedMask) >> kAllocatedShift;
for (size_t bit = 0; bit < kMaxParticipants; bit++) {
if (allocated & (1 << bit)) continue;
slot = bit;
allocated |= 1 << bit;
break;
}
GPR_ASSERT(slot != -1);
} while (!state_.compare_exchange_weak(
state, state | (allocated << kAllocatedShift), std::memory_order_acq_rel,
std::memory_order_acquire));
void Party::AddParticipant(Arena::PoolPtr<Participant> participant) {
// Lock
auto prev_state = state_.fetch_or(kLocked, std::memory_order_acquire);
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "Party::AddParticipant(): prev_state=%s",
StateToString(prev_state).c_str());
}
if ((prev_state & kLocked) == 0) {
// Lock acquired
state_.fetch_or(1 << SituateNewParticipant(std::move(participant)),
std::memory_order_relaxed);
Run();
return;
}
// Already locked: add to the list of things to add
auto* add = new AddingParticipant{std::move(participant), nullptr};
while (!adding_.compare_exchange_weak(
add->next, add, std::memory_order_acq_rel, std::memory_order_acquire)) {
}
// And signal that there are adds waiting.
// This needs to happen after the add above: Run() will examine this bit
// first, and then decide to drain the queue - so if the ordering was reversed
// it might examine the adds pending bit, and then observe no add to drain.
prev_state =
state_.fetch_or(kLocked | kAddsPending, std::memory_order_release);
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "Party::AddParticipant(): prev_state=%s",
StateToString(prev_state).c_str());
gpr_log(GPR_DEBUG, "%s[party] Welcome %s@%d", DebugTag().c_str(),
std::string(participant->name()).c_str(), slot);
}
if ((prev_state & kLocked) == 0) {
// We queued the add but the lock was released before we signalled that.
// We acquired the lock though, so now we can run.
Run();
}
}
size_t Party::SituateNewParticipant(Arena::PoolPtr<Participant> participant) {
// First search for a free index in the participants array.
// If we find one, use it.
for (size_t i = 0; i < participants_.size(); i++) {
if (participants_[i] != nullptr) continue;
participants_[i] = std::move(participant);
return i;
}
// We've allocated the slot, next we need to populate it.
// Once we do so however a spurious wakeup could occur, and that wakeup might
// release the last ref.
// We need to hold one here.
auto ref = Ref();
participants_[slot].store(participant, std::memory_order_release);
// Otherwise, add it to the end.
GPR_ASSERT(participants_.size() < kMaxParticipants);
participants_.emplace_back(std::move(participant));
return participants_.size() - 1;
// Now we need to wake up the party.
state = state_.fetch_or((1 << slot) | kLocked, std::memory_order_relaxed);
// If the party was already locked, we're done.
if ((state & kLocked) != 0) return;
// Otherwise, we need to run the party.
RunLocked();
}
void Party::ScheduleWakeup(uint64_t participant_index) {
void Party::ScheduleWakeup(WakeupMask mask) {
// Or in the wakeup bit for the participant, AND the locked bit.
uint64_t prev_state = state_.fetch_or((1 << participant_index) | kLocked,
uint64_t prev_state = state_.fetch_or((mask & kWakeupMask) | kLocked,
std::memory_order_acquire);
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "Party::ScheduleWakeup(%" PRIu64 "): prev_state=%s",
participant_index, StateToString(prev_state).c_str());
std::vector<int> wakeups;
for (size_t i = 0; i < 8 * sizeof(WakeupMask); i++) {
if (mask & (1 << i)) wakeups.push_back(i);
}
gpr_log(GPR_DEBUG, "%s[party] ScheduleWakeup({%s}): prev_state=%s",
DebugTag().c_str(), absl::StrJoin(wakeups, ",").c_str(),
StateToString(prev_state).c_str());
}
// If the lock was not held now we hold it, so we need to run.
if ((prev_state & kLocked) == 0) Run();
if ((prev_state & kLocked) == 0) RunLocked();
}
void Party::Wakeup(void* arg) {
ScheduleWakeup(reinterpret_cast<uintptr_t>(arg));
void Party::Wakeup(WakeupMask wakeup_mask) {
ScheduleWakeup(wakeup_mask);
Unref();
}
void Party::Drop(void*) { Unref(); }
void Party::Drop(WakeupMask) { Unref(); }
std::string Party::StateToString(uint64_t state) {
std::vector<std::string> parts;
if (state & kLocked) parts.push_back("locked");
if (state & kAddsPending) parts.push_back("adds_pending");
if (state & kDestroying) parts.push_back("over");
parts.push_back(
absl::StrFormat("refs=%" PRIuPTR, (state & kRefMask) >> kRefShift));
std::vector<int> allocated;
std::vector<int> participants;
for (size_t i = 0; i < kMaxParticipants; i++) {
if ((state & (1 << i)) != 0) participants.push_back(i);
if ((state & (1ull << i)) != 0) participants.push_back(i);
if ((state & (1ull << (i + kAllocatedShift))) != 0) allocated.push_back(i);
}
if (!allocated.empty()) {
parts.push_back(
absl::StrFormat("allocated={%s}", absl::StrJoin(allocated, ",")));
}
if (!participants.empty()) {
parts.push_back(
absl::StrFormat("wakeup=%s", absl::StrJoin(participants, ",")));
absl::StrFormat("wakeup={%s}", absl::StrJoin(participants, ",")));
}
return absl::StrCat("{", absl::StrJoin(parts, " "), "}");
}
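An illustrative decoding of the packed state word that StateToString renders, using the bit layout declared in party.h further down (kWakeupMask, kAllocatedMask, kDestroying, kLocked, kRefMask, with shifts of 16 and 40 taken from that header):

// Hypothetical state: three refs, locked, participant slot 0 allocated and woken.
uint64_t state = (3ull << 40)    // refcount lives in bits 40..63 (kRefShift)
                 | (1ull << 35)  // kLocked
                 | (1ull << 16)  // slot 0 allocated (kAllocatedShift = 16)
                 | 1ull;         // slot 0 has a pending wakeup
uint64_t refs = (state & 0xffff'ff00'0000'0000ull) >> 40;       // == 3
uint64_t wakeups = state & 0x0000'0000'0000'ffffull;            // == 0b1
uint64_t allocated = (state & 0x0000'0000'ffff'0000ull) >> 16;  // == 0b1
// StateToString(state) would then report roughly: {locked refs=3 allocated={0} wakeup={0}}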

@ -24,9 +24,17 @@
#include <string>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include <grpc/support/log.h>
#include "src/core/lib/gprpp/construct_destruct.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/detail/promise_factory.h"
#include "src/core/lib/resource_quota/arena.h"
namespace grpc_core {
@ -34,29 +42,43 @@ namespace grpc_core {
// A Party is an Activity with multiple participant promises.
class Party : public Activity, private Wakeable {
public:
explicit Party(Arena* arena) : arena_(arena) {}
Party(const Party&) = delete;
Party& operator=(const Party&) = delete;
// Spawn one promise onto the arena.
// Spawn one promise into the party.
// The promise will be polled until it is resolved, or until the party is shut
// down.
// The on_complete callback will be called with the result of the promise if
// it completes.
// A maximum of sixteen promises can be spawned onto a party.
template <typename Promise, typename OnComplete>
void Spawn(Promise promise, OnComplete on_complete);
template <typename Factory, typename OnComplete>
void Spawn(absl::string_view name, Factory promise_factory,
OnComplete on_complete);
void Orphan() final;
void Orphan() final { Crash("unused"); }
// Activity implementation: not allowed to be overridden by derived types.
void ForceImmediateRepoll() final;
void ForceImmediateRepoll(WakeupMask mask) final;
WakeupMask CurrentParticipant() const final {
GPR_DEBUG_ASSERT(currently_polling_ != kNotPolling);
return 1u << currently_polling_;
}
Waker MakeOwningWaker() final;
Waker MakeNonOwningWaker() final;
std::string ActivityDebugTag(void* arg) const final;
std::string ActivityDebugTag(WakeupMask wakeup_mask) const final;
void IncrementRefCount(DebugLocation whence = {});
void Unref(DebugLocation whence = {});
RefCountedPtr<Party> Ref() {
IncrementRefCount();
return RefCountedPtr<Party>(this);
}
Arena* arena() const { return arena_; }
protected:
explicit Party(Arena* arena, size_t initial_refs)
: state_(kOneRef * initial_refs), arena_(arena) {}
~Party() override;
// Main run loop. Must be locked.
@ -64,9 +86,17 @@ class Party : public Activity, private Wakeable {
// be done.
// Derived types will likely want to override this to set up their
// contexts before polling.
virtual void Run();
// Should not be called by derived types except as a tail call to the base
// class RunParty when overriding this method to add custom context.
// Returns true if the party is over.
virtual bool RunParty() GRPC_MUST_USE_RESULT;
Arena* arena() const { return arena_; }
bool RefIfNonZero();
// Destroy any remaining participants.
// Should be called by derived types in response to PartyOver.
// Needs to have normal context setup before calling.
void CancelRemainingParticipants();
private:
// Non-owning wakeup handle.
@ -75,67 +105,91 @@ class Party : public Activity, private Wakeable {
// One participant in the party.
class Participant {
public:
virtual ~Participant();
explicit Participant(absl::string_view name) : name_(name) {}
// Poll the participant. Return true if complete.
// Participant should take care of its own deallocation in this case.
virtual bool Poll() = 0;
// Destroy the participant before finishing.
virtual void Destroy() = 0;
// Return a Handle instance for this participant.
Wakeable* MakeNonOwningWakeable(Party* party);
absl::string_view name() const { return name_; }
protected:
~Participant();
private:
Handle* handle_ = nullptr;
absl::string_view name_;
};
// Concrete implementation of a participant for some promise & oncomplete
// type.
template <typename Promise, typename OnComplete>
template <typename SuppliedFactory, typename OnComplete>
class ParticipantImpl final : public Participant {
using Factory = promise_detail::OncePromiseFactory<void, SuppliedFactory>;
using Promise = typename Factory::Promise;
public:
ParticipantImpl(Promise promise, OnComplete on_complete)
: promise_(std::move(promise)), on_complete_(std::move(on_complete)) {}
ParticipantImpl(absl::string_view name, SuppliedFactory promise_factory,
OnComplete on_complete)
: Participant(name), on_complete_(std::move(on_complete)) {
Construct(&factory_, std::move(promise_factory));
}
~ParticipantImpl() {
if (!started_) {
Destruct(&factory_);
} else {
Destruct(&promise_);
}
}
bool Poll() override {
if (!started_) {
auto p = factory_.Make();
Destruct(&factory_);
Construct(&promise_, std::move(p));
started_ = true;
}
auto p = promise_();
if (auto* r = p.value_if_ready()) {
on_complete_(std::move(*r));
GetContext<Arena>()->DeletePooled(this);
return true;
}
return false;
}
void Destroy() override { GetContext<Arena>()->DeletePooled(this); }
private:
GPR_NO_UNIQUE_ADDRESS Promise promise_;
union {
GPR_NO_UNIQUE_ADDRESS Factory factory_;
GPR_NO_UNIQUE_ADDRESS Promise promise_;
};
GPR_NO_UNIQUE_ADDRESS OnComplete on_complete_;
bool started_ = false;
};
// One participant that's been spawned, but has not yet made it into
// participants_.
// Since it's impossible to block on locking this type, we form a queue of
// participants waiting and drain that prior to polling.
struct AddingParticipant {
Arena::PoolPtr<Participant> participant;
AddingParticipant* next;
};
// Notification that the party has finished and this instance can be deleted.
// Derived types should arrange to call CancelRemainingParticipants during
// this sequence.
virtual void PartyOver() = 0;
// Run the locked part of the party until it is unlocked.
void RunLocked();
// Wakeable implementation
void Wakeup(void* arg) final;
void Drop(void* arg) final;
void Wakeup(WakeupMask wakeup_mask) final;
void Drop(WakeupMask wakeup_mask) final;
// Internal ref counting
void Ref();
bool RefIfNonZero();
void Unref();
// Organize to wake up one participant.
void ScheduleWakeup(uint64_t participant_index);
// Start adding a participant to the party.
// Backs Spawn() after type erasure.
void AddParticipant(Arena::PoolPtr<Participant> participant);
// Drain the add queue.
void DrainAdds(uint64_t& wakeups);
// Take a new participant, and add it to the participants_ array.
// Returns the index of the participant in the array.
size_t SituateNewParticipant(Arena::PoolPtr<Participant> new_participant);
// Organize to wake up some participants.
void ScheduleWakeup(WakeupMask mask);
// Add a participant (backs Spawn, after type erasure to ParticipantFactory).
void AddParticipant(Participant* participant);
// Convert a state into a string.
static std::string StateToString(uint64_t state);
@ -158,34 +212,41 @@ class Party : public Activity, private Wakeable {
// clang-format off
// Bits used to store 16 bits of wakeups
static constexpr uint64_t kWakeupMask = 0x0000'0000'0000'ffff;
static constexpr uint64_t kWakeupMask = 0x0000'0000'0000'ffff;
// Bits used to store 16 bits of allocated participant slots.
static constexpr uint64_t kAllocatedMask = 0x0000'0000'ffff'0000;
// Bit indicating destruction has begun (refs went to zero)
static constexpr uint64_t kDestroying = 0x0000'0001'0000'0000;
// Bit indicating locked or not
static constexpr uint64_t kLocked = 0x0000'0000'0100'0000;
// Bit indicating whether there are adds pending
static constexpr uint64_t kAddsPending = 0x0000'0000'1000'0000;
static constexpr uint64_t kLocked = 0x0000'0008'0000'0000;
// Bits used to store 24 bits of ref counts
static constexpr uint64_t kRefMask = 0xffff'ff00'0000'0000;
static constexpr uint64_t kRefMask = 0xffff'ff00'0000'0000;
// clang-format on
// Number of bits reserved for wakeups gives us the maximum number of
// participants.
static constexpr size_t kMaxParticipants = 16;
// Shift to get from a participant mask to an allocated mask.
static constexpr size_t kAllocatedShift = 16;
// How far to shift to get the refcount
static constexpr size_t kRefShift = 40;
// One ref count
static constexpr uint64_t kOneRef = 1ull << kRefShift;
// Number of bits reserved for wakeups gives us the maximum number of
// participants.
static constexpr size_t kMaxParticipants = 16;
std::atomic<uint64_t> state_;
Arena* const arena_;
absl::InlinedVector<Arena::PoolPtr<Participant>, 1> participants_;
std::atomic<uint64_t> state_{kOneRef};
std::atomic<AddingParticipant*> adding_{nullptr};
uint8_t currently_polling_ = kNotPolling;
// All current participants, using a tagged format.
// If the lower bit is unset, then this is a Participant*.
// If the lower bit is set, then this is a ParticipantFactory*.
std::atomic<Participant*> participants_[kMaxParticipants] = {};
};
template <typename Promise, typename OnComplete>
void Party::Spawn(Promise promise, OnComplete on_complete) {
AddParticipant(arena_->MakePooled<ParticipantImpl<Promise, OnComplete>>(
std::move(promise), std::move(on_complete)));
template <typename Factory, typename OnComplete>
void Party::Spawn(absl::string_view name, Factory promise_factory,
OnComplete on_complete) {
AddParticipant(arena_->NewPooled<ParticipantImpl<Factory, OnComplete>>(
name, std::move(promise_factory), std::move(on_complete)));
}
} // namespace grpc_core
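A usage sketch (not part of this change) of the new Spawn signature, assuming a Party subclass (with an Arena and PartyOver/RunParty implemented) reachable through party:

party->Spawn(
    "example-op",
    // Promise factory: only invoked when the party first polls this participant.
    []() {
      return []() -> Poll<absl::Status> { return absl::OkStatus(); };
    },
    // On-complete: receives the resolved value; runs inside the party.
    [](absl::Status status) { GPR_ASSERT(status.ok()); });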

@ -25,7 +25,6 @@
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
@ -39,7 +38,6 @@
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/if.h"
#include "src/core/lib/promise/interceptor_list.h"
#include "src/core/lib/promise/intra_activity_waiter.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/seq.h"
@ -160,9 +158,11 @@ class Center : public InterceptorList<T> {
case ValueState::kClosed:
case ValueState::kReadyClosed:
case ValueState::kCancelled:
case ValueState::kWaitingForAckAndClosed:
return false;
case ValueState::kReady:
case ValueState::kAcked:
case ValueState::kWaitingForAck:
return on_empty_.pending();
case ValueState::kEmpty:
value_state_ = ValueState::kReady;
@ -180,11 +180,14 @@ class Center : public InterceptorList<T> {
GPR_DEBUG_ASSERT(refs_ != 0);
switch (value_state_) {
case ValueState::kClosed:
case ValueState::kReadyClosed:
return true;
case ValueState::kCancelled:
return false;
case ValueState::kReady:
case ValueState::kReadyClosed:
case ValueState::kEmpty:
case ValueState::kWaitingForAck:
case ValueState::kWaitingForAckAndClosed:
return on_empty_.pending();
case ValueState::kAcked:
value_state_ = ValueState::kEmpty;
@ -206,12 +209,14 @@ class Center : public InterceptorList<T> {
switch (value_state_) {
case ValueState::kEmpty:
case ValueState::kAcked:
case ValueState::kWaitingForAck:
case ValueState::kWaitingForAckAndClosed:
return on_full_.pending();
case ValueState::kReadyClosed:
this->ResetInterceptorList();
value_state_ = ValueState::kClosed;
ABSL_FALLTHROUGH_INTENDED;
value_state_ = ValueState::kWaitingForAckAndClosed;
return std::move(value_);
case ValueState::kReady:
value_state_ = ValueState::kWaitingForAck;
return std::move(value_);
case ValueState::kClosed:
case ValueState::kCancelled:
@ -220,18 +225,89 @@ class Center : public InterceptorList<T> {
GPR_UNREACHABLE_CODE(return absl::nullopt);
}
// Check if the pipe is closed for sending (if there is a value still queued
// but the pipe is closed, reports closed).
Poll<bool> PollClosedForSender() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%s", DebugOpString("PollClosedForSender").c_str());
}
GPR_DEBUG_ASSERT(refs_ != 0);
switch (value_state_) {
case ValueState::kEmpty:
case ValueState::kAcked:
case ValueState::kReady:
case ValueState::kWaitingForAck:
return on_closed_.pending();
case ValueState::kWaitingForAckAndClosed:
case ValueState::kReadyClosed:
case ValueState::kClosed:
return false;
case ValueState::kCancelled:
return true;
}
GPR_UNREACHABLE_CODE(return true);
}
// Check if the pipe is closed for receiving (if there is a value still queued
// but the pipe is closed, reports open).
Poll<bool> PollClosedForReceiver() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%s", DebugOpString("PollClosedForReceiver").c_str());
}
GPR_DEBUG_ASSERT(refs_ != 0);
switch (value_state_) {
case ValueState::kEmpty:
case ValueState::kAcked:
case ValueState::kReady:
case ValueState::kReadyClosed:
case ValueState::kWaitingForAck:
case ValueState::kWaitingForAckAndClosed:
return on_closed_.pending();
case ValueState::kClosed:
return false;
case ValueState::kCancelled:
return true;
}
GPR_UNREACHABLE_CODE(return true);
}
Poll<Empty> PollEmpty() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%s", DebugOpString("PollEmpty").c_str());
}
GPR_DEBUG_ASSERT(refs_ != 0);
switch (value_state_) {
case ValueState::kReady:
case ValueState::kReadyClosed:
return on_empty_.pending();
case ValueState::kWaitingForAck:
case ValueState::kWaitingForAckAndClosed:
case ValueState::kAcked:
case ValueState::kEmpty:
case ValueState::kClosed:
case ValueState::kCancelled:
return Empty{};
}
GPR_UNREACHABLE_CODE(return Empty{});
}
void AckNext() {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_INFO, "%s", DebugOpString("AckNext").c_str());
}
switch (value_state_) {
case ValueState::kReady:
case ValueState::kWaitingForAck:
value_state_ = ValueState::kAcked;
on_empty_.Wake();
break;
case ValueState::kReadyClosed:
case ValueState::kWaitingForAckAndClosed:
this->ResetInterceptorList();
value_state_ = ValueState::kClosed;
on_closed_.Wake();
on_empty_.Wake();
on_full_.Wake();
break;
case ValueState::kClosed:
case ValueState::kCancelled:
@ -251,14 +327,22 @@ class Center : public InterceptorList<T> {
case ValueState::kAcked:
this->ResetInterceptorList();
value_state_ = ValueState::kClosed;
on_empty_.Wake();
on_full_.Wake();
on_closed_.Wake();
break;
case ValueState::kReady:
value_state_ = ValueState::kReadyClosed;
on_closed_.Wake();
break;
case ValueState::kWaitingForAck:
value_state_ = ValueState::kWaitingForAckAndClosed;
on_closed_.Wake();
break;
case ValueState::kReadyClosed:
case ValueState::kClosed:
case ValueState::kCancelled:
case ValueState::kWaitingForAckAndClosed:
break;
}
}
@ -272,13 +356,15 @@ class Center : public InterceptorList<T> {
case ValueState::kAcked:
case ValueState::kReady:
case ValueState::kReadyClosed:
case ValueState::kWaitingForAck:
case ValueState::kWaitingForAckAndClosed:
this->ResetInterceptorList();
value_state_ = ValueState::kCancelled;
on_empty_.Wake();
on_full_.Wake();
on_closed_.Wake();
break;
case ValueState::kClosed:
value_state_ = ValueState::kCancelled;
break;
case ValueState::kCancelled:
break;
}
@ -305,6 +391,8 @@ class Center : public InterceptorList<T> {
kEmpty,
// Value has been pushed but not acked, it's possible to receive.
kReady,
// Value has been read and not acked, both send/receive blocked until ack.
kWaitingForAck,
// Value has been received and acked, we can unblock senders and transition
// to empty.
kAcked,
@ -313,6 +401,9 @@ class Center : public InterceptorList<T> {
// Pipe is closed successfully, no more values can be sent
// (but one value is queued and ready to be received)
kReadyClosed,
// Pipe is closed successfully, no more values can be sent
// (but one value is queued and waiting to be acked)
kWaitingForAckAndClosed,
// Pipe is closed unsuccessfully, no more values can be sent
kCancelled,
};
@ -321,7 +412,8 @@ class Center : public InterceptorList<T> {
return absl::StrCat(DebugTag(), op, " refs=", refs_,
" value_state=", ValueStateName(value_state_),
" on_empty=", on_empty_.DebugString().c_str(),
" on_full=", on_full_.DebugString().c_str());
" on_full=", on_full_.DebugString().c_str(),
" on_closed=", on_closed_.DebugString().c_str());
}
static const char* ValueStateName(ValueState state) {
@ -336,6 +428,10 @@ class Center : public InterceptorList<T> {
return "Closed";
case ValueState::kReadyClosed:
return "ReadyClosed";
case ValueState::kWaitingForAck:
return "WaitingForAck";
case ValueState::kWaitingForAckAndClosed:
return "WaitingForAckAndClosed";
case ValueState::kCancelled:
return "Cancelled";
}
@ -349,6 +445,7 @@ class Center : public InterceptorList<T> {
ValueState value_state_;
IntraActivityWaiter on_empty_;
IntraActivityWaiter on_full_;
IntraActivityWaiter on_closed_;
// Make failure to destruct show up in ASAN builds.
#ifndef NDEBUG
@ -388,11 +485,25 @@ class PipeSender {
// receiver is either closed or able to receive another message.
PushType Push(T value);
// Return a promise that resolves when the receiver is closed.
// The resolved value is a bool - true if the pipe was cancelled, false if it
// was closed successfully.
// Checks closed from the sender's perspective: that is, if there is a value in
// the pipe but the pipe is closed, reports closed.
auto AwaitClosed() {
return [center = center_]() { return center->PollClosedForSender(); };
}
// Interject PromiseFactory f into the pipeline.
// f will be called with the current value traversing the pipe, and should
// return a value to replace it with.
// Interjects at the Push end of the pipe.
template <typename Fn>
void InterceptAndMap(Fn f, DebugLocation from = {}) {
center_->PrependMap(std::move(f), from);
}
// Per above, but calls cleanup_fn when the pipe is closed.
template <typename Fn, typename OnHalfClose>
void InterceptAndMap(Fn f, OnHalfClose cleanup_fn, DebugLocation from = {}) {
center_->PrependMapWithCleanup(std::move(f), std::move(cleanup_fn), from);
@ -409,6 +520,31 @@ class PipeSender {
#endif
};
template <typename T>
class PipeReceiver;
namespace pipe_detail {
// Implementation of PipeReceiver::Next promise.
template <typename T>
class Next {
public:
Next(const Next&) = delete;
Next& operator=(const Next&) = delete;
Next(Next&& other) noexcept = default;
Next& operator=(Next&& other) noexcept = default;
Poll<absl::optional<T>> operator()() { return center_->Next(); }
private:
friend class PipeReceiver<T>;
explicit Next(RefCountedPtr<Center<T>> center) : center_(std::move(center)) {}
RefCountedPtr<Center<T>> center_;
};
} // namespace pipe_detail
// Receive end of a Pipe.
template <typename T>
class PipeReceiver {
@ -418,7 +554,7 @@ class PipeReceiver {
PipeReceiver(PipeReceiver&& other) noexcept = default;
PipeReceiver& operator=(PipeReceiver&& other) noexcept = default;
~PipeReceiver() {
if (center_ != nullptr) center_->MarkClosed();
if (center_ != nullptr) center_->MarkCancelled();
}
void Swap(PipeReceiver<T>* other) { std::swap(center_, other->center_); }
@ -428,13 +564,55 @@ class PipeReceiver {
// message was received, or no value if the other end of the pipe was closed.
// Blocks the promise until the receiver is either closed or a message is
// available.
auto Next();
auto Next() {
return Seq(
pipe_detail::Next<T>(center_->Ref()),
[center = center_->Ref()](absl::optional<T> value) {
bool open = value.has_value();
bool cancelled = center->cancelled();
return If(
open,
[center = std::move(center), value = std::move(value)]() mutable {
auto run = center->Run(std::move(value));
return Map(std::move(run),
[center = std::move(center)](
absl::optional<T> value) mutable {
if (value.has_value()) {
center->value() = std::move(*value);
return NextResult<T>(std::move(center));
} else {
center->MarkCancelled();
return NextResult<T>(true);
}
});
},
[cancelled]() { return NextResult<T>(cancelled); });
});
}
// Return a promise that resolves when the receiver is closed.
// The resolved value is a bool - true if the pipe was cancelled, false if it
// was closed successfully.
// Checks closed from the receiver's perspective: that is, if there is a value
// in the pipe but the pipe is closed, reports open until that value is read.
auto AwaitClosed() {
return [center = center_]() { return center->PollClosedForReceiver(); };
}
auto AwaitEmpty() {
return [center = center_]() { return center->PollEmpty(); };
}
// Interject PromiseFactory f into the pipeline.
// f will be called with the current value traversing the pipe, and should
// return a value to replace it with.
// Interjects at the Next end of the pipe.
template <typename Fn>
void InterceptAndMap(Fn f, DebugLocation from = {}) {
center_->AppendMap(std::move(f), from);
}
// Per above, but calls cleanup_fn when the pipe is closed.
template <typename Fn, typename OnHalfClose>
void InterceptAndMapWithHalfClose(Fn f, OnHalfClose cleanup_fn,
DebugLocation from = {}) {
@ -459,12 +637,19 @@ template <typename T>
class Push {
public:
Push(const Push&) = delete;
Push& operator=(const Push&) = delete;
Push(Push&& other) noexcept = default;
Push& operator=(Push&& other) noexcept = default;
Poll<bool> operator()() {
if (center_ == nullptr) return false;
if (center_ == nullptr) {
if (grpc_trace_promise_primitives.enabled()) {
gpr_log(GPR_DEBUG, "%s Pipe push has a null center",
Activity::current()->DebugTag().c_str());
}
return false;
}
if (auto* p = absl::get_if<T>(&state_)) {
auto r = center_->Push(p);
if (auto* ok = r.value_if_ready()) {
@ -489,24 +674,6 @@ class Push {
absl::variant<T, AwaitingAck> state_;
};
// Implementation of PipeReceiver::Next promise.
template <typename T>
class Next {
public:
Next(const Next&) = delete;
Next& operator=(const Next&) = delete;
Next(Next&& other) noexcept = default;
Next& operator=(Next&& other) noexcept = default;
Poll<absl::optional<T>> operator()() { return center_->Next(); }
private:
friend class PipeReceiver<T>;
explicit Next(RefCountedPtr<Center<T>> center) : center_(std::move(center)) {}
RefCountedPtr<Center<T>> center_;
};
} // namespace pipe_detail
template <typename T>
@ -515,33 +682,6 @@ pipe_detail::Push<T> PipeSender<T>::Push(T value) {
std::move(value));
}
template <typename T>
auto PipeReceiver<T>::Next() {
return Seq(
pipe_detail::Next<T>(center_->Ref()),
[center = center_->Ref()](absl::optional<T> value) {
bool open = value.has_value();
bool cancelled = center->cancelled();
return If(
open,
[center = std::move(center), value = std::move(value)]() mutable {
auto run_interceptors = center->Run(std::move(value));
return Map(std::move(run_interceptors),
[center = std::move(center)](
absl::optional<T> value) mutable {
if (value.has_value()) {
center->value() = std::move(*value);
return NextResult<T>(std::move(center));
} else {
center->MarkCancelled();
return NextResult<T>(true);
}
});
},
[cancelled]() { return NextResult<T>(cancelled); });
});
}
template <typename T>
using PipeReceiverNextType = decltype(std::declval<PipeReceiver<T>>().Next());
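A usage sketch (not in this diff) tying the pieces above together. It assumes the Pipe<T> aggregate defined elsewhere in this header (exposing .sender and .receiver) and is meant to run inside the activity that owns the pipe:

Pipe<int> pipe;
// Push resolves to bool: true once the value is accepted, false if the
// receiver is gone.
auto writer = Seq(pipe.sender.Push(42), [](bool pushed) {
  return pushed ? absl::OkStatus() : absl::CancelledError();
});
// Next resolves to NextResult<int>; has_value() is false if the pipe closed
// before a value arrived.
auto reader = Seq(pipe.receiver.Next(), [](NextResult<int> next) {
  return next.has_value() ? absl::OkStatus() : absl::CancelledError();
});
// The AwaitClosed promises added above resolve to bool (true == cancelled).
auto closed = pipe.receiver.AwaitClosed();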

@ -17,10 +17,10 @@
#include <grpc/support/port_platform.h>
#include <functional>
#include <type_traits>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"
@ -33,7 +33,7 @@ namespace grpc_core {
// Most of the time we just pass around the functor, but occasionally
// it pays to have a type erased variant, which we define here.
template <typename T>
using Promise = std::function<Poll<T>()>;
using Promise = absl::AnyInvocable<Poll<T>()>;
// Helper to execute a promise immediately and return either the result or
// nothing.
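Since the Promise<T> alias above now uses absl::AnyInvocable, a type-erased promise can carry move-only captures, which std::function rejected. A small illustrative sketch (the names below are hypothetical):

Promise<int> p = [value = std::make_unique<int>(42)]() -> Poll<int> {
  if (*value < 0) return Pending{};  // still waiting
  return *value;                     // ready with a value
};
Poll<int> result = p();              // poll the erased promise as usual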

@ -54,6 +54,9 @@ Arena::~Arena() {
gpr_free_aligned(z);
z = prev_z;
}
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
gpr_log(GPR_ERROR, "DESTRUCT_ARENA %p", this);
#endif
}
Arena* Arena::Create(size_t initial_size, MemoryAllocator* memory_allocator) {
@ -71,7 +74,7 @@ std::pair<Arena*, void*> Arena::CreateWithAlloc(
return std::make_pair(new_arena, first_alloc);
}
void Arena::Destroy() {
void Arena::DestroyManagedNewObjects() {
ManagedNewObject* p;
// Outer loop: clear the managed new object list.
// We do this repeatedly in case a destructor ends up allocating something.
@ -82,6 +85,10 @@ void Arena::Destroy() {
Destruct(std::exchange(p, p->next));
}
}
}
void Arena::Destroy() {
DestroyManagedNewObjects();
memory_allocator_->Release(total_allocated_.load(std::memory_order_relaxed));
this->~Arena();
gpr_free_aligned(this);
@ -114,7 +121,8 @@ void Arena::ManagedNewObject::Link(std::atomic<ManagedNewObject*>* head) {
}
}
void* Arena::AllocPooled(size_t alloc_size, std::atomic<FreePoolNode*>* head) {
void* Arena::AllocPooled(size_t obj_size, size_t alloc_size,
std::atomic<FreePoolNode*>* head) {
// ABA mitigation:
// AllocPooled may be called by multiple threads, and to remove a node from
// the free list we need to manipulate the next pointer, which may be done
@ -132,7 +140,11 @@ void* Arena::AllocPooled(size_t alloc_size, std::atomic<FreePoolNode*>* head) {
FreePoolNode* p = head->exchange(nullptr, std::memory_order_acquire);
// If there are no nodes in the free list, then go ahead and allocate from the
// arena.
if (p == nullptr) return Alloc(alloc_size);
if (p == nullptr) {
void* r = Alloc(alloc_size);
TracePoolAlloc(obj_size, r);
return r;
}
// We had a non-empty free list... but we own the *entire* free list.
// We only want one node, so if there are extras we'd better give them back.
if (p->next != nullptr) {
@ -151,10 +163,14 @@ void* Arena::AllocPooled(size_t alloc_size, std::atomic<FreePoolNode*>* head) {
extra = next;
}
}
TracePoolAlloc(obj_size, p);
return p;
}
void Arena::FreePooled(void* p, std::atomic<FreePoolNode*>* head) {
// May spuriously trace a free of an already freed object - see AllocPooled
// ABA mitigation.
TracePoolFree(p);
FreePoolNode* node = static_cast<FreePoolNode*>(p);
node->next = head->load(std::memory_order_acquire);
while (!head->compare_exchange_weak(

@ -45,6 +45,9 @@
#include "src/core/lib/promise/context.h"
#include "src/core/lib/resource_quota/memory_quota.h"
// #define GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
// #define GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
namespace grpc_core {
namespace arena_detail {
@ -114,7 +117,9 @@ PoolAndSize ChoosePoolForAllocationSize(
} // namespace arena_detail
class Arena {
using PoolSizes = absl::integer_sequence<size_t, 256, 512, 768>;
// Selected pool sizes.
// How to tune: see tools/codegen/core/optimize_arena_pool_sizes.py
using PoolSizes = absl::integer_sequence<size_t, 80, 304, 528, 1024>;
struct FreePoolNode {
FreePoolNode* next;
};
@ -130,6 +135,13 @@ class Arena {
size_t initial_size, size_t alloc_size,
MemoryAllocator* memory_allocator);
// Destroy all `ManagedNew` allocated objects.
// Allows safe destruction of these objects even if they need context held by
// the arena.
// Idempotent.
// TODO(ctiller): eliminate ManagedNew.
void DestroyManagedNewObjects();
// Destroy an arena.
void Destroy();
@ -170,6 +182,7 @@ class Arena {
return &p->t;
}
#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
class PooledDeleter {
public:
explicit PooledDeleter(std::atomic<FreePoolNode*>* free_list)
@ -209,6 +222,7 @@ class Arena {
&pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
return PoolPtr<T>(
new (AllocPooled(
sizeof(T),
arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
free_list)) T(std::forward<Args>(args)...),
PooledDeleter(free_list));
@ -229,12 +243,95 @@ class Arena {
return PoolPtr<T[]>(new (Alloc(where.alloc_size)) T[n],
PooledDeleter(nullptr));
} else {
return PoolPtr<T[]>(
new (AllocPooled(where.alloc_size, &pools_[where.pool_index])) T[n],
PooledDeleter(&pools_[where.pool_index]));
return PoolPtr<T[]>(new (AllocPooled(where.alloc_size, where.alloc_size,
&pools_[where.pool_index])) T[n],
PooledDeleter(&pools_[where.pool_index]));
}
}
// Like MakePooled, but with manual memory management.
// The caller is responsible for calling DeletePooled() on the returned
// pointer, and is expected to call it with the same type T as was passed to this
// function (else the free list returned to the arena will be corrupted).
template <typename T, typename... Args>
T* NewPooled(Args&&... args) {
auto* free_list =
&pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
return new (AllocPooled(
sizeof(T),
arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
free_list)) T(std::forward<Args>(args)...);
}
template <typename T>
void DeletePooled(T* p) {
auto* free_list =
&pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
p->~T();
FreePooled(p, free_list);
}
#else
class PooledDeleter {
public:
PooledDeleter() = default;
explicit PooledDeleter(std::nullptr_t) : delete_(false) {}
template <typename T>
void operator()(T* p) {
// TODO(ctiller): promise based filter hijacks ownership of some pointers
// to make them appear as PoolPtr without really transferring ownership,
// by setting the arena to nullptr.
// This is a transitional hack and should be removed once promise based
// filter is removed.
if (delete_) delete p;
}
bool has_freelist() const { return delete_; }
private:
bool delete_ = true;
};
template <typename T>
using PoolPtr = std::unique_ptr<T, PooledDeleter>;
// Make a unique_ptr to T that is allocated from the arena.
// When the pointer is released, the memory may be reused for other
// MakePooled(.*) calls.
// CAUTION: The amount of memory allocated is rounded up to the nearest
// value in Arena::PoolSizes, and so this may pessimize total
// arena size.
template <typename T, typename... Args>
PoolPtr<T> MakePooled(Args&&... args) {
return PoolPtr<T>(new T(std::forward<Args>(args)...), PooledDeleter());
}
// Make a unique_ptr to an array of T that is allocated from the arena.
// When the pointer is released, the memory may be reused for other
// MakePooled(.*) calls.
// One can use MakePooledArray<char> to allocate a buffer of bytes.
// CAUTION: The amount of memory allocated is rounded up to the nearest
// value in Arena::PoolSizes, and so this may pessimize total
// arena size.
template <typename T>
PoolPtr<T[]> MakePooledArray(size_t n) {
return PoolPtr<T[]>(new T[n], PooledDeleter());
}
// Like MakePooled, but with manual memory management.
// The caller is responsible for calling DeletePooled() on the returned
// pointer, and is expected to call it with the same type T as was passed to this
// function (else the free list returned to the arena will be corrupted).
template <typename T, typename... Args>
T* NewPooled(Args&&... args) {
return new T(std::forward<Args>(args)...);
}
template <typename T>
void DeletePooled(T* p) {
delete p;
}
#endif
private:
struct Zone {
Zone* prev;
@ -275,9 +372,24 @@ class Arena {
void* AllocZone(size_t size);
void* AllocPooled(size_t alloc_size, std::atomic<FreePoolNode*>* head);
void* AllocPooled(size_t obj_size, size_t alloc_size,
std::atomic<FreePoolNode*>* head);
static void FreePooled(void* p, std::atomic<FreePoolNode*>* head);
void TracePoolAlloc(size_t size, void* ptr) {
(void)size;
(void)ptr;
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
gpr_log(GPR_ERROR, "ARENA %p ALLOC %" PRIdPTR " @ %p", this, size, ptr);
#endif
}
static void TracePoolFree(void* ptr) {
(void)ptr;
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
gpr_log(GPR_ERROR, "FREE %p", ptr);
#endif
}
// Keep track of the total used size. We use this in our call sizing
// hysteresis.
std::atomic<size_t> total_used_{0};
@ -290,7 +402,9 @@ class Arena {
// last zone; the zone list is reverse-walked during arena destruction only.
std::atomic<Zone*> last_zone_{nullptr};
std::atomic<ManagedNewObject*> managed_new_head_{nullptr};
#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
std::atomic<FreePoolNode*> pools_[PoolSizes::size()]{};
#endif
// The backing memory quota
MemoryAllocator* const memory_allocator_;
};
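A usage sketch (not part of this change) of the pooled allocation surface above, assuming an Arena* obtained via Arena::Create:

struct Example {
  explicit Example(int v) : v(v) {}
  int v;
};
// RAII form: memory goes back to the matching per-size free list when the
// PoolPtr is destroyed.
Arena::PoolPtr<Example> owned = arena->MakePooled<Example>(1);
// Manual form added in this change: NewPooled/DeletePooled must be paired with
// the same T, otherwise the memory is returned to the wrong free list.
Example* raw = arena->NewPooled<Example>(2);
arena->DeletePooled(raw);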

@ -18,12 +18,12 @@
#include <grpc/support/port_platform.h>
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
@ -41,6 +41,7 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/channel/promise_based_filter.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/status_helper.h"
@ -57,6 +58,7 @@
#include "src/core/lib/security/transport/auth_filters.h" // IWYU pragma: keep
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
@ -120,12 +122,28 @@ class ServerAuthFilter::RunApplicationCode {
// memory later
RunApplicationCode(ServerAuthFilter* filter, CallArgs call_args)
: state_(GetContext<Arena>()->ManagedNew<State>(std::move(call_args))) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_ERROR,
"%s[server-auth]: Delegate to application: filter=%p this=%p "
"auth_ctx=%p",
Activity::current()->DebugTag().c_str(), filter, this,
filter->auth_context_.get());
}
filter->server_credentials_->auth_metadata_processor().process(
filter->server_credentials_->auth_metadata_processor().state,
filter->auth_context_.get(), state_->md.metadata, state_->md.count,
OnMdProcessingDone, state_);
}
RunApplicationCode(const RunApplicationCode&) = delete;
RunApplicationCode& operator=(const RunApplicationCode&) = delete;
RunApplicationCode(RunApplicationCode&& other) noexcept
: state_(std::exchange(other.state_, nullptr)) {}
RunApplicationCode& operator=(RunApplicationCode&& other) noexcept {
state_ = std::exchange(other.state_, nullptr);
return *this;
}
Poll<absl::StatusOr<CallArgs>> operator()() {
if (state_->done.load(std::memory_order_acquire)) {
return Poll<absl::StatusOr<CallArgs>>(std::move(state_->call_args));

@ -480,7 +480,7 @@ int grpc_slice_slice(grpc_slice haystack, grpc_slice needle) {
}
const uint8_t* last = haystack_bytes + haystack_len - needle_len;
for (const uint8_t* cur = haystack_bytes; cur != last; ++cur) {
for (const uint8_t* cur = haystack_bytes; cur <= last; ++cur) {
if (0 == memcmp(cur, needle_bytes, needle_len)) {
return static_cast<int>(cur - haystack_bytes);
}

File diff suppressed because it is too large

@ -119,6 +119,11 @@ class CallContext {
// TODO(ctiller): remove this once transport APIs are promise based
void Unref(const char* reason = "call_context");
RefCountedPtr<CallContext> Ref() {
IncrementRefCount();
return RefCountedPtr<CallContext>(this);
}
grpc_call_stats* call_stats() { return &call_stats_; }
gpr_atm* peer_string_atm_ptr();
grpc_polling_entity* polling_entity() { return &pollent_; }

@ -79,6 +79,7 @@ ArenaPromise<ServerMetadataHandle> LameClientFilter::MakeCallPromise(
if (args.server_to_client_messages != nullptr) {
args.server_to_client_messages->Close();
}
args.client_initial_metadata_outstanding.Complete(true);
return Immediate(ServerMetadataFromStatus(error_));
}

@ -0,0 +1,179 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#include "src/core/lib/transport/batch_builder.h"
#include <type_traits>
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
namespace grpc_core {
BatchBuilder::BatchBuilder(grpc_transport_stream_op_batch_payload* payload)
: payload_(payload) {}
void BatchBuilder::PendingCompletion::CompletionCallback(
void* self, grpc_error_handle error) {
auto* pc = static_cast<PendingCompletion*>(self);
auto* party = pc->batch->party.get();
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG, "%s[connected] Finish batch-component %s for %s: status=%s",
party->DebugTag().c_str(), std::string(pc->name()).c_str(),
grpc_transport_stream_op_batch_string(&pc->batch->batch, false).c_str(),
error.ToString().c_str());
}
party->Spawn(
"batch-completion",
[pc, error = std::move(error)]() mutable {
RefCountedPtr<Batch> batch = std::exchange(pc->batch, nullptr);
pc->done_latch.Set(std::move(error));
return Empty{};
},
[](Empty) {});
}
BatchBuilder::PendingCompletion::PendingCompletion(RefCountedPtr<Batch> batch)
: batch(std::move(batch)) {
GRPC_CLOSURE_INIT(&on_done_closure, CompletionCallback, this, nullptr);
}
BatchBuilder::Batch::Batch(grpc_transport_stream_op_batch_payload* payload,
grpc_stream_refcount* stream_refcount)
: party(static_cast<Party*>(Activity::current())->Ref()),
stream_refcount(stream_refcount) {
batch.payload = payload;
batch.is_traced = GetContext<CallContext>()->traced();
#ifndef NDEBUG
grpc_stream_ref(stream_refcount, "pending-batch");
#else
grpc_stream_ref(stream_refcount);
#endif
}
BatchBuilder::Batch::~Batch() {
auto* arena = party->arena();
if (pending_receive_message != nullptr) {
arena->DeletePooled(pending_receive_message);
}
if (pending_receive_initial_metadata != nullptr) {
arena->DeletePooled(pending_receive_initial_metadata);
}
if (pending_receive_trailing_metadata != nullptr) {
arena->DeletePooled(pending_receive_trailing_metadata);
}
if (pending_sends != nullptr) {
arena->DeletePooled(pending_sends);
}
if (batch.cancel_stream) {
arena->DeletePooled(batch.payload);
}
#ifndef NDEBUG
grpc_stream_unref(stream_refcount, "pending-batch");
#else
grpc_stream_unref(stream_refcount);
#endif
}
BatchBuilder::Batch* BatchBuilder::GetBatch(Target target) {
if (target_.has_value() &&
(target_->stream != target.stream ||
target.transport->vtable
->hacky_disable_stream_op_batch_coalescing_in_connected_channel)) {
FlushBatch();
}
if (!target_.has_value()) {
target_ = target;
batch_ = GetContext<Arena>()->NewPooled<Batch>(payload_,
target_->stream_refcount);
}
GPR_ASSERT(batch_ != nullptr);
return batch_;
}
void BatchBuilder::FlushBatch() {
GPR_ASSERT(batch_ != nullptr);
GPR_ASSERT(target_.has_value());
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG, "%s[connected] Perform transport stream op batch: %p %s",
batch_->party->DebugTag().c_str(), &batch_->batch,
grpc_transport_stream_op_batch_string(&batch_->batch, false).c_str());
}
std::exchange(batch_, nullptr)->PerformWith(*target_);
target_.reset();
}
void BatchBuilder::Batch::PerformWith(Target target) {
grpc_transport_perform_stream_op(target.transport, target.stream, &batch);
}
ServerMetadataHandle BatchBuilder::CompleteSendServerTrailingMetadata(
ServerMetadataHandle sent_metadata, absl::Status send_result,
bool actually_sent) {
if (!send_result.ok()) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG,
"%s[connected] Send metadata failed with error: %s, "
"fabricating trailing metadata",
Activity::current()->DebugTag().c_str(),
send_result.ToString().c_str());
}
sent_metadata->Clear();
sent_metadata->Set(GrpcStatusMetadata(),
static_cast<grpc_status_code>(send_result.code()));
sent_metadata->Set(GrpcMessageMetadata(),
Slice::FromCopiedString(send_result.message()));
sent_metadata->Set(GrpcCallWasCancelled(), true);
}
if (!sent_metadata->get(GrpcCallWasCancelled()).has_value()) {
if (grpc_call_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"%s[connected] Tagging trailing metadata with "
"cancellation status from transport: %s",
Activity::current()->DebugTag().c_str(),
actually_sent ? "sent => not-cancelled" : "not-sent => cancelled");
}
sent_metadata->Set(GrpcCallWasCancelled(), !actually_sent);
}
return sent_metadata;
}
BatchBuilder::Batch* BatchBuilder::MakeCancel(
grpc_stream_refcount* stream_refcount, absl::Status status) {
auto* arena = GetContext<Arena>();
auto* payload =
arena->NewPooled<grpc_transport_stream_op_batch_payload>(nullptr);
auto* batch = arena->NewPooled<Batch>(payload, stream_refcount);
batch->batch.cancel_stream = true;
payload->cancel_stream.cancel_error = std::move(status);
return batch;
}
void BatchBuilder::Cancel(Target target, absl::Status status) {
auto* batch = MakeCancel(target.stream_refcount, std::move(status));
batch->batch.on_complete = NewClosure(
[batch](absl::Status) { batch->party->arena()->DeletePooled(batch); });
batch->PerformWith(target);
}
} // namespace grpc_core

@ -0,0 +1,468 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
#include <grpc/support/port_platform.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include <grpc/status.h>
#include <grpc/support/log.h>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/map.h"
#include "src/core/lib/promise/party.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
namespace grpc_core {
// Build up a transport stream op batch for a stream for a promise based
// connected channel.
// Offered as a context from Call, so that it can collect ALL the updates during
// a single party round, and then push them down to the transport as a single
// transaction.
class BatchBuilder {
public:
explicit BatchBuilder(grpc_transport_stream_op_batch_payload* payload);
~BatchBuilder() {
if (batch_ != nullptr) FlushBatch();
}
struct Target {
grpc_transport* transport;
grpc_stream* stream;
grpc_stream_refcount* stream_refcount;
};
BatchBuilder(const BatchBuilder&) = delete;
BatchBuilder& operator=(const BatchBuilder&) = delete;
// Returns a promise that will resolve to a Status when the send is completed.
auto SendMessage(Target target, MessageHandle message);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendClientInitialMetadata(Target target, ClientMetadataHandle metadata);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendClientTrailingMetadata(Target target);
// Returns a promise that will resolve to a Status when the send is completed.
auto SendServerInitialMetadata(Target target, ServerMetadataHandle metadata);
// Returns a promise that will resolve to a ServerMetadataHandle when the send
// is completed.
//
// If convert_to_cancellation is true, then the status will be converted to a
// cancellation batch instead of a trailing metadata op in a coalesced batch.
//
// This quirk exists because, in the filter-based stack upon which our
// transports were written, a trailing metadata op always needed to be paired
// with an initial op batch, and the transports would wait for the initial
// metadata batch to arrive (in case of reordering up the stack).
auto SendServerTrailingMetadata(Target target, ServerMetadataHandle metadata,
bool convert_to_cancellation);
// Returns a promise that will resolve to a StatusOr<optional<MessageHandle>>
// when a message is received.
// Error => non-ok status
// End of stream => Ok, nullopt (no message)
// Message => Ok, message
auto ReceiveMessage(Target target);
// Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
// when the receive is complete.
auto ReceiveClientInitialMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ClientMetadataHandle>
// when the receive is complete.
auto ReceiveClientTrailingMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
// when the receive is complete.
auto ReceiveServerInitialMetadata(Target target);
// Returns a promise that will resolve to a StatusOr<ServerMetadataHandle>
// when the receive is complete.
auto ReceiveServerTrailingMetadata(Target target);
// Send a cancellation: does not occupy the same payload, nor does it
// coalesce with other ops.
void Cancel(Target target, absl::Status status);
private:
struct Batch;
// Base pending operation
struct PendingCompletion {
explicit PendingCompletion(RefCountedPtr<Batch> batch);
virtual absl::string_view name() const = 0;
static void CompletionCallback(void* self, grpc_error_handle error);
grpc_closure on_done_closure;
Latch<absl::Status> done_latch;
RefCountedPtr<Batch> batch;
protected:
~PendingCompletion() = default;
};
// A pending receive message.
struct PendingReceiveMessage final : public PendingCompletion {
using PendingCompletion::PendingCompletion;
absl::string_view name() const override { return "receive_message"; }
MessageHandle IntoMessageHandle() {
return GetContext<Arena>()->MakePooled<Message>(std::move(*payload),
flags);
}
absl::optional<SliceBuffer> payload;
uint32_t flags;
};
// A pending receive metadata.
struct PendingReceiveMetadata : public PendingCompletion {
using PendingCompletion::PendingCompletion;
Arena::PoolPtr<grpc_metadata_batch> metadata =
GetContext<Arena>()->MakePooled<grpc_metadata_batch>(
GetContext<Arena>());
protected:
~PendingReceiveMetadata() = default;
};
struct PendingReceiveInitialMetadata final : public PendingReceiveMetadata {
using PendingReceiveMetadata::PendingReceiveMetadata;
absl::string_view name() const override {
return "receive_initial_metadata";
}
};
struct PendingReceiveTrailingMetadata final : public PendingReceiveMetadata {
using PendingReceiveMetadata::PendingReceiveMetadata;
absl::string_view name() const override {
return "receive_trailing_metadata";
}
};
// Pending sends in a batch
struct PendingSends final : public PendingCompletion {
using PendingCompletion::PendingCompletion;
absl::string_view name() const override { return "sends"; }
MessageHandle send_message;
Arena::PoolPtr<grpc_metadata_batch> send_initial_metadata;
Arena::PoolPtr<grpc_metadata_batch> send_trailing_metadata;
bool trailing_metadata_sent = false;
};
// One outstanding batch.
struct Batch final {
Batch(grpc_transport_stream_op_batch_payload* payload,
grpc_stream_refcount* stream_refcount);
~Batch();
Batch(const Batch&) = delete;
Batch& operator=(const Batch&) = delete;
void IncrementRefCount() { ++refs; }
void Unref() {
if (--refs == 0) party->arena()->DeletePooled(this);
}
RefCountedPtr<Batch> Ref() {
IncrementRefCount();
return RefCountedPtr<Batch>(this);
}
// Get an initialized pending completion.
// There are four pending completions potentially contained within a batch.
// They can be rather large, so we don't always create all of them. Instead,
// we dynamically create them on the arena as needed.
// This method either returns the existing completion in a batch if that
// completion has already been initialized, or it creates a new completion
// and returns that.
template <typename T>
T* GetInitializedCompletion(T*(Batch::*field)) {
if (this->*field != nullptr) return this->*field;
this->*field = party->arena()->NewPooled<T>(Ref());
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Add batch closure for %s @ %s",
Activity::current()->DebugTag().c_str(),
std::string((this->*field)->name()).c_str(),
(this->*field)->on_done_closure.DebugString().c_str());
}
return this->*field;
}
// grpc_transport_perform_stream_op on target.stream
void PerformWith(Target target);
// Take a promise, and return a promise that holds a ref on this batch until
// the promise completes or is cancelled.
template <typename P>
auto RefUntil(P promise) {
return [self = Ref(), promise = std::move(promise)]() mutable {
return promise();
};
}
grpc_transport_stream_op_batch batch;
PendingReceiveMessage* pending_receive_message = nullptr;
PendingReceiveInitialMetadata* pending_receive_initial_metadata = nullptr;
PendingReceiveTrailingMetadata* pending_receive_trailing_metadata = nullptr;
PendingSends* pending_sends = nullptr;
const RefCountedPtr<Party> party;
grpc_stream_refcount* const stream_refcount;
uint8_t refs = 0;
};
// Get a batch for the given target.
// Currently: if the current batch is for this target, return it - otherwise
// flush the batch and start a new one (and return that).
// This function may change in the future to allow multiple batches to be
// under construction at once (if that turns out to be useful for hedging).
Batch* GetBatch(Target target);
// Flush the current batch down to the transport.
void FlushBatch();
// Create a cancel batch with its own payload.
Batch* MakeCancel(grpc_stream_refcount* stream_refcount, absl::Status status);
// Note: we don't distinguish between client and server metadata here.
// At the time of writing they're both the same thing - and it's unclear
// whether we'll get to separate them prior to batches going away or not.
// So for now we claim YAGNI and just do the simplest possible implementation.
auto SendInitialMetadata(Target target,
Arena::PoolPtr<grpc_metadata_batch> md);
auto ReceiveInitialMetadata(Target target);
auto ReceiveTrailingMetadata(Target target);
// Combine send status and server metadata into a final status to report back
// to the containing call.
static ServerMetadataHandle CompleteSendServerTrailingMetadata(
ServerMetadataHandle sent_metadata, absl::Status send_result,
bool actually_sent);
grpc_transport_stream_op_batch_payload* const payload_;
absl::optional<Target> target_;
Batch* batch_ = nullptr;
};
inline auto BatchBuilder::SendMessage(Target target, MessageHandle message) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Queue send message: %s",
Activity::current()->DebugTag().c_str(),
message->DebugString().c_str());
}
auto* batch = GetBatch(target);
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_message = true;
payload_->send_message.send_message = message->payload();
payload_->send_message.flags = message->flags();
pc->send_message = std::move(message);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendInitialMetadata(
Target target, Arena::PoolPtr<grpc_metadata_batch> md) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Queue send initial metadata: %s",
Activity::current()->DebugTag().c_str(), md->DebugString().c_str());
}
auto* batch = GetBatch(target);
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_initial_metadata = true;
payload_->send_initial_metadata.send_initial_metadata = md.get();
pc->send_initial_metadata = std::move(md);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendClientInitialMetadata(
Target target, ClientMetadataHandle metadata) {
return SendInitialMetadata(target, std::move(metadata));
}
inline auto BatchBuilder::SendClientTrailingMetadata(Target target) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Queue send trailing metadata",
Activity::current()->DebugTag().c_str());
}
auto* batch = GetBatch(target);
auto* pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.on_complete = &pc->on_done_closure;
batch->batch.send_trailing_metadata = true;
auto metadata =
GetContext<Arena>()->MakePooled<grpc_metadata_batch>(GetContext<Arena>());
payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
payload_->send_trailing_metadata.sent = nullptr;
pc->send_trailing_metadata = std::move(metadata);
return batch->RefUntil(pc->done_latch.WaitAndCopy());
}
inline auto BatchBuilder::SendServerInitialMetadata(
Target target, ServerMetadataHandle metadata) {
return SendInitialMetadata(target, std::move(metadata));
}
inline auto BatchBuilder::SendServerTrailingMetadata(
Target target, ServerMetadataHandle metadata,
bool convert_to_cancellation) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] %s: %s",
Activity::current()->DebugTag().c_str(),
convert_to_cancellation ? "Send trailing metadata as cancellation"
: "Queue send trailing metadata",
metadata->DebugString().c_str());
}
Batch* batch;
PendingSends* pc;
if (convert_to_cancellation) {
const auto status_code =
metadata->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN);
auto status = grpc_error_set_int(
absl::Status(static_cast<absl::StatusCode>(status_code),
metadata->GetOrCreatePointer(GrpcMessageMetadata())
->as_string_view()),
StatusIntProperty::kRpcStatus, status_code);
batch = MakeCancel(target.stream_refcount, std::move(status));
pc = batch->GetInitializedCompletion(&Batch::pending_sends);
} else {
batch = GetBatch(target);
pc = batch->GetInitializedCompletion(&Batch::pending_sends);
batch->batch.send_trailing_metadata = true;
payload_->send_trailing_metadata.send_trailing_metadata = metadata.get();
payload_->send_trailing_metadata.sent = &pc->trailing_metadata_sent;
}
batch->batch.on_complete = &pc->on_done_closure;
pc->send_trailing_metadata = std::move(metadata);
auto promise = batch->RefUntil(
Map(pc->done_latch.WaitAndCopy(), [pc](absl::Status status) {
return CompleteSendServerTrailingMetadata(
std::move(pc->send_trailing_metadata), std::move(status),
pc->trailing_metadata_sent);
}));
if (convert_to_cancellation) {
batch->PerformWith(target);
}
return promise;
}
inline auto BatchBuilder::ReceiveMessage(Target target) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Queue receive message",
Activity::current()->DebugTag().c_str());
}
auto* batch = GetBatch(target);
auto* pc = batch->GetInitializedCompletion(&Batch::pending_receive_message);
batch->batch.recv_message = true;
payload_->recv_message.recv_message_ready = &pc->on_done_closure;
payload_->recv_message.recv_message = &pc->payload;
payload_->recv_message.flags = &pc->flags;
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status)
-> absl::StatusOr<absl::optional<MessageHandle>> {
if (!status.ok()) return status;
if (!pc->payload.has_value()) return absl::nullopt;
return pc->IntoMessageHandle();
}));
}
inline auto BatchBuilder::ReceiveInitialMetadata(Target target) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Queue receive initial metadata",
Activity::current()->DebugTag().c_str());
}
auto* batch = GetBatch(target);
auto* pc =
batch->GetInitializedCompletion(&Batch::pending_receive_initial_metadata);
batch->batch.recv_initial_metadata = true;
payload_->recv_initial_metadata.recv_initial_metadata_ready =
&pc->on_done_closure;
payload_->recv_initial_metadata.recv_initial_metadata = pc->metadata.get();
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status) -> absl::StatusOr<ClientMetadataHandle> {
if (!status.ok()) return status;
return std::move(pc->metadata);
}));
}
inline auto BatchBuilder::ReceiveClientInitialMetadata(Target target) {
return ReceiveInitialMetadata(target);
}
inline auto BatchBuilder::ReceiveServerInitialMetadata(Target target) {
return ReceiveInitialMetadata(target);
}
inline auto BatchBuilder::ReceiveTrailingMetadata(Target target) {
if (grpc_call_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s[connected] Queue receive trailing metadata",
Activity::current()->DebugTag().c_str());
}
auto* batch = GetBatch(target);
auto* pc = batch->GetInitializedCompletion(
&Batch::pending_receive_trailing_metadata);
batch->batch.recv_trailing_metadata = true;
payload_->recv_trailing_metadata.recv_trailing_metadata_ready =
&pc->on_done_closure;
payload_->recv_trailing_metadata.recv_trailing_metadata = pc->metadata.get();
payload_->recv_trailing_metadata.collect_stats =
&GetContext<CallContext>()->call_stats()->transport_stream_stats;
return batch->RefUntil(
Map(pc->done_latch.Wait(),
[pc](absl::Status status) -> absl::StatusOr<ServerMetadataHandle> {
if (!status.ok()) return status;
return std::move(pc->metadata);
}));
}
inline auto BatchBuilder::ReceiveClientTrailingMetadata(Target target) {
return ReceiveTrailingMetadata(target);
}
inline auto BatchBuilder::ReceiveServerTrailingMetadata(Target target) {
return ReceiveTrailingMetadata(target);
}
template <>
struct ContextType<BatchBuilder> {};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_TRANSPORT_BATCH_BUILDER_H
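A minimal usage sketch (an editorial addition, not part of this commit): assuming a BatchBuilder has already been installed as a context by the surrounding call, a helper like the hypothetical SendOneMessage below queues a send into the coalesced batch and observes its completion. Only GetContext, Map, BatchBuilder::Target and BatchBuilder::SendMessage come from the header above; the helper name and the logging are illustrative.
namespace grpc_core {
// Hypothetical helper, for illustration only.
inline auto SendOneMessage(BatchBuilder::Target target, MessageHandle message) {
  auto* builder = GetContext<BatchBuilder>();
  // The send is queued into the current coalesced batch for `target`; it is
  // pushed to the transport as a single transaction when the builder flushes.
  return Map(builder->SendMessage(target, std::move(message)),
             [](absl::Status status) {
               if (!status.ok()) {
                 gpr_log(GPR_DEBUG, "send failed: %s",
                         status.ToString().c_str());
               }
               return status;
             });
}
}  // namespace grpc_core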

@ -441,6 +441,15 @@ struct GrpcStatusFromWire {
static absl::string_view DisplayValue(bool x) { return x ? "true" : "false"; }
};
// Annotation to denote that this call qualifies for cancelled=1 for the
// RECV_CLOSE_ON_SERVER op
struct GrpcCallWasCancelled {
static absl::string_view DebugKey() { return "GrpcCallWasCancelled"; }
static constexpr bool kRepeatable = false;
using ValueType = bool;
static absl::string_view DisplayValue(bool x) { return x ? "true" : "false"; }
};
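As a hedged illustration (not part of the commit) of how a consumer could read the new annotation once trailing metadata is available; server_trailing_metadata is an assumed ServerMetadataHandle already in scope, and the fallback value is an arbitrary illustrative default.
// Reads the annotation set by CompleteSendServerTrailingMetadata in
// batch_builder.cc; absent values fall back to `true` here for illustration.
bool was_cancelled =
    server_trailing_metadata->get(GrpcCallWasCancelled()).value_or(true);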
// Annotation added by client surface code to denote wait-for-ready state
struct WaitForReady {
struct ValueType {
@ -1378,7 +1387,8 @@ using grpc_metadata_batch_base = grpc_core::MetadataMap<
// Non-encodable things
grpc_core::GrpcStreamNetworkState, grpc_core::PeerString,
grpc_core::GrpcStatusContext, grpc_core::GrpcStatusFromWire,
grpc_core::WaitForReady, grpc_core::GrpcTrailersOnly>;
grpc_core::GrpcCallWasCancelled, grpc_core::WaitForReady,
grpc_core::GrpcTrailersOnly>;
struct grpc_metadata_batch : public grpc_metadata_batch_base {
using grpc_metadata_batch_base::grpc_metadata_batch_base;

@ -26,13 +26,17 @@
#include <new>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/transport_impl.h"
grpc_core::DebugOnlyTraceFlag grpc_trace_stream_refcount(false,
@ -271,11 +275,35 @@ namespace grpc_core {
ServerMetadataHandle ServerMetadataFromStatus(const absl::Status& status,
Arena* arena) {
auto hdl = arena->MakePooled<ServerMetadata>(arena);
hdl->Set(GrpcStatusMetadata(), static_cast<grpc_status_code>(status.code()));
grpc_status_code code;
std::string message;
grpc_error_get_status(status, Timestamp::InfFuture(), &code, &message,
nullptr, nullptr);
hdl->Set(GrpcStatusMetadata(), code);
if (!status.ok()) {
hdl->Set(GrpcMessageMetadata(), Slice::FromCopiedString(status.message()));
hdl->Set(GrpcMessageMetadata(), Slice::FromCopiedString(message));
}
return hdl;
}
std::string Message::DebugString() const {
std::string out = absl::StrCat(payload_.Length(), "b");
auto flags = flags_;
auto explain = [&flags, &out](uint32_t flag, absl::string_view name) {
if (flags & flag) {
flags &= ~flag;
absl::StrAppend(&out, ":", name);
}
};
explain(GRPC_WRITE_BUFFER_HINT, "write_buffer");
explain(GRPC_WRITE_NO_COMPRESS, "no_compress");
explain(GRPC_WRITE_THROUGH, "write_through");
explain(GRPC_WRITE_INTERNAL_COMPRESS, "compress");
explain(GRPC_WRITE_INTERNAL_TEST_ONLY_WAS_COMPRESSED, "was_compressed");
if (flags != 0) {
absl::StrAppend(&out, ":huh=0x", absl::Hex(flags));
}
return out;
}
} // namespace grpc_core
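To make the flag formatting above concrete, a small editorial sketch (not part of the commit; DebugStringExample is a hypothetical name) that exercises Message::DebugString as implemented here:
void DebugStringExample() {
  grpc_core::SliceBuffer payload;
  payload.Append(grpc_core::Slice::FromCopiedString("hello world!"));  // 12 bytes
  grpc_core::Message m(std::move(payload), GRPC_WRITE_NO_COMPRESS);
  // Produces "12b:no_compress": the length prefix, then one tag per recognized
  // flag; any unknown flag bits would show up as ":huh=0x...".
  gpr_log(GPR_DEBUG, "%s", m.DebugString().c_str());
}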

@ -27,6 +27,7 @@
#include <functional>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
@ -53,6 +54,7 @@
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/promise/detail/status.h"
#include "src/core/lib/promise/latch.h"
#include "src/core/lib/promise/pipe.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
@ -105,6 +107,8 @@ class Message {
SliceBuffer* payload() { return &payload_; }
const SliceBuffer* payload() const { return &payload_; }
std::string DebugString() const;
private:
SliceBuffer payload_;
uint32_t flags_ = 0;
@ -143,11 +147,70 @@ struct StatusCastImpl<ServerMetadataHandle, absl::Status&> {
}
};
// Move-only type that tracks call startup.
// Allows observation of when client_initial_metadata has been processed by
// the end of the local call stack.
// Interested observers can call Wait() to obtain a promise that will resolve
// when all local client_initial_metadata processing has completed; the
// promise resolves to true on successful completion, or to false if the
// metadata was not sent.
// To signal successful completion, call Complete(true); for failure, call
// Complete(false).
// If Complete is not called, the destructor of a still-held token will
// complete with failure.
// Transports should hold this token until client_initial_metadata has passed
// any flow control (e.g. MAX_CONCURRENT_STREAMS for HTTP/2).
class ClientInitialMetadataOutstandingToken {
public:
static ClientInitialMetadataOutstandingToken Empty() {
return ClientInitialMetadataOutstandingToken();
}
static ClientInitialMetadataOutstandingToken New(
Arena* arena = GetContext<Arena>()) {
ClientInitialMetadataOutstandingToken token;
token.latch_ = arena->New<Latch<bool>>();
return token;
}
ClientInitialMetadataOutstandingToken(
const ClientInitialMetadataOutstandingToken&) = delete;
ClientInitialMetadataOutstandingToken& operator=(
const ClientInitialMetadataOutstandingToken&) = delete;
ClientInitialMetadataOutstandingToken(
ClientInitialMetadataOutstandingToken&& other) noexcept
: latch_(std::exchange(other.latch_, nullptr)) {}
ClientInitialMetadataOutstandingToken& operator=(
ClientInitialMetadataOutstandingToken&& other) noexcept {
latch_ = std::exchange(other.latch_, nullptr);
return *this;
}
~ClientInitialMetadataOutstandingToken() {
if (latch_ != nullptr) latch_->Set(false);
}
void Complete(bool success) { std::exchange(latch_, nullptr)->Set(success); }
// Returns a promise that resolves to the value passed to Complete(), or to
// false if the token is dropped before Complete() is called.
auto Wait() { return latch_->Wait(); }
private:
ClientInitialMetadataOutstandingToken() = default;
Latch<bool>* latch_ = nullptr;
};
using ClientInitialMetadataOutstandingTokenWaitType =
decltype(std::declval<ClientInitialMetadataOutstandingToken>().Wait());
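A brief lifecycle sketch (an editorial addition, not part of the commit) tying the comments above together; TokenLifecycleSketch is a hypothetical name and polling of the returned promise is elided.
void TokenLifecycleSketch(Arena* arena) {
  auto token = ClientInitialMetadataOutstandingToken::New(arena);
  // Observers take the Wait() promise before the token is handed off; when
  // polled inside the call's activity it resolves to the value passed to
  // Complete(), or to false if the token is destroyed first.
  auto initial_metadata_sent = token.Wait();
  (void)initial_metadata_sent;  // would be polled by an interested party
  // ... the token then travels down the stack inside CallArgs ...
  token.Complete(true);  // e.g. the transport: metadata cleared flow control
}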
struct CallArgs {
// Initial metadata from the client to the server.
// During promise setup this can be manipulated by filters (and then
// passed on to the next filter).
ClientMetadataHandle client_initial_metadata;
// Token indicating that client_initial_metadata is still being processed.
// This should be moved around and only destroyed when the transport is
// satisfied that the metadata has passed any flow control measures it has.
ClientInitialMetadataOutstandingToken client_initial_metadata_outstanding;
// Initial metadata from the server to the client.
// Set once when it's available.
// During promise setup filters can substitute their own latch for this
@ -330,6 +393,12 @@ struct grpc_transport_stream_op_batch {
/// Is this stream traced
bool is_traced : 1;
bool HasOp() const {
return send_initial_metadata || send_trailing_metadata || send_message ||
recv_initial_metadata || recv_message || recv_trailing_metadata ||
cancel_stream;
}
//**************************************************************************
// remaining fields are initialized and used at the discretion of the
// current handler of the op

@ -38,6 +38,13 @@ typedef struct grpc_transport_vtable {
// layers and initialized by the transport
size_t sizeof_stream; // = sizeof(transport stream)
// HACK: inproc does not handle stream op batch callbacks correctly (receive
// ops are required to complete prior to on_complete triggering).
// This flag is used to disable coalescing of batches in connected_channel for
// that specific transport.
// TODO(ctiller): This ought not be necessary once we have promises complete.
bool hacky_disable_stream_op_batch_coalescing_in_connected_channel;
// name of this transport implementation
const char* name;

@ -661,6 +661,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/party.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/promise/trace.cc',
'src/core/lib/resolver/resolver.cc',
@ -764,6 +765,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/batch_builder.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',

@ -28,13 +28,16 @@
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include <grpc/byte_buffer.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@ -120,6 +123,35 @@ int raw_byte_buffer_eq_slice(grpc_byte_buffer* rbb, grpc_slice b) {
ok = GRPC_SLICE_LENGTH(a) == GRPC_SLICE_LENGTH(b) &&
0 == memcmp(GRPC_SLICE_START_PTR(a), GRPC_SLICE_START_PTR(b),
GRPC_SLICE_LENGTH(a));
if (!ok) {
gpr_log(GPR_ERROR,
"SLICE MISMATCH: left_length=%" PRIuPTR " right_length=%" PRIuPTR,
GRPC_SLICE_LENGTH(a), GRPC_SLICE_LENGTH(b));
std::string out;
const char* a_str = reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(a));
const char* b_str = reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(b));
for (size_t i = 0; i < std::max(GRPC_SLICE_LENGTH(a), GRPC_SLICE_LENGTH(b));
i++) {
if (i >= GRPC_SLICE_LENGTH(a)) {
absl::StrAppend(&out, "\u001b[36m", // cyan
absl::CEscape(absl::string_view(&b_str[i], 1)),
"\u001b[0m");
} else if (i >= GRPC_SLICE_LENGTH(b)) {
absl::StrAppend(&out, "\u001b[35m", // magenta
absl::CEscape(absl::string_view(&a_str[i], 1)),
"\u001b[0m");
} else if (a_str[i] == b_str[i]) {
absl::StrAppend(&out, absl::CEscape(absl::string_view(&a_str[i], 1)));
} else {
absl::StrAppend(&out, "\u001b[31m", // red
absl::CEscape(absl::string_view(&a_str[i], 1)),
"\u001b[33m", // yellow
absl::CEscape(absl::string_view(&b_str[i], 1)),
"\u001b[0m");
}
gpr_log(GPR_ERROR, "%s", out.c_str());
}
}
grpc_slice_unref(a);
grpc_slice_unref(b);
return ok;

@ -210,7 +210,7 @@ static void on_p2s_sent_message(void* arg, int success) {
grpc_op op;
grpc_call_error err;
grpc_byte_buffer_destroy(pc->c2p_msg);
grpc_byte_buffer_destroy(std::exchange(pc->c2p_msg, nullptr));
if (!pc->proxy->shutdown && success) {
op.op = GRPC_OP_RECV_MESSAGE;
op.flags = 0;

@ -42,7 +42,10 @@
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/promise/promise.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/test_config.h"
@ -397,12 +400,23 @@ static grpc_error_handle init_channel_elem(
static void destroy_channel_elem(grpc_channel_element* /*elem*/) {}
static const grpc_channel_filter test_filter = {
grpc_call_next_op, nullptr,
grpc_channel_next_op, 0,
init_call_elem, grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem, 0,
init_channel_elem, grpc_channel_stack_no_post_init,
destroy_channel_elem, grpc_channel_next_get_info,
grpc_call_next_op,
[](grpc_channel_element*, grpc_core::CallArgs,
grpc_core::NextPromiseFactory)
-> grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle> {
return grpc_core::Immediate(grpc_core::ServerMetadataFromStatus(
absl::PermissionDeniedError("access denied")));
},
grpc_channel_next_op,
0,
init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
0,
init_channel_elem,
grpc_channel_stack_no_post_init,
destroy_channel_elem,
grpc_channel_next_get_info,
"filter_init_fails"};
//******************************************************************************

@ -82,6 +82,9 @@ static void test_max_message_length_on_request(
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
grpc_slice expect_in_details = grpc_slice_from_copied_string(
send_limit ? "Sent message larger than max (11 vs. 5)"
: "Received message larger than max (11 vs. 5)");
int was_cancelled = 2;
grpc_channel_args* client_args = nullptr;
@ -220,13 +223,10 @@ static void test_max_message_length_on_request(
done:
GPR_ASSERT(status == GRPC_STATUS_RESOURCE_EXHAUSTED);
GPR_ASSERT(
grpc_slice_str_cmp(
details, send_limit
? "Sent message larger than max (11 vs. 5)"
: "Received message larger than max (11 vs. 5)") == 0);
GPR_ASSERT(grpc_slice_slice(details, expect_in_details) >= 0);
grpc_slice_unref(details);
grpc_slice_unref(expect_in_details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
@ -265,6 +265,9 @@ static void test_max_message_length_on_response(
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
grpc_slice expect_in_details = grpc_slice_from_copied_string(
send_limit ? "Sent message larger than max (11 vs. 5)"
: "Received message larger than max (11 vs. 5)");
int was_cancelled = 2;
grpc_channel_args* client_args = nullptr;
@ -404,13 +407,10 @@ static void test_max_message_length_on_response(
GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/service/method"));
GPR_ASSERT(status == GRPC_STATUS_RESOURCE_EXHAUSTED);
GPR_ASSERT(
grpc_slice_str_cmp(
details, send_limit
? "Sent message larger than max (11 vs. 5)"
: "Received message larger than max (11 vs. 5)") == 0);
GPR_ASSERT(grpc_slice_slice(details, expect_in_details) >= 0);
grpc_slice_unref(details);
grpc_slice_unref(expect_in_details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);

@ -154,7 +154,8 @@ TEST_F(ClientAuthFilterTest, CallCredsFails) {
auto promise = filter->MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch_,
Arena::PooledDeleter(nullptr)),
nullptr, nullptr, nullptr},
ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
nullptr},
[&](CallArgs /*call_args*/) {
return ArenaPromise<ServerMetadataHandle>(
[&]() -> Poll<ServerMetadataHandle> {
@ -183,7 +184,8 @@ TEST_F(ClientAuthFilterTest, RewritesInvalidStatusFromCallCreds) {
auto promise = filter->MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch_,
Arena::PooledDeleter(nullptr)),
nullptr, nullptr, nullptr},
ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
nullptr},
[&](CallArgs /*call_args*/) {
return ArenaPromise<ServerMetadataHandle>(
[&]() -> Poll<ServerMetadataHandle> {

@ -71,7 +71,8 @@ TEST(ClientAuthorityFilterTest, PromiseCompletesImmediatelyAndSetsAuthority) {
auto promise = filter.MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch,
Arena::PooledDeleter(nullptr)),
nullptr, nullptr, nullptr},
ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
nullptr},
[&](CallArgs call_args) {
EXPECT_EQ(call_args.client_initial_metadata
->get_pointer(HttpAuthorityMetadata())
@ -106,7 +107,8 @@ TEST(ClientAuthorityFilterTest,
auto promise = filter.MakeCallPromise(
CallArgs{ClientMetadataHandle(&initial_metadata_batch,
Arena::PooledDeleter(nullptr)),
nullptr, nullptr, nullptr},
ClientInitialMetadataOutstandingToken::Empty(), nullptr, nullptr,
nullptr},
[&](CallArgs call_args) {
EXPECT_EQ(call_args.client_initial_metadata
->get_pointer(HttpAuthorityMetadata())

@ -110,6 +110,8 @@ namespace {
const grpc_transport_vtable kFakeTransportVTable = {
// sizeof_stream
0,
// hacky_disable_stream_op_batch_coalescing_in_connected_channel
false,
// name
"fake_transport",
// init_stream
@ -402,16 +404,16 @@ class MainLoop {
public:
WakeCall(MainLoop* main_loop, uint32_t id)
: main_loop_(main_loop), id_(id) {}
void Wakeup(void*) override {
void Wakeup(WakeupMask) override {
for (const uint32_t already : main_loop_->wakeups_) {
if (already == id_) return;
}
main_loop_->wakeups_.push_back(id_);
delete this;
}
void Drop(void*) override { delete this; }
void Drop(WakeupMask) override { delete this; }
std::string ActivityDebugTag(void*) const override {
std::string ActivityDebugTag(WakeupMask) const override {
return "WakeCall(" + std::to_string(id_) + ")";
}
@ -476,6 +478,7 @@ class MainLoop {
auto* server_initial_metadata = arena_->New<Pipe<ServerMetadataHandle>>();
CallArgs call_args{std::move(*LoadMetadata(client_initial_metadata,
&client_initial_metadata_)),
ClientInitialMetadataOutstandingToken::Empty(),
&server_initial_metadata->sender, nullptr, nullptr};
if (is_client) {
promise_ = main_loop_->channel_stack_->MakeClientCallPromise(
@ -524,9 +527,9 @@ class MainLoop {
}
void Orphan() override { abort(); }
void ForceImmediateRepoll() override { context_->set_continue(); }
void ForceImmediateRepoll(WakeupMask) override { context_->set_continue(); }
Waker MakeOwningWaker() override {
return Waker(new WakeCall(main_loop_, id_), nullptr);
return Waker(new WakeCall(main_loop_, id_), 0);
}
Waker MakeNonOwningWaker() override { return MakeOwningWaker(); }

@ -53,7 +53,7 @@ TEST(RefCounted, ExtraRef) {
foo->Unref();
}
class Value : public RefCounted<Value, PolymorphicRefCount, kUnrefNoDelete> {
class Value : public RefCounted<Value, PolymorphicRefCount, UnrefNoDelete> {
public:
Value(int value, std::set<std::unique_ptr<Value>>* registry) : value_(value) {
registry->emplace(this);
@ -108,7 +108,7 @@ TEST(RefCounted, NoDeleteUponUnref) {
class ValueInExternalAllocation
: public RefCounted<ValueInExternalAllocation, PolymorphicRefCount,
kUnrefCallDtor> {
UnrefCallDtor> {
public:
explicit ValueInExternalAllocation(int value) : value_(value) {}

@ -20,6 +20,8 @@
#include "src/core/lib/gprpp/thd.h"
#include <atomic>
#include "gtest/gtest.h"
#include <grpc/support/sync.h>
@ -49,7 +51,7 @@ static void thd_body1(void* v) {
}
// Test that we can create a number of threads, wait for them, and join them.
static void test1(void) {
TEST(ThreadTest, CanCreateWaitAndJoin) {
grpc_core::Thread thds[NUM_THREADS];
struct test t;
gpr_mu_init(&t.mu);
@ -76,7 +78,7 @@ static void test1(void) {
static void thd_body2(void* /*v*/) {}
// Test that we can create a number of threads and join them.
static void test2(void) {
TEST(ThreadTest, CanCreateSomeAndJoinThem) {
grpc_core::Thread thds[NUM_THREADS];
for (auto& th : thds) {
bool ok;
@ -89,11 +91,23 @@ static void test2(void) {
}
}
// -------------------------------------------------
TEST(ThdTest, MainTest) {
test1();
test2();
// Test that we can create a thread with an AnyInvocable.
TEST(ThreadTest, CanCreateWithAnyInvocable) {
grpc_core::Thread thds[NUM_THREADS];
std::atomic<int> count_run{0};
for (auto& th : thds) {
bool ok;
th = grpc_core::Thread(
"grpc_thread_body2_test",
[&count_run]() { count_run.fetch_add(1, std::memory_order_relaxed); },
&ok);
ASSERT_TRUE(ok);
th.Start();
}
for (auto& th : thds) {
th.Join();
}
EXPECT_EQ(count_run.load(std::memory_order_relaxed), NUM_THREADS);
}
int main(int argc, char** argv) {

@ -127,7 +127,10 @@ grpc_cc_test(
# is.
name = "promise_map_test",
srcs = ["map_test.cc"],
external_deps = ["gtest"],
external_deps = [
"absl/functional:any_invocable",
"gtest",
],
language = "c++",
tags = ["promise_test"],
uses_event_engine = False,
@ -164,7 +167,6 @@ grpc_cc_test(
uses_event_engine = False,
uses_polling = False,
deps = [
"//:promise",
"//src/core:poll",
"//src/core:promise_factory",
],
@ -307,25 +309,6 @@ grpc_cc_test(
],
)
grpc_cc_test(
name = "observable_test",
srcs = ["observable_test.cc"],
external_deps = [
"absl/status",
"gtest",
],
language = "c++",
tags = ["promise_test"],
uses_event_engine = False,
uses_polling = False,
deps = [
"test_wakeup_schedulers",
"//:promise",
"//src/core:observable",
"//src/core:seq",
],
)
grpc_cc_test(
name = "for_each_test",
srcs = ["for_each_test.cc"],
@ -385,6 +368,7 @@ grpc_cc_test(
name = "pipe_test",
srcs = ["pipe_test.cc"],
external_deps = [
"absl/functional:function_ref",
"absl/status",
"gtest",
],
@ -394,6 +378,7 @@ grpc_cc_test(
uses_polling = False,
deps = [
"test_wakeup_schedulers",
"//:gpr",
"//:grpc",
"//:ref_counted_ptr",
"//src/core:activity",
@ -432,6 +417,7 @@ grpc_proto_fuzzer(
srcs = ["promise_fuzzer.cc"],
corpus = "promise_fuzzer_corpus",
external_deps = [
"absl/functional:any_invocable",
"absl/status",
"absl/types:optional",
],
@ -526,7 +512,6 @@ grpc_cc_test(
"//:exec_ctx",
"//:gpr",
"//:grpc_unsecure",
"//:orphanable",
"//:ref_counted_ptr",
"//src/core:1999",
"//src/core:context",

@ -14,8 +14,6 @@
#include "src/core/lib/promise/if.h"
#include <utility>
#include "gtest/gtest.h"
namespace grpc_core {

@ -52,6 +52,33 @@ TEST(LatchTest, Works) {
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
}
TEST(LatchTest, WaitAndCopyWorks) {
Latch<std::string> latch;
StrictMock<MockFunction<void(absl::Status)>> on_done;
EXPECT_CALL(on_done, Call(absl::OkStatus()));
MakeActivity(
[&latch] {
return Seq(Join(latch.WaitAndCopy(), latch.WaitAndCopy(),
[&latch]() {
latch.Set(
"Once a jolly swagman camped by a billabong, "
"under the shade of a coolibah tree.");
return true;
}),
[](std::tuple<std::string, std::string, bool> result) {
EXPECT_EQ(std::get<0>(result),
"Once a jolly swagman camped by a billabong, "
"under the shade of a coolibah tree.");
EXPECT_EQ(std::get<1>(result),
"Once a jolly swagman camped by a billabong, "
"under the shade of a coolibah tree.");
return absl::OkStatus();
});
},
NoWakeupScheduler(),
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
}
TEST(LatchTest, Void) {
Latch<void> latch;
StrictMock<MockFunction<void(absl::Status)>> on_done;
@ -69,6 +96,23 @@ TEST(LatchTest, Void) {
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
}
TEST(LatchTest, ExternallyObservableVoid) {
ExternallyObservableLatch<void> latch;
StrictMock<MockFunction<void(absl::Status)>> on_done;
EXPECT_CALL(on_done, Call(absl::OkStatus()));
MakeActivity(
[&latch] {
return Seq(Join(latch.Wait(),
[&latch]() {
latch.Set();
return true;
}),
[](std::tuple<Empty, bool>) { return absl::OkStatus(); });
},
NoWakeupScheduler(),
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
}
} // namespace grpc_core
int main(int argc, char** argv) {

@ -14,6 +14,8 @@
#include "src/core/lib/promise/loop.h"
#include <utility>
#include "gtest/gtest.h"
#include "src/core/lib/promise/detail/basic_seq.h"
@ -49,6 +51,20 @@ TEST(LoopTest, LoopOfSeq) {
EXPECT_EQ(x, Poll<int>(42));
}
TEST(LoopTest, CanAccessFactoryLambdaVariables) {
int i = 0;
auto x = Loop([p = &i]() {
return [q = &p]() -> Poll<LoopCtl<int>> {
++**q;
return Pending{};
};
});
auto y = std::move(x);
auto z = std::move(y);
z();
EXPECT_EQ(i, 1);
}
} // namespace grpc_core
int main(int argc, char** argv) {

@ -14,8 +14,7 @@
#include "src/core/lib/promise/map.h"
#include <functional>
#include "absl/functional/any_invocable.h"
#include "gtest/gtest.h"
#include "src/core/lib/promise/promise.h"

@ -36,14 +36,14 @@ class MockActivity : public Activity, public Wakeable {
public:
MOCK_METHOD(void, WakeupRequested, ());
void ForceImmediateRepoll() override { WakeupRequested(); }
void ForceImmediateRepoll(WakeupMask) override { WakeupRequested(); }
void Orphan() override {}
Waker MakeOwningWaker() override { return Waker(this, nullptr); }
Waker MakeNonOwningWaker() override { return Waker(this, nullptr); }
void Wakeup(void*) override { WakeupRequested(); }
void Drop(void*) override {}
Waker MakeOwningWaker() override { return Waker(this, 0); }
Waker MakeNonOwningWaker() override { return Waker(this, 0); }
void Wakeup(WakeupMask) override { WakeupRequested(); }
void Drop(WakeupMask) override {}
std::string DebugTag() const override { return "MockActivity"; }
std::string ActivityDebugTag(void*) const override { return DebugTag(); }
std::string ActivityDebugTag(WakeupMask) const override { return DebugTag(); }
void Activate() {
if (scoped_activity_ != nullptr) return;

@ -1,134 +0,0 @@
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/promise/observable.h"
#include <functional>
#include "absl/status/status.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "src/core/lib/promise/promise.h"
#include "src/core/lib/promise/seq.h"
#include "test/core/promise/test_wakeup_schedulers.h"
using testing::MockFunction;
using testing::StrictMock;
namespace grpc_core {
// A simple Barrier type: stalls progress until it is 'cleared'.
class Barrier {
public:
struct Result {};
Promise<Result> Wait() {
return [this]() -> Poll<Result> {
MutexLock lock(&mu_);
if (cleared_) {
return Result{};
} else {
return wait_set_.AddPending(Activity::current()->MakeOwningWaker());
}
};
}
void Clear() {
mu_.Lock();
cleared_ = true;
auto wakeup = wait_set_.TakeWakeupSet();
mu_.Unlock();
wakeup.Wakeup();
}
private:
Mutex mu_;
WaitSet wait_set_ ABSL_GUARDED_BY(mu_);
bool cleared_ ABSL_GUARDED_BY(mu_) = false;
};
TEST(ObservableTest, CanPushAndGet) {
StrictMock<MockFunction<void(absl::Status)>> on_done;
Observable<int> observable;
auto observer = observable.MakeObserver();
auto activity = MakeActivity(
[&observer]() {
return Seq(observer.Get(), [](absl::optional<int> i) {
return i == 42 ? absl::OkStatus() : absl::UnknownError("expected 42");
});
},
InlineWakeupScheduler(),
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
EXPECT_CALL(on_done, Call(absl::OkStatus()));
observable.Push(42);
}
TEST(ObservableTest, CanNext) {
StrictMock<MockFunction<void(absl::Status)>> on_done;
Observable<int> observable;
auto observer = observable.MakeObserver();
auto activity = MakeActivity(
[&observer]() {
return Seq(
observer.Get(),
[&observer](absl::optional<int> i) {
EXPECT_EQ(i, 42);
return observer.Next();
},
[](absl::optional<int> i) {
return i == 1 ? absl::OkStatus()
: absl::UnknownError("expected 1");
});
},
InlineWakeupScheduler(),
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
observable.Push(42);
EXPECT_CALL(on_done, Call(absl::OkStatus()));
observable.Push(1);
}
TEST(ObservableTest, CanWatch) {
StrictMock<MockFunction<void(absl::Status)>> on_done;
Observable<int> observable;
Barrier barrier;
auto activity = MakeActivity(
[&observable, &barrier]() {
return observable.Watch(
[&barrier](int x,
WatchCommitter* committer) -> Promise<absl::Status> {
if (x == 3) {
committer->Commit();
return Seq(barrier.Wait(), Immediate(absl::OkStatus()));
} else {
return Never<absl::Status>();
}
});
},
InlineWakeupScheduler(),
[&on_done](absl::Status status) { on_done.Call(std::move(status)); });
observable.Push(1);
observable.Push(2);
observable.Push(3);
observable.Push(4);
EXPECT_CALL(on_done, Call(absl::OkStatus()));
barrier.Clear();
}
} // namespace grpc_core
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@ -28,7 +28,6 @@
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gprpp/notification.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/time.h"
@ -44,19 +43,34 @@ namespace grpc_core {
class AllocatorOwner {
protected:
~AllocatorOwner() { arena_->Destroy(); }
MemoryAllocator memory_allocator_ = MemoryAllocator(
ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator("test"));
Arena* arena_ = Arena::Create(1024, &memory_allocator_);
};
class TestParty final : public AllocatorOwner, public Party {
public:
TestParty() : Party(Arena::Create(1024, &memory_allocator_)) {}
TestParty() : Party(AllocatorOwner::arena_, 1) {}
~TestParty() override {}
std::string DebugTag() const override { return "TestParty"; }
void Run() override {
using Party::IncrementRefCount;
using Party::Unref;
bool RunParty() override {
promise_detail::Context<grpc_event_engine::experimental::EventEngine>
ee_ctx(ee_.get());
Party::Run();
return Party::RunParty();
}
void PartyOver() override {
{
promise_detail::Context<grpc_event_engine::experimental::EventEngine>
ee_ctx(ee_.get());
CancelRemainingParticipants();
}
delete this;
}
private:
@ -68,14 +82,17 @@ class PartyTest : public ::testing::Test {
protected:
};
TEST_F(PartyTest, Noop) { auto party = MakeOrphanable<TestParty>(); }
TEST_F(PartyTest, Noop) { auto party = MakeRefCounted<TestParty>(); }
TEST_F(PartyTest, CanSpawnAndRun) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
bool done = false;
party->Spawn(
"TestSpawn",
[i = 10]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
gpr_log(GPR_DEBUG, "i=%d", i);
GPR_ASSERT(i > 0);
Activity::current()->ForceImmediateRepoll();
--i;
if (i == 0) return 42;
@ -89,13 +106,15 @@ TEST_F(PartyTest, CanSpawnAndRun) {
}
TEST_F(PartyTest, CanSpawnFromSpawn) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
bool done1 = false;
bool done2 = false;
party->Spawn(
[party = party.get(), &done2]() -> Poll<int> {
"TestSpawn",
[party, &done2]() -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
party->Spawn(
"TestSpawnInner",
[i = 10]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
Activity::current()->ForceImmediateRepoll();
@ -118,10 +137,11 @@ TEST_F(PartyTest, CanSpawnFromSpawn) {
}
TEST_F(PartyTest, CanWakeupWithOwningWaker) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
"TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeOwningWaker();
@ -141,10 +161,11 @@ TEST_F(PartyTest, CanWakeupWithOwningWaker) {
}
TEST_F(PartyTest, CanWakeupWithNonOwningWaker) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
"TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeNonOwningWaker();
@ -164,10 +185,11 @@ TEST_F(PartyTest, CanWakeupWithNonOwningWaker) {
}
TEST_F(PartyTest, CanWakeupWithNonOwningWakerAfterOrphaning) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
"TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeNonOwningWaker();
@ -188,10 +210,11 @@ TEST_F(PartyTest, CanWakeupWithNonOwningWakerAfterOrphaning) {
}
TEST_F(PartyTest, CanDropNonOwningWakeAfterOrphaning) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
bool done = false;
std::unique_ptr<Waker> waker;
party->Spawn(
"TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker =
@ -211,10 +234,11 @@ TEST_F(PartyTest, CanDropNonOwningWakeAfterOrphaning) {
}
TEST_F(PartyTest, CanWakeupNonOwningOrphanedWakerWithNoEffect) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
bool done = false;
Waker waker;
party->Spawn(
"TestSpawn",
[i = 10, &waker]() mutable -> Poll<int> {
EXPECT_EQ(Activity::current()->DebugTag(), "TestParty");
waker = Activity::current()->MakeNonOwningWaker();
@ -235,15 +259,16 @@ TEST_F(PartyTest, CanWakeupNonOwningOrphanedWakerWithNoEffect) {
}
TEST_F(PartyTest, ThreadStressTest) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
std::vector<std::thread> threads;
threads.reserve(16);
for (int i = 0; i < 16; i++) {
threads.emplace_back([party = party.get()]() {
threads.reserve(8);
for (int i = 0; i < 8; i++) {
threads.emplace_back([party]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
Notification promise_complete;
party->Spawn(Seq(Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
party->Spawn("TestSpawn",
Seq(Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
[]() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
EXPECT_EQ(i, 42);
@ -298,16 +323,17 @@ class PromiseNotification {
};
TEST_F(PartyTest, ThreadStressTestWithOwningWaker) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
std::vector<std::thread> threads;
threads.reserve(16);
for (int i = 0; i < 16; i++) {
threads.emplace_back([party = party.get()]() {
threads.reserve(8);
for (int i = 0; i < 8; i++) {
threads.emplace_back([party]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
PromiseNotification promise_start(true);
Notification promise_complete;
party->Spawn(Seq(promise_start.Wait(),
party->Spawn("TestSpawn",
Seq(promise_start.Wait(),
Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
[]() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
@ -325,16 +351,17 @@ TEST_F(PartyTest, ThreadStressTestWithOwningWaker) {
}
TEST_F(PartyTest, ThreadStressTestWithNonOwningWaker) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
std::vector<std::thread> threads;
threads.reserve(16);
for (int i = 0; i < 16; i++) {
threads.emplace_back([party = party.get()]() {
threads.reserve(8);
for (int i = 0; i < 8; i++) {
threads.emplace_back([party]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
PromiseNotification promise_start(false);
Notification promise_complete;
party->Spawn(Seq(promise_start.Wait(),
party->Spawn("TestSpawn",
Seq(promise_start.Wait(),
Sleep(Timestamp::Now() + Duration::Milliseconds(10)),
[]() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
@@ -352,15 +379,16 @@ TEST_F(PartyTest, ThreadStressTestWithNonOwningWaker) {
}
TEST_F(PartyTest, ThreadStressTestWithOwningWakerNoSleep) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
std::vector<std::thread> threads;
threads.reserve(16);
for (int i = 0; i < 16; i++) {
threads.emplace_back([party = party.get()]() {
threads.reserve(8);
for (int i = 0; i < 8; i++) {
threads.emplace_back([party]() {
for (int i = 0; i < 10000; i++) {
PromiseNotification promise_start(true);
Notification promise_complete;
party->Spawn(
"TestSpawn",
Seq(promise_start.Wait(), []() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
EXPECT_EQ(i, 42);
@@ -377,15 +405,16 @@ TEST_F(PartyTest, ThreadStressTestWithOwningWakerNoSleep) {
}
TEST_F(PartyTest, ThreadStressTestWithNonOwningWakerNoSleep) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
std::vector<std::thread> threads;
threads.reserve(16);
for (int i = 0; i < 16; i++) {
threads.emplace_back([party = party.get()]() {
threads.reserve(8);
for (int i = 0; i < 8; i++) {
threads.emplace_back([party]() {
for (int i = 0; i < 10000; i++) {
PromiseNotification promise_start(false);
Notification promise_complete;
party->Spawn(
"TestSpawn",
Seq(promise_start.Wait(), []() -> Poll<int> { return 42; }),
[&promise_complete](int i) {
EXPECT_EQ(i, 42);
@@ -402,20 +431,22 @@ TEST_F(PartyTest, ThreadStressTestWithNonOwningWakerNoSleep) {
}
TEST_F(PartyTest, ThreadStressTestWithInnerSpawn) {
auto party = MakeOrphanable<TestParty>();
auto party = MakeRefCounted<TestParty>();
std::vector<std::thread> threads;
threads.reserve(8);
for (int i = 0; i < 8; i++) {
threads.emplace_back([party = party.get()]() {
threads.emplace_back([party]() {
for (int i = 0; i < 100; i++) {
ExecCtx ctx; // needed for Sleep
PromiseNotification inner_start(true);
PromiseNotification inner_complete(false);
Notification promise_complete;
party->Spawn(
"TestSpawn",
Seq(
[party, &inner_start, &inner_complete]() -> Poll<int> {
party->Spawn(Seq(inner_start.Wait(), []() { return 0; }),
party->Spawn("TestSpawnInner",
Seq(inner_start.Wait(), []() { return 0; }),
[&inner_complete](int i) {
EXPECT_EQ(i, 0);
inner_complete.Notify();

@@ -19,6 +19,7 @@
#include <tuple>
#include <utility>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -26,6 +27,7 @@
#include <grpc/event_engine/memory_allocator.h>
#include <grpc/grpc.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/detail/basic_join.h"
@@ -381,6 +383,58 @@ TEST_F(PipeTest, CanFlowControlThroughManyStages) {
ASSERT_TRUE(*done);
}
TEST_F(PipeTest, AwaitClosedWorks) {
StrictMock<MockFunction<void(absl::Status)>> on_done;
EXPECT_CALL(on_done, Call(absl::OkStatus()));
MakeActivity(
[] {
auto* pipe = GetContext<Arena>()->ManagedNew<Pipe<int>>();
pipe->sender.InterceptAndMap([](int value) { return value + 1; });
return Seq(
// Concurrently:
// - wait for closed on both ends
// - close the sender, which will signal the receiver to return an
// end-of-stream.
Join(pipe->receiver.AwaitClosed(), pipe->sender.AwaitClosed(),
[pipe]() mutable {
pipe->sender.Close();
return absl::OkStatus();
}),
// Verify we received end-of-stream and closed the sender.
[](std::tuple<bool, bool, absl::Status> result) {
EXPECT_FALSE(std::get<0>(result));
EXPECT_FALSE(std::get<1>(result));
EXPECT_EQ(std::get<2>(result), absl::OkStatus());
return absl::OkStatus();
});
},
NoWakeupScheduler(),
[&on_done](absl::Status status) { on_done.Call(std::move(status)); },
MakeScopedArena(1024, &memory_allocator_));
}
class FakeActivity final : public Activity {
public:
void Orphan() override {}
void ForceImmediateRepoll(WakeupMask) override {}
Waker MakeOwningWaker() override { Crash("Not implemented"); }
Waker MakeNonOwningWaker() override { Crash("Not implemented"); }
void Run(absl::FunctionRef<void()> f) {
ScopedActivity activity(this);
f();
}
};
TEST_F(PipeTest, PollAckWaitsForReadyClosed) {
FakeActivity().Run([]() {
pipe_detail::Center<int> c;
int i = 1;
EXPECT_EQ(c.Push(&i), Poll<bool>(true));
c.MarkClosed();
EXPECT_EQ(c.PollAck(), Poll<bool>(Pending{}));
});
}
} // namespace grpc_core
int main(int argc, char** argv) {

@@ -14,13 +14,10 @@
#include "src/core/lib/promise/detail/promise_factory.h"
#include <functional>
#include "absl/functional/bind_front.h"
#include "gtest/gtest.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/promise.h"
namespace grpc_core {
namespace promise_detail {
@@ -43,13 +40,12 @@ TEST(AdaptorTest, FactoryFromPromise) {
return Poll<int>(Poll<int>(42));
}).Make()(),
Poll<int>(42));
EXPECT_EQ(MakeOnceFactory<void>(Promise<int>([]() {
return Poll<int>(Poll<int>(42));
})).Make()(),
Poll<int>(42));
EXPECT_EQ(MakeRepeatedFactory<void>(Promise<int>([]() {
EXPECT_EQ(
MakeOnceFactory<void>([]() { return Poll<int>(Poll<int>(42)); }).Make()(),
Poll<int>(42));
EXPECT_EQ(MakeRepeatedFactory<void>([]() {
return Poll<int>(Poll<int>(42));
})).Make()(),
}).Make()(),
Poll<int>(42));
}

@@ -19,6 +19,7 @@
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"

@@ -22,6 +22,7 @@
#include <string.h>
#include <algorithm>
#include <iosfwd>
#include <memory>
#include <ostream>
#include <string>

@@ -418,17 +418,10 @@ void Destroy(grpc_transport* /*self*/) {}
// implementation of grpc_transport_get_endpoint
grpc_endpoint* GetEndpoint(grpc_transport* /*self*/) { return nullptr; }
static const grpc_transport_vtable phony_transport_vtable = {0,
"phony_http2",
InitStream,
nullptr,
SetPollset,
SetPollsetSet,
PerformStreamOp,
PerformOp,
DestroyStream,
Destroy,
GetEndpoint};
static const grpc_transport_vtable phony_transport_vtable = {
0, false, "phony_http2", InitStream,
nullptr, SetPollset, SetPollsetSet, PerformStreamOp,
PerformOp, DestroyStream, Destroy, GetEndpoint};
static grpc_transport phony_transport = {&phony_transport_vtable};

@@ -0,0 +1,132 @@
#!/usr/bin/env python3
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# USAGE:
# Run some tests with the GRPC_ARENA_TRACE_POOLED_ALLOCATIONS #define turned on.
# Capture the output to a text file.
# Invoke this program with that as an argument, and let it work its magic.

import collections
import heapq
import random
import re
import sys

# A single allocation, negative size => free
Allocation = collections.namedtuple('Allocation', 'size ptr')
Active = collections.namedtuple('Active', 'id size')

# Read through all the captures, and build up scrubbed traces
arenas = []
building = collections.defaultdict(list)
active = {}
biggest = 0
smallest = 1024
sizes = set()
for filename in sys.argv[1:]:
    for line in open(filename):
        m = re.search(r'ARENA 0x([0-9a-f]+) ALLOC ([0-9]+) @ 0x([0-9a-f]+)',
                      line)
        if m:
            size = int(m.group(2))
            if size > biggest:
                biggest = size
            if size < smallest:
                smallest = size
            active[m.group(3)] = Active(m.group(1), size)
            building[m.group(1)].append(size)
            sizes.add(size)
        m = re.search(r'FREE 0x([0-9a-f]+)', line)
        if m:
            # We may have spurious frees, so make sure there's an outstanding allocation
            last = active.pop(m.group(1), None)
            if last is not None:
                building[last.id].append(-last.size)
        m = re.search(r'DESTRUCT_ARENA 0x([0-9a-f]+)', line)
        if m:
            trace = building.pop(m.group(1), None)
            if trace:
                arenas.append(trace)


# Given a list of pool sizes, return which bucket an allocation should go into
def bucket(pool_sizes, size):
    for bucket in sorted(pool_sizes):
        if abs(size) <= bucket:
            return bucket


# Given a list of pool sizes, determine the total outstanding bytes in the arena for one trace
def outstanding_bytes(pool_sizes, trace):
    free_list = collections.defaultdict(int)
    allocated = 0
    for size in trace:
        b = bucket(pool_sizes, size)
        if size < 0:
            free_list[b] += 1
        else:
            if free_list[b] > 0:
                free_list[b] -= 1
            else:
                allocated += b
    return allocated + len(pool_sizes) * 8


# Given a list of pool sizes, determine the maximum outstanding bytes for any seen trace
def measure(pool_sizes):
    max_outstanding = 0
    for trace in arenas:
        max_outstanding = max(max_outstanding,
                              outstanding_bytes(pool_sizes, trace))
    return max_outstanding


ALWAYS_INCLUDE = 1024
best = [ALWAYS_INCLUDE, biggest]
best_measure = measure(best)

testq = []
step = 0


def add(l):
    global testq, best_measure, best
    m = measure(l)
    if m < best_measure:
        best_measure = m
        best = l
    if l[-1] == smallest:
        return
    heapq.heappush(testq, (m, l))


add(best)

while testq:
    top = heapq.heappop(testq)[1]
    m = measure(top)
    step += 1
    if step % 1000 == 0:
        print("iter %d; pending=%d; top=%r/%d" %
              (step, len(testq), top, measure(top)))
    for i in sizes:
        if i >= top[-1]:
            continue
        add(top + [i])

print("SAW SIZES: %r" % sorted(list(sizes)))
print("BEST: %r" % list(reversed(best)))
print("BEST MEASURE: %d" % best_measure)

@@ -2399,12 +2399,14 @@ src/core/lib/promise/detail/promise_like.h \
src/core/lib/promise/detail/status.h \
src/core/lib/promise/detail/switch.h \
src/core/lib/promise/exec_ctx_wakeup_scheduler.h \
src/core/lib/promise/for_each.h \
src/core/lib/promise/if.h \
src/core/lib/promise/interceptor_list.h \
src/core/lib/promise/intra_activity_waiter.h \
src/core/lib/promise/latch.h \
src/core/lib/promise/loop.h \
src/core/lib/promise/map.h \
src/core/lib/promise/party.cc \
src/core/lib/promise/party.h \
src/core/lib/promise/pipe.h \
src/core/lib/promise/poll.h \
src/core/lib/promise/promise.h \
@@ -2611,6 +2613,8 @@ src/core/lib/surface/server.h \
src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/validate_metadata.h \
src/core/lib/surface/version.cc \
src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/batch_builder.h \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/bdp_estimator.h \
src/core/lib/transport/connectivity_state.cc \

@@ -2180,12 +2180,14 @@ src/core/lib/promise/detail/promise_like.h \
src/core/lib/promise/detail/status.h \
src/core/lib/promise/detail/switch.h \
src/core/lib/promise/exec_ctx_wakeup_scheduler.h \
src/core/lib/promise/for_each.h \
src/core/lib/promise/if.h \
src/core/lib/promise/interceptor_list.h \
src/core/lib/promise/intra_activity_waiter.h \
src/core/lib/promise/latch.h \
src/core/lib/promise/loop.h \
src/core/lib/promise/map.h \
src/core/lib/promise/party.cc \
src/core/lib/promise/party.h \
src/core/lib/promise/pipe.h \
src/core/lib/promise/poll.h \
src/core/lib/promise/promise.h \
@@ -2394,6 +2396,8 @@ src/core/lib/surface/validate_metadata.cc \
src/core/lib/surface/validate_metadata.h \
src/core/lib/surface/version.cc \
src/core/lib/transport/README.md \
src/core/lib/transport/batch_builder.cc \
src/core/lib/transport/batch_builder.h \
src/core/lib/transport/bdp_estimator.cc \
src/core/lib/transport/bdp_estimator.h \
src/core/lib/transport/connectivity_state.cc \

@@ -5043,30 +5043,6 @@
],
"uses_polling": true
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "observable_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
