Merge master

pull/37386/head
tanvi-jagtap 4 months ago
commit 042e53f096
  1. 3
      BUILD
  2. 35
      CMakeLists.txt
  3. 1
      Package.swift
  4. 12
      bazel/grpc_deps.bzl
  5. 15
      build_autogenerated.yaml
  6. 1
      examples/cpp/otel/codelab/greeter_callback_client_solution.cc
  7. 2
      gRPC-C++.podspec
  8. 2
      gRPC-Core.podspec
  9. 1
      grpc.gemspec
  10. 1
      package.xml
  11. 11
      src/core/BUILD
  12. 41
      src/core/ext/transport/chaotic_good/client_transport.cc
  13. 3
      src/core/ext/transport/chaotic_good/frame.h
  14. 58
      src/core/ext/transport/chaotic_good/server_transport.cc
  15. 5
      src/core/ext/transport/chaotic_good/server_transport.h
  16. 6
      src/core/ext/transport/chttp2/transport/hpack_parser.cc
  17. 42
      src/core/ext/transport/chttp2/transport/hpack_parser_table.cc
  18. 33
      src/core/ext/transport/chttp2/transport/hpack_parser_table.h
  19. 9
      src/core/handshaker/security/secure_endpoint.cc
  20. 131
      src/core/handshaker/security/security_handshaker.cc
  21. 93
      src/core/lib/channel/promise_based_filter.cc
  22. 2
      src/core/lib/event_engine/ares_resolver.cc
  23. 8
      src/core/lib/experiments/experiments.yaml
  24. 21
      src/core/lib/gprpp/work_serializer.cc
  25. 6
      src/core/lib/iomgr/ev_epoll1_linux.cc
  26. 7
      src/core/lib/iomgr/event_engine_shims/endpoint.cc
  27. 12
      src/core/lib/iomgr/polling_entity.cc
  28. 20
      src/core/lib/iomgr/tcp_client_posix.cc
  29. 31
      src/core/lib/iomgr/tcp_posix.cc
  30. 13
      src/core/lib/iomgr/tcp_server_posix.cc
  31. 7
      src/core/lib/resource_quota/memory_quota.cc
  32. 6
      src/core/lib/resource_quota/memory_quota.h
  33. 13
      src/core/lib/security/authorization/grpc_server_authz_filter.cc
  34. 28
      src/core/lib/security/credentials/plugin/plugin_credentials.cc
  35. 14
      src/core/lib/slice/slice_refcount.h
  36. 6
      src/core/lib/surface/call.cc
  37. 7
      src/core/lib/transport/bdp_estimator.cc
  38. 14
      src/core/lib/transport/bdp_estimator.h
  39. 36
      src/core/lib/transport/call_spine.h
  40. 21
      src/core/lib/transport/connectivity_state.cc
  41. 35
      src/core/load_balancing/grpclb/grpclb.cc
  42. 14
      src/core/load_balancing/health_check_client.cc
  43. 6
      src/core/load_balancing/oob_backend_metric.cc
  44. 77
      src/core/load_balancing/outlier_detection/outlier_detection.cc
  45. 88
      src/core/load_balancing/pick_first/pick_first.cc
  46. 35
      src/core/load_balancing/priority/priority.cc
  47. 7
      src/core/load_balancing/ring_hash/ring_hash.cc
  48. 121
      src/core/load_balancing/rls/rls.cc
  49. 28
      src/core/load_balancing/round_robin/round_robin.cc
  50. 74
      src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc
  51. 35
      src/core/load_balancing/weighted_target/weighted_target.cc
  52. 20
      src/core/load_balancing/xds/cds.cc
  53. 42
      src/core/load_balancing/xds/xds_cluster_impl.cc
  54. 21
      src/core/load_balancing/xds/xds_cluster_manager.cc
  55. 120
      src/core/load_balancing/xds/xds_override_host.cc
  56. 14
      src/core/load_balancing/xds/xds_wrr_locality.cc
  57. 29
      src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc
  58. 67
      src/core/telemetry/stats_data.cc
  59. 48
      src/core/telemetry/stats_data.h
  60. 8
      src/core/telemetry/stats_data.yaml
  61. 3
      src/core/tsi/fake_transport_security.cc
  62. 86
      src/core/util/unique_ptr_with_bitset.h
  63. 5
      src/core/xds/xds_client/xds_client.cc
  64. 13
      src/ruby/lib/grpc/generic/active_call.rb
  65. 93
      src/ruby/spec/call_spec.rb
  66. 6
      src/ruby/spec/channel_spec.rb
  67. 655
      src/ruby/spec/client_server_spec.rb
  68. 150
      src/ruby/spec/generic/active_call_spec.rb
  69. 3
      src/ruby/spec/support/services.rb
  70. 76
      test/core/end2end/fuzzers/BUILD
  71. 189
      test/core/end2end/fuzzers/connector_fuzzer.cc
  72. 34
      test/core/end2end/fuzzers/connector_fuzzer.h
  73. 30
      test/core/end2end/fuzzers/connector_fuzzer_chttp2.cc
  74. 1
      test/core/end2end/fuzzers/connector_fuzzer_chttp2_corpus/empty
  75. 36
      test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec.cc
  76. 1
      test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec_corpus/empty
  77. 23
      test/core/end2end/fuzzers/fuzzer_input.proto
  78. 50
      test/core/end2end/fuzzers/network_input.cc
  79. 6
      test/core/end2end/fuzzers/network_input.h
  80. 30
      test/core/end2end/fuzzers/server_fuzzer_chttp2_fake_creds.cc
  81. 1
      test/core/end2end/fuzzers/server_fuzzer_chttp2_fake_creds_corpus/empty
  82. 2
      test/core/end2end/tests/cancel_after_client_done.cc
  83. 75
      test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.cc
  84. 39
      test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.h
  85. 55
      test/core/transport/chttp2/hpack_parser_table_test.cc
  86. 13
      test/core/util/BUILD
  87. 60
      test/core/util/unique_ptr_with_bitset_test.cc
  88. 1
      tools/bazelify_tests/dockerimage_current_versions.bzl
  89. 4
      tools/codegen/core/gen_stats_data.py
  90. 1
      tools/dockerfile/distribtest/ruby_centos7_x64.current_version
  91. 33
      tools/dockerfile/distribtest/ruby_centos7_x64/Dockerfile
  92. 1
      tools/doxygen/Doxyfile.c++.internal
  93. 1
      tools/doxygen/Doxyfile.core.internal
  94. 39
      tools/interop_matrix/README.md
  95. 12
      tools/interop_matrix/create_matrix_images.py
  96. 10
      tools/interop_matrix/run_interop_matrix_tests.py
  97. 7
      tools/remote_build/include/rbe_remote_execution.bazelrc
  98. 1
      tools/run_tests/artifacts/distribtest_targets.py
  99. 24
      tools/run_tests/generated/tests.json

@ -4035,6 +4035,7 @@ grpc_cc_library(
deps = [ deps = [
"gpr", "gpr",
"tsi_base", "tsi_base",
"//src/core:dump_args",
"//src/core:slice", "//src/core:slice",
"//src/core:useful", "//src/core:useful",
], ],
@ -4569,11 +4570,13 @@ grpc_cc_library(
"gpr_platform", "gpr_platform",
"grpc_trace", "grpc_trace",
"hpack_parse_result", "hpack_parse_result",
"stats",
"//src/core:hpack_constants", "//src/core:hpack_constants",
"//src/core:metadata_batch", "//src/core:metadata_batch",
"//src/core:no_destruct", "//src/core:no_destruct",
"//src/core:parsed_metadata", "//src/core:parsed_metadata",
"//src/core:slice", "//src/core:slice",
"//src/core:unique_ptr_with_bitset",
], ],
) )

35
CMakeLists.txt generated

@ -1527,6 +1527,7 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_cxx try_join_test) add_dependencies(buildtests_cxx try_join_test)
add_dependencies(buildtests_cxx try_seq_metadata_test) add_dependencies(buildtests_cxx try_seq_metadata_test)
add_dependencies(buildtests_cxx try_seq_test) add_dependencies(buildtests_cxx try_seq_test)
add_dependencies(buildtests_cxx unique_ptr_with_bitset_test)
add_dependencies(buildtests_cxx unique_type_name_test) add_dependencies(buildtests_cxx unique_type_name_test)
add_dependencies(buildtests_cxx unknown_frame_bad_client_test) add_dependencies(buildtests_cxx unknown_frame_bad_client_test)
add_dependencies(buildtests_cxx uri_parser_test) add_dependencies(buildtests_cxx uri_parser_test)
@ -32484,6 +32485,40 @@ target_link_libraries(try_seq_test
) )
endif()
if(gRPC_BUILD_TESTS)
add_executable(unique_ptr_with_bitset_test
test/core/util/unique_ptr_with_bitset_test.cc
)
target_compile_features(unique_ptr_with_bitset_test PUBLIC cxx_std_14)
target_include_directories(unique_ptr_with_bitset_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(unique_ptr_with_bitset_test
${_gRPC_ALLTARGETS_LIBRARIES}
gtest
absl::check
absl::bits
)
endif() endif()
if(gRPC_BUILD_TESTS) if(gRPC_BUILD_TESTS)

1
Package.swift generated

@ -1950,6 +1950,7 @@ let package = Package(
"src/core/util/time_precise.cc", "src/core/util/time_precise.cc",
"src/core/util/time_precise.h", "src/core/util/time_precise.h",
"src/core/util/tmpfile.h", "src/core/util/tmpfile.h",
"src/core/util/unique_ptr_with_bitset.h",
"src/core/util/upb_utils.h", "src/core/util/upb_utils.h",
"src/core/util/useful.h", "src/core/util/useful.h",
"src/core/util/windows/cpu.cc", "src/core/util/windows/cpu.cc",

@ -23,10 +23,10 @@ def grpc_deps():
if "platforms" not in native.existing_rules(): if "platforms" not in native.existing_rules():
http_archive( http_archive(
name = "platforms", name = "platforms",
sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74", sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
urls = [ urls = [
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz", "https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz", "https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
], ],
) )
@ -168,10 +168,10 @@ def grpc_deps():
http_archive( http_archive(
name = "bazel_skylib", name = "bazel_skylib",
urls = [ urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", "https://github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
], ],
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", sha256 = "bc283cdfcd526a52c3201279cda4bc298652efa898b10b4db0837dc51652756f",
) )
if "bazel_compdb" not in native.existing_rules(): if "bazel_compdb" not in native.existing_rules():

@ -1219,6 +1219,7 @@ libs:
- src/core/util/latent_see.h - src/core/util/latent_see.h
- src/core/util/ring_buffer.h - src/core/util/ring_buffer.h
- src/core/util/spinlock.h - src/core/util/spinlock.h
- src/core/util/unique_ptr_with_bitset.h
- src/core/util/upb_utils.h - src/core/util/upb_utils.h
- src/core/xds/grpc/certificate_provider_store.h - src/core/xds/grpc/certificate_provider_store.h
- src/core/xds/grpc/file_watcher_certificate_provider_factory.h - src/core/xds/grpc/file_watcher_certificate_provider_factory.h
@ -2705,6 +2706,7 @@ libs:
- src/core/util/latent_see.h - src/core/util/latent_see.h
- src/core/util/ring_buffer.h - src/core/util/ring_buffer.h
- src/core/util/spinlock.h - src/core/util/spinlock.h
- src/core/util/unique_ptr_with_bitset.h
- src/core/util/upb_utils.h - src/core/util/upb_utils.h
- third_party/upb/upb/generated_code_support.h - third_party/upb/upb/generated_code_support.h
src: src:
@ -20491,6 +20493,19 @@ targets:
- absl/status:statusor - absl/status:statusor
- gpr - gpr
uses_polling: false uses_polling: false
- name: unique_ptr_with_bitset_test
gtest: true
build: test
language: c++
headers:
- src/core/util/unique_ptr_with_bitset.h
src:
- test/core/util/unique_ptr_with_bitset_test.cc
deps:
- gtest
- absl/log:check
- absl/numeric:bits
uses_polling: false
- name: unique_type_name_test - name: unique_type_name_test
gtest: true gtest: true
build: test build: test

@ -128,7 +128,6 @@ void RunClient(const std::string& target_str) {
int main(int argc, char** argv) { int main(int argc, char** argv) {
absl::ParseCommandLine(argc, argv); absl::ParseCommandLine(argc, argv);
// CODELAB HINT : Add code to register OpenTelemetry plugin here.
// Register a global gRPC OpenTelemetry plugin configured with a prometheus // Register a global gRPC OpenTelemetry plugin configured with a prometheus
// exporter. // exporter.
opentelemetry::exporter::metrics::PrometheusExporterOptions opts; opentelemetry::exporter::metrics::PrometheusExporterOptions opts;

2
gRPC-C++.podspec generated

@ -1326,6 +1326,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h', 'src/core/util/string.h',
'src/core/util/time_precise.h', 'src/core/util/time_precise.h',
'src/core/util/tmpfile.h', 'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h', 'src/core/util/upb_utils.h',
'src/core/util/useful.h', 'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h', 'src/core/xds/grpc/certificate_provider_store.h',
@ -2609,6 +2610,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h', 'src/core/util/string.h',
'src/core/util/time_precise.h', 'src/core/util/time_precise.h',
'src/core/util/tmpfile.h', 'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h', 'src/core/util/upb_utils.h',
'src/core/util/useful.h', 'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h', 'src/core/xds/grpc/certificate_provider_store.h',

2
gRPC-Core.podspec generated

@ -2066,6 +2066,7 @@ Pod::Spec.new do |s|
'src/core/util/time_precise.cc', 'src/core/util/time_precise.cc',
'src/core/util/time_precise.h', 'src/core/util/time_precise.h',
'src/core/util/tmpfile.h', 'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h', 'src/core/util/upb_utils.h',
'src/core/util/useful.h', 'src/core/util/useful.h',
'src/core/util/windows/cpu.cc', 'src/core/util/windows/cpu.cc',
@ -3389,6 +3390,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h', 'src/core/util/string.h',
'src/core/util/time_precise.h', 'src/core/util/time_precise.h',
'src/core/util/tmpfile.h', 'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h', 'src/core/util/upb_utils.h',
'src/core/util/useful.h', 'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h', 'src/core/xds/grpc/certificate_provider_store.h',

1
grpc.gemspec generated

@ -1952,6 +1952,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/util/time_precise.cc ) s.files += %w( src/core/util/time_precise.cc )
s.files += %w( src/core/util/time_precise.h ) s.files += %w( src/core/util/time_precise.h )
s.files += %w( src/core/util/tmpfile.h ) s.files += %w( src/core/util/tmpfile.h )
s.files += %w( src/core/util/unique_ptr_with_bitset.h )
s.files += %w( src/core/util/upb_utils.h ) s.files += %w( src/core/util/upb_utils.h )
s.files += %w( src/core/util/useful.h ) s.files += %w( src/core/util/useful.h )
s.files += %w( src/core/util/windows/cpu.cc ) s.files += %w( src/core/util/windows/cpu.cc )

1
package.xml generated

@ -1934,6 +1934,7 @@
<file baseinstalldir="/" name="src/core/util/time_precise.cc" role="src" /> <file baseinstalldir="/" name="src/core/util/time_precise.cc" role="src" />
<file baseinstalldir="/" name="src/core/util/time_precise.h" role="src" /> <file baseinstalldir="/" name="src/core/util/time_precise.h" role="src" />
<file baseinstalldir="/" name="src/core/util/tmpfile.h" role="src" /> <file baseinstalldir="/" name="src/core/util/tmpfile.h" role="src" />
<file baseinstalldir="/" name="src/core/util/unique_ptr_with_bitset.h" role="src" />
<file baseinstalldir="/" name="src/core/util/upb_utils.h" role="src" /> <file baseinstalldir="/" name="src/core/util/upb_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/util/useful.h" role="src" /> <file baseinstalldir="/" name="src/core/util/useful.h" role="src" />
<file baseinstalldir="/" name="src/core/util/windows/cpu.cc" role="src" /> <file baseinstalldir="/" name="src/core/util/windows/cpu.cc" role="src" />

@ -289,6 +289,17 @@ grpc_cc_library(
deps = ["//:gpr_platform"], deps = ["//:gpr_platform"],
) )
grpc_cc_library(
name = "unique_ptr_with_bitset",
hdrs = ["util/unique_ptr_with_bitset.h"],
external_deps = [
"absl/log:check",
"absl/numeric:bits",
],
language = "c++",
deps = ["//:gpr_platform"],
)
grpc_cc_library( grpc_cc_library(
name = "examine_stack", name = "examine_stack",
srcs = [ srcs = [

@ -254,7 +254,11 @@ uint32_t ChaoticGoodClientTransport::MakeStream(CallHandler call_handler) {
const uint32_t stream_id = next_stream_id_++; const uint32_t stream_id = next_stream_id_++;
stream_map_.emplace(stream_id, call_handler); stream_map_.emplace(stream_id, call_handler);
lock.Release(); lock.Release();
call_handler.OnDone([this, stream_id]() { call_handler.OnDone([this, stream_id](bool cancelled) {
if (cancelled) {
outgoing_frames_.MakeSender().UnbufferedImmediateSend(
CancelFrame{stream_id});
}
MutexLock lock(&mu_); MutexLock lock(&mu_);
stream_map_.erase(stream_id); stream_map_.erase(stream_id);
}); });
@ -317,24 +321,23 @@ void ChaoticGoodClientTransport::StartCall(CallHandler call_handler) {
"outbound_loop", [self = RefAsSubclass<ChaoticGoodClientTransport>(), "outbound_loop", [self = RefAsSubclass<ChaoticGoodClientTransport>(),
call_handler]() mutable { call_handler]() mutable {
const uint32_t stream_id = self->MakeStream(call_handler); const uint32_t stream_id = self->MakeStream(call_handler);
return Map(self->CallOutboundLoop(stream_id, call_handler), return Map(
[stream_id, sender = self->outgoing_frames_.MakeSender()]( self->CallOutboundLoop(stream_id, call_handler),
absl::Status result) mutable { [stream_id, sender = self->outgoing_frames_.MakeSender()](
GRPC_TRACE_LOG(chaotic_good, INFO) absl::Status result) mutable {
<< "CHAOTIC_GOOD: Call " << stream_id GRPC_TRACE_LOG(chaotic_good, INFO)
<< " finished with " << result.ToString(); << "CHAOTIC_GOOD: Call " << stream_id << " finished with "
if (!result.ok()) { << result.ToString();
GRPC_TRACE_LOG(chaotic_good, INFO) if (!result.ok()) {
<< "CHAOTIC_GOOD: Send cancel"; GRPC_TRACE_LOG(chaotic_good, INFO)
CancelFrame frame; << "CHAOTIC_GOOD: Send cancel";
frame.stream_id = stream_id; if (!sender.UnbufferedImmediateSend(CancelFrame{stream_id})) {
if (!sender.UnbufferedImmediateSend(std::move(frame))) { GRPC_TRACE_LOG(chaotic_good, INFO)
GRPC_TRACE_LOG(chaotic_good, INFO) << "CHAOTIC_GOOD: Send cancel failed";
<< "CHAOTIC_GOOD: Send cancel failed"; }
} }
} return result;
return result; });
});
}); });
} }

@ -156,6 +156,9 @@ struct ServerFragmentFrame final : public FrameInterface {
}; };
struct CancelFrame final : public FrameInterface { struct CancelFrame final : public FrameInterface {
CancelFrame() = default;
explicit CancelFrame(uint32_t stream_id) : stream_id(stream_id) {}
absl::Status Deserialize(HPackParser* parser, const FrameHeader& header, absl::Status Deserialize(HPackParser* parser, const FrameHeader& header,
absl::BitGenRef bitsrc, Arena* arena, absl::BitGenRef bitsrc, Arena* arena,
BufferPair buffers, FrameLimits limits) override; BufferPair buffers, FrameLimits limits) override;

@ -72,8 +72,7 @@ auto ChaoticGoodServerTransport::TransportWriteLoop(
} }
auto ChaoticGoodServerTransport::PushFragmentIntoCall( auto ChaoticGoodServerTransport::PushFragmentIntoCall(
CallInitiator call_initiator, ClientFragmentFrame frame, CallInitiator call_initiator, ClientFragmentFrame frame) {
uint32_t stream_id) {
DCHECK(frame.headers == nullptr); DCHECK(frame.headers == nullptr);
GRPC_TRACE_LOG(chaotic_good, INFO) GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: PushFragmentIntoCall: frame=" << frame.ToString(); << "CHAOTIC_GOOD: PushFragmentIntoCall: frame=" << frame.ToString();
@ -84,17 +83,15 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
std::move(frame.message->message)); std::move(frame.message->message));
}, },
[]() -> StatusFlag { return Success{}; }), []() -> StatusFlag { return Success{}; }),
[this, call_initiator, end_of_stream = frame.end_of_stream, [call_initiator, end_of_stream = frame.end_of_stream](
stream_id](StatusFlag status) mutable -> StatusFlag { StatusFlag status) mutable -> StatusFlag {
if (!status.ok() && GRPC_TRACE_FLAG_ENABLED(chaotic_good)) { if (!status.ok() && GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: Failed PushFragmentIntoCall"; LOG(INFO) << "CHAOTIC_GOOD: Failed PushFragmentIntoCall";
} }
if (end_of_stream || !status.ok()) { if (end_of_stream || !status.ok()) {
call_initiator.FinishSends(); call_initiator.FinishSends();
// We have received end_of_stream. It is now safe to remove // Note that we cannot remove from the stream map yet, as we
// the call from the stream map. // may yet receive a cancellation.
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
} }
return Success{}; return Success{};
}); });
@ -102,17 +99,16 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall( auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
absl::optional<CallInitiator> call_initiator, absl::Status error, absl::optional<CallInitiator> call_initiator, absl::Status error,
ClientFragmentFrame frame, uint32_t stream_id) { ClientFragmentFrame frame) {
return If( return If(
call_initiator.has_value() && error.ok(), call_initiator.has_value() && error.ok(),
[this, &call_initiator, &frame, &stream_id]() { [this, &call_initiator, &frame]() {
return Map( return Map(
call_initiator->SpawnWaitable( call_initiator->SpawnWaitable(
"push-fragment", "push-fragment",
[call_initiator, frame = std::move(frame), stream_id, [call_initiator, frame = std::move(frame), this]() mutable {
this]() mutable { return call_initiator->CancelIfFails(
return call_initiator->CancelIfFails(PushFragmentIntoCall( PushFragmentIntoCall(*call_initiator, std::move(frame)));
*call_initiator, std::move(frame), stream_id));
}), }),
[](StatusFlag status) { return StatusCast<absl::Status>(status); }); [](StatusFlag status) { return StatusCast<absl::Status>(status); });
}, },
@ -255,8 +251,7 @@ auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToNewCall(
} }
} }
return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status), return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status),
std::move(fragment_frame), std::move(fragment_frame));
frame_header.stream_id);
} }
auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToExistingCall( auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToExistingCall(
@ -271,8 +266,7 @@ auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToExistingCall(
frame_header, std::move(buffers), arena, fragment_frame, frame_header, std::move(buffers), arena, fragment_frame,
FrameLimits{1024 * 1024 * 1024, aligned_bytes_ - 1}); FrameLimits{1024 * 1024 * 1024, aligned_bytes_ - 1});
return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status), return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status),
std::move(fragment_frame), std::move(fragment_frame));
frame_header.stream_id);
} }
auto ChaoticGoodServerTransport::ReadOneFrame(ChaoticGoodTransport& transport) { auto ChaoticGoodServerTransport::ReadOneFrame(ChaoticGoodTransport& transport) {
@ -305,6 +299,10 @@ auto ChaoticGoodServerTransport::ReadOneFrame(ChaoticGoodTransport& transport) {
[this, &frame_header]() { [this, &frame_header]() {
absl::optional<CallInitiator> call_initiator = absl::optional<CallInitiator> call_initiator =
ExtractStream(frame_header.stream_id); ExtractStream(frame_header.stream_id);
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "Cancel stream " << frame_header.stream_id
<< (call_initiator.has_value() ? " (active)"
: " (not found)");
return If( return If(
call_initiator.has_value(), call_initiator.has_value(),
[&call_initiator]() { [&call_initiator]() {
@ -410,6 +408,8 @@ void ChaoticGoodServerTransport::AbortWithError() {
absl::optional<CallInitiator> ChaoticGoodServerTransport::LookupStream( absl::optional<CallInitiator> ChaoticGoodServerTransport::LookupStream(
uint32_t stream_id) { uint32_t stream_id) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " LookupStream " << stream_id;
MutexLock lock(&mu_); MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id); auto it = stream_map_.find(stream_id);
if (it == stream_map_.end()) return absl::nullopt; if (it == stream_map_.end()) return absl::nullopt;
@ -418,6 +418,8 @@ absl::optional<CallInitiator> ChaoticGoodServerTransport::LookupStream(
absl::optional<CallInitiator> ChaoticGoodServerTransport::ExtractStream( absl::optional<CallInitiator> ChaoticGoodServerTransport::ExtractStream(
uint32_t stream_id) { uint32_t stream_id) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " ExtractStream " << stream_id;
MutexLock lock(&mu_); MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id); auto it = stream_map_.find(stream_id);
if (it == stream_map_.end()) return absl::nullopt; if (it == stream_map_.end()) return absl::nullopt;
@ -428,6 +430,8 @@ absl::optional<CallInitiator> ChaoticGoodServerTransport::ExtractStream(
absl::Status ChaoticGoodServerTransport::NewStream( absl::Status ChaoticGoodServerTransport::NewStream(
uint32_t stream_id, CallInitiator call_initiator) { uint32_t stream_id, CallInitiator call_initiator) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " NewStream " << stream_id;
MutexLock lock(&mu_); MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id); auto it = stream_map_.find(stream_id);
if (it != stream_map_.end()) { if (it != stream_map_.end()) {
@ -437,10 +441,20 @@ absl::Status ChaoticGoodServerTransport::NewStream(
return absl::InternalError("Stream id is not increasing"); return absl::InternalError("Stream id is not increasing");
} }
stream_map_.emplace(stream_id, call_initiator); stream_map_.emplace(stream_id, call_initiator);
call_initiator.OnDone([this, stream_id]() { call_initiator.OnDone(
MutexLock lock(&mu_); [self = RefAsSubclass<ChaoticGoodServerTransport>(), stream_id](bool) {
stream_map_.erase(stream_id); GRPC_TRACE_LOG(chaotic_good, INFO)
}); << "CHAOTIC_GOOD " << self.get() << " OnDone " << stream_id;
absl::optional<CallInitiator> call_initiator =
self->ExtractStream(stream_id);
if (call_initiator.has_value()) {
auto c = std::move(*call_initiator);
c.SpawnInfallible("cancel", [c]() mutable {
c.Cancel();
return Empty{};
});
}
});
return absl::OkStatus(); return absl::OkStatus();
} }

@ -131,10 +131,9 @@ class ChaoticGoodServerTransport final : public ServerTransport {
FrameHeader frame_header, BufferPair buffers, FrameHeader frame_header, BufferPair buffers,
ChaoticGoodTransport& transport); ChaoticGoodTransport& transport);
auto MaybePushFragmentIntoCall(absl::optional<CallInitiator> call_initiator, auto MaybePushFragmentIntoCall(absl::optional<CallInitiator> call_initiator,
absl::Status error, ClientFragmentFrame frame, absl::Status error, ClientFragmentFrame frame);
uint32_t stream_id);
auto PushFragmentIntoCall(CallInitiator call_initiator, auto PushFragmentIntoCall(CallInitiator call_initiator,
ClientFragmentFrame frame, uint32_t stream_id); ClientFragmentFrame frame);
RefCountedPtr<UnstartedCallDestination> call_destination_; RefCountedPtr<UnstartedCallDestination> call_destination_;
const RefCountedPtr<CallArenaAllocator> call_arena_allocator_; const RefCountedPtr<CallArenaAllocator> call_arena_allocator_;

@ -713,7 +713,7 @@ class HPackParser::Parser {
LOG(INFO) << "HTTP:" << log_info_.stream_id << ":" << type << ":" LOG(INFO) << "HTTP:" << log_info_.stream_id << ":" << type << ":"
<< (log_info_.is_client ? "CLI" : "SVR") << ": " << (log_info_.is_client ? "CLI" : "SVR") << ": "
<< memento.md.DebugString() << memento.md.DebugString()
<< (memento.parse_status == nullptr << (memento.parse_status.get() == nullptr
? "" ? ""
: absl::StrCat( : absl::StrCat(
" (parse error: ", " (parse error: ",
@ -724,7 +724,7 @@ class HPackParser::Parser {
void EmitHeader(const HPackTable::Memento& md) { void EmitHeader(const HPackTable::Memento& md) {
// Pass up to the transport // Pass up to the transport
state_.frame_length += md.md.transport_size(); state_.frame_length += md.md.transport_size();
if (md.parse_status != nullptr) { if (md.parse_status.get() != nullptr) {
// Reject any requests with invalid metadata. // Reject any requests with invalid metadata.
input_->SetErrorAndContinueParsing(*md.parse_status); input_->SetErrorAndContinueParsing(*md.parse_status);
} }
@ -974,7 +974,7 @@ class HPackParser::Parser {
} else { } else {
const auto* memento = absl::get<const HPackTable::Memento*>(state_.key); const auto* memento = absl::get<const HPackTable::Memento*>(state_.key);
key_string = memento->md.key(); key_string = memento->md.key();
if (state_.field_error.ok() && memento->parse_status != nullptr) { if (state_.field_error.ok() && memento->parse_status.get() != nullptr) {
input_->SetErrorAndContinueParsing(*memento->parse_status); input_->SetErrorAndContinueParsing(*memento->parse_status);
} }
} }

@ -37,6 +37,7 @@
#include "src/core/ext/transport/chttp2/transport/hpack_parse_result.h" #include "src/core/ext/transport/chttp2/transport/hpack_parse_result.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/slice/slice.h" #include "src/core/lib/slice/slice.h"
#include "src/core/telemetry/stats.h"
namespace grpc_core { namespace grpc_core {
@ -47,6 +48,10 @@ void HPackTable::MementoRingBuffer::Put(Memento m) {
return entries_.push_back(std::move(m)); return entries_.push_back(std::move(m));
} }
size_t index = (first_entry_ + num_entries_) % max_entries_; size_t index = (first_entry_ + num_entries_) % max_entries_;
if (timestamp_index_ == kNoTimestamp) {
timestamp_index_ = index;
timestamp_ = Timestamp::Now();
}
entries_[index] = std::move(m); entries_[index] = std::move(m);
++num_entries_; ++num_entries_;
} }
@ -54,12 +59,31 @@ void HPackTable::MementoRingBuffer::Put(Memento m) {
auto HPackTable::MementoRingBuffer::PopOne() -> Memento { auto HPackTable::MementoRingBuffer::PopOne() -> Memento {
CHECK_GT(num_entries_, 0u); CHECK_GT(num_entries_, 0u);
size_t index = first_entry_ % max_entries_; size_t index = first_entry_ % max_entries_;
if (index == timestamp_index_) {
global_stats().IncrementHttp2HpackEntryLifetime(
(Timestamp::Now() - timestamp_).millis());
timestamp_index_ = kNoTimestamp;
}
++first_entry_; ++first_entry_;
--num_entries_; --num_entries_;
return std::move(entries_[index]); auto& entry = entries_[index];
if (!entry.parse_status.TestBit(Memento::kUsedBit)) {
global_stats().IncrementHttp2HpackMisses();
}
return std::move(entry);
} }
auto HPackTable::MementoRingBuffer::Lookup(uint32_t index) const auto HPackTable::MementoRingBuffer::Lookup(uint32_t index) -> const Memento* {
if (index >= num_entries_) return nullptr;
uint32_t offset = (num_entries_ - 1u - index + first_entry_) % max_entries_;
auto& entry = entries_[offset];
const bool was_used = entry.parse_status.TestBit(Memento::kUsedBit);
entry.parse_status.SetBit(Memento::kUsedBit);
if (!was_used) global_stats().IncrementHttp2HpackHits();
return &entry;
}
auto HPackTable::MementoRingBuffer::Peek(uint32_t index) const
-> const Memento* { -> const Memento* {
if (index >= num_entries_) return nullptr; if (index >= num_entries_) return nullptr;
uint32_t offset = (num_entries_ - 1u - index + first_entry_) % max_entries_; uint32_t offset = (num_entries_ - 1u - index + first_entry_) % max_entries_;
@ -79,14 +103,22 @@ void HPackTable::MementoRingBuffer::Rebuild(uint32_t max_entries) {
entries_.swap(entries); entries_.swap(entries);
} }
void HPackTable::MementoRingBuffer::ForEach( template <typename F>
absl::FunctionRef<void(uint32_t, const Memento&)> f) const { void HPackTable::MementoRingBuffer::ForEach(F f) const {
uint32_t index = 0; uint32_t index = 0;
while (auto* m = Lookup(index++)) { while (auto* m = Peek(index++)) {
f(index, *m); f(index, *m);
} }
} }
HPackTable::MementoRingBuffer::~MementoRingBuffer() {
ForEach([](uint32_t, const Memento& m) {
if (!m.parse_status.TestBit(Memento::kUsedBit)) {
global_stats().IncrementHttp2HpackMisses();
}
});
}
// Evict one element from the table // Evict one element from the table
void HPackTable::EvictOne() { void HPackTable::EvictOne() {
auto first_entry = entries_.PopOne(); auto first_entry = entries_.PopOne();

@ -21,6 +21,8 @@
#include <stdint.h> #include <stdint.h>
#include <cstdint>
#include <limits>
#include <memory> #include <memory>
#include <string> #include <string>
#include <vector> #include <vector>
@ -34,6 +36,7 @@
#include "src/core/lib/gprpp/no_destruct.h" #include "src/core/lib/gprpp/no_destruct.h"
#include "src/core/lib/transport/metadata_batch.h" #include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/parsed_metadata.h" #include "src/core/lib/transport/parsed_metadata.h"
#include "src/core/util/unique_ptr_with_bitset.h"
namespace grpc_core { namespace grpc_core {
@ -54,11 +57,14 @@ class HPackTable {
struct Memento { struct Memento {
ParsedMetadata<grpc_metadata_batch> md; ParsedMetadata<grpc_metadata_batch> md;
std::unique_ptr<HpackParseResult> parse_status; // Alongside parse_status we store one bit indicating whether this memento
// has been looked up (and therefore consumed) or not.
UniquePtrWithBitset<HpackParseResult, 1> parse_status;
static const int kUsedBit = 0;
}; };
// Lookup, but don't ref. // Lookup, but don't ref.
const Memento* Lookup(uint32_t index) const { const Memento* Lookup(uint32_t index) {
// Static table comes first, just return an entry from it. // Static table comes first, just return an entry from it.
// NB: This imposes the constraint that the first // NB: This imposes the constraint that the first
// GRPC_CHTTP2_LAST_STATIC_ENTRY entries in the core static metadata table // GRPC_CHTTP2_LAST_STATIC_ENTRY entries in the core static metadata table
@ -97,6 +103,14 @@ class HPackTable {
class MementoRingBuffer { class MementoRingBuffer {
public: public:
MementoRingBuffer() {}
~MementoRingBuffer();
MementoRingBuffer(const MementoRingBuffer&) = delete;
MementoRingBuffer& operator=(const MementoRingBuffer&) = delete;
MementoRingBuffer(MementoRingBuffer&&) = default;
MementoRingBuffer& operator=(MementoRingBuffer&&) = default;
// Rebuild this buffer with a new max_entries_ size. // Rebuild this buffer with a new max_entries_ size.
void Rebuild(uint32_t max_entries); void Rebuild(uint32_t max_entries);
@ -109,10 +123,11 @@ class HPackTable {
Memento PopOne(); Memento PopOne();
// Lookup the entry at index, or return nullptr if none exists. // Lookup the entry at index, or return nullptr if none exists.
const Memento* Lookup(uint32_t index) const; const Memento* Lookup(uint32_t index);
const Memento* Peek(uint32_t index) const;
void ForEach(absl::FunctionRef<void(uint32_t dynamic_index, const Memento&)> template <typename F>
f) const; void ForEach(F f) const;
uint32_t max_entries() const { return max_entries_; } uint32_t max_entries() const { return max_entries_; }
uint32_t num_entries() const { return num_entries_; } uint32_t num_entries() const { return num_entries_; }
@ -126,11 +141,17 @@ class HPackTable {
// Maximum number of entries we could possibly fit in the table, given // Maximum number of entries we could possibly fit in the table, given
// defined overheads. // defined overheads.
uint32_t max_entries_ = hpack_constants::kInitialTableEntries; uint32_t max_entries_ = hpack_constants::kInitialTableEntries;
// Which index holds a timestamp (or kNoTimestamp if none do).
static constexpr uint32_t kNoTimestamp =
std::numeric_limits<uint32_t>::max();
uint32_t timestamp_index_ = kNoTimestamp;
// The timestamp associated with timestamp_entry_.
Timestamp timestamp_;
std::vector<Memento> entries_; std::vector<Memento> entries_;
}; };
const Memento* LookupDynamic(uint32_t index) const { const Memento* LookupDynamic(uint32_t index) {
// Not static - find the value in the list of valid entries // Not static - find the value in the list of valid entries
const uint32_t tbl_index = index - (hpack_constants::kLastStaticEntry + 1); const uint32_t tbl_index = index - (hpack_constants::kLastStaticEntry + 1);
return entries_.Lookup(tbl_index); return entries_.Lookup(tbl_index);

@ -108,7 +108,6 @@ struct secure_endpoint : public grpc_endpoint {
} }
~secure_endpoint() { ~secure_endpoint() {
memory_owner.Reset();
tsi_frame_protector_destroy(protector); tsi_frame_protector_destroy(protector);
tsi_zero_copy_grpc_protector_destroy(zero_copy_protector); tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
grpc_slice_buffer_destroy(&source_buffer); grpc_slice_buffer_destroy(&source_buffer);
@ -380,9 +379,12 @@ static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
static void on_write(void* user_data, grpc_error_handle error) { static void on_write(void* user_data, grpc_error_handle error) {
secure_endpoint* ep = static_cast<secure_endpoint*>(user_data); secure_endpoint* ep = static_cast<secure_endpoint*>(user_data);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, std::exchange(ep->write_cb, nullptr), grpc_closure* cb = ep->write_cb;
std::move(error)); ep->write_cb = nullptr;
SECURE_ENDPOINT_UNREF(ep, "write"); SECURE_ENDPOINT_UNREF(ep, "write");
grpc_core::EnsureRunInExecCtx([cb, error = std::move(error)]() {
grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
});
} }
static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices, static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
@ -505,6 +507,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
static void endpoint_destroy(grpc_endpoint* secure_ep) { static void endpoint_destroy(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep); secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
ep->wrapped_ep.reset(); ep->wrapped_ep.reset();
ep->memory_owner.Reset();
SECURE_ENDPOINT_UNREF(ep, "destroy"); SECURE_ENDPOINT_UNREF(ep, "destroy");
} }

@ -88,27 +88,27 @@ class SecurityHandshaker : public Handshaker {
private: private:
grpc_error_handle DoHandshakerNextLocked(const unsigned char* bytes_received, grpc_error_handle DoHandshakerNextLocked(const unsigned char* bytes_received,
size_t bytes_received_size); size_t bytes_received_size)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
grpc_error_handle OnHandshakeNextDoneLocked( grpc_error_handle OnHandshakeNextDoneLocked(
tsi_result result, const unsigned char* bytes_to_send, tsi_result result, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result); size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result)
void HandshakeFailedLocked(absl::Status error); ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void HandshakeFailedLocked(absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void Finish(absl::Status status); void Finish(absl::Status status);
void OnHandshakeDataReceivedFromPeerFn(absl::Status error); void OnHandshakeDataReceivedFromPeerFn(absl::Status error);
void OnHandshakeDataSentToPeerFn(absl::Status error); void OnHandshakeDataSentToPeerFn(absl::Status error);
static void OnHandshakeDataReceivedFromPeerFnScheduler( void OnHandshakeDataReceivedFromPeerFnScheduler(grpc_error_handle error);
void* arg, grpc_error_handle error); void OnHandshakeDataSentToPeerFnScheduler(grpc_error_handle error);
static void OnHandshakeDataSentToPeerFnScheduler(void* arg,
grpc_error_handle error);
static void OnHandshakeNextDoneGrpcWrapper( static void OnHandshakeNextDoneGrpcWrapper(
tsi_result result, void* user_data, const unsigned char* bytes_to_send, tsi_result result, void* user_data, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result); size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
static void OnPeerCheckedFn(void* arg, grpc_error_handle error); void OnPeerCheckedFn(grpc_error_handle error);
void OnPeerCheckedInner(grpc_error_handle error);
size_t MoveReadBufferIntoHandshakeBuffer(); size_t MoveReadBufferIntoHandshakeBuffer();
grpc_error_handle CheckPeerLocked(); grpc_error_handle CheckPeerLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// State set at creation time. // State set at creation time.
tsi_handshaker* handshaker_; tsi_handshaker* handshaker_;
@ -125,13 +125,11 @@ class SecurityHandshaker : public Handshaker {
size_t handshake_buffer_size_; size_t handshake_buffer_size_;
unsigned char* handshake_buffer_; unsigned char* handshake_buffer_;
SliceBuffer outgoing_; SliceBuffer outgoing_;
grpc_closure on_handshake_data_sent_to_peer_;
grpc_closure on_handshake_data_received_from_peer_;
grpc_closure on_peer_checked_;
RefCountedPtr<grpc_auth_context> auth_context_; RefCountedPtr<grpc_auth_context> auth_context_;
tsi_handshaker_result* handshaker_result_ = nullptr; tsi_handshaker_result* handshaker_result_ = nullptr;
size_t max_frame_size_ = 0; size_t max_frame_size_ = 0;
std::string tsi_handshake_error_; std::string tsi_handshake_error_;
grpc_closure* on_peer_checked_ ABSL_GUARDED_BY(mu_) = nullptr;
}; };
SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker, SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
@ -143,10 +141,7 @@ SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
handshake_buffer_( handshake_buffer_(
static_cast<uint8_t*>(gpr_malloc(handshake_buffer_size_))), static_cast<uint8_t*>(gpr_malloc(handshake_buffer_size_))),
max_frame_size_( max_frame_size_(
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) { std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {}
GRPC_CLOSURE_INIT(&on_peer_checked_, &SecurityHandshaker::OnPeerCheckedFn,
this, grpc_schedule_on_exec_ctx);
}
SecurityHandshaker::~SecurityHandshaker() { SecurityHandshaker::~SecurityHandshaker() {
tsi_handshaker_destroy(handshaker_); tsi_handshaker_destroy(handshaker_);
@ -220,8 +215,9 @@ MakeChannelzSecurityFromAuthContext(grpc_auth_context* auth_context) {
} // namespace } // namespace
void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) { void SecurityHandshaker::OnPeerCheckedFn(grpc_error_handle error) {
MutexLock lock(&mu_); MutexLock lock(&mu_);
on_peer_checked_ = nullptr;
if (!error.ok() || is_shutdown_) { if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(error); HandshakeFailedLocked(error);
return; return;
@ -317,11 +313,6 @@ void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
Finish(absl::OkStatus()); Finish(absl::OkStatus());
} }
void SecurityHandshaker::OnPeerCheckedFn(void* arg, grpc_error_handle error) {
RefCountedPtr<SecurityHandshaker>(static_cast<SecurityHandshaker*>(arg))
->OnPeerCheckedInner(error);
}
grpc_error_handle SecurityHandshaker::CheckPeerLocked() { grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
tsi_peer peer; tsi_peer peer;
tsi_result result = tsi_result result =
@ -330,8 +321,12 @@ grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
return GRPC_ERROR_CREATE(absl::StrCat("Peer extraction failed (", return GRPC_ERROR_CREATE(absl::StrCat("Peer extraction failed (",
tsi_result_to_string(result), ")")); tsi_result_to_string(result), ")"));
} }
on_peer_checked_ = NewClosure(
[self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
self->OnPeerCheckedFn(std::move(status));
});
connector_->check_peer(peer, args_->endpoint.get(), args_->args, connector_->check_peer(peer, args_->endpoint.get(), args_->args,
&auth_context_, &on_peer_checked_); &auth_context_, on_peer_checked_);
grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name( grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
auth_context_.get(), GRPC_TRANSPORT_SECURITY_LEVEL_PROPERTY_NAME); auth_context_.get(), GRPC_TRANSPORT_SECURITY_LEVEL_PROPERTY_NAME);
const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it); const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it);
@ -356,10 +351,10 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
CHECK_EQ(bytes_to_send_size, 0u); CHECK_EQ(bytes_to_send_size, 0u);
grpc_endpoint_read( grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(), args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT( NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
&on_handshake_data_received_from_peer_, absl::Status status) {
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler, self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
this, grpc_schedule_on_exec_ctx), }),
/*urgent=*/true, /*min_progress_size=*/1); /*urgent=*/true, /*min_progress_size=*/1);
return error; return error;
} }
@ -387,19 +382,19 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
reinterpret_cast<const char*>(bytes_to_send), bytes_to_send_size)); reinterpret_cast<const char*>(bytes_to_send), bytes_to_send_size));
grpc_endpoint_write( grpc_endpoint_write(
args_->endpoint.get(), outgoing_.c_slice_buffer(), args_->endpoint.get(), outgoing_.c_slice_buffer(),
GRPC_CLOSURE_INIT( NewClosure(
&on_handshake_data_sent_to_peer_, [self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
&SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler, this, self->OnHandshakeDataSentToPeerFnScheduler(std::move(status));
grpc_schedule_on_exec_ctx), }),
nullptr, /*max_frame_size=*/INT_MAX); nullptr, /*max_frame_size=*/INT_MAX);
} else if (handshaker_result == nullptr) { } else if (handshaker_result == nullptr) {
// There is nothing to send, but need to read from peer. // There is nothing to send, but need to read from peer.
grpc_endpoint_read( grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(), args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT( NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
&on_handshake_data_received_from_peer_, absl::Status status) {
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler, self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
this, grpc_schedule_on_exec_ctx), }),
/*urgent=*/true, /*min_progress_size=*/1); /*urgent=*/true, /*min_progress_size=*/1);
} else { } else {
// Handshake has finished, check peer and so on. // Handshake has finished, check peer and so on.
@ -418,8 +413,6 @@ void SecurityHandshaker::OnHandshakeNextDoneGrpcWrapper(
result, bytes_to_send, bytes_to_send_size, handshaker_result); result, bytes_to_send, bytes_to_send_size, handshaker_result);
if (!error.ok()) { if (!error.ok()) {
h->HandshakeFailedLocked(std::move(error)); h->HandshakeFailedLocked(std::move(error));
} else {
h.release(); // Avoid unref
} }
} }
@ -429,13 +422,15 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
const unsigned char* bytes_to_send = nullptr; const unsigned char* bytes_to_send = nullptr;
size_t bytes_to_send_size = 0; size_t bytes_to_send_size = 0;
tsi_handshaker_result* hs_result = nullptr; tsi_handshaker_result* hs_result = nullptr;
auto self = RefAsSubclass<SecurityHandshaker>();
tsi_result result = tsi_handshaker_next( tsi_result result = tsi_handshaker_next(
handshaker_, bytes_received, bytes_received_size, &bytes_to_send, handshaker_, bytes_received, bytes_received_size, &bytes_to_send,
&bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper, this, &bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper,
&tsi_handshake_error_); self.get(), &tsi_handshake_error_);
if (result == TSI_ASYNC) { if (result == TSI_ASYNC) {
// Handshaker operating asynchronously. Nothing else to do here; // Handshaker operating asynchronously. Callback will be invoked in a TSI
// callback will be invoked in a TSI thread. // thread. We no longer own the ref held in self.
self.release();
return absl::OkStatus(); return absl::OkStatus();
} }
// Handshaker returned synchronously. Invoke callback directly in // Handshaker returned synchronously. Invoke callback directly in
@ -449,18 +444,18 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
// TODO(roth): This will no longer be necessary once we migrate to the // TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API. // EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler( void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error) { grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg); args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
handshaker->args_->event_engine->Run( error = std::move(error)]() mutable {
[handshaker, error = std::move(error)]() mutable { ApplicationCallbackExecCtx callback_exec_ctx;
ApplicationCallbackExecCtx callback_exec_ctx; ExecCtx exec_ctx;
ExecCtx exec_ctx; self->OnHandshakeDataReceivedFromPeerFn(std::move(error));
handshaker->OnHandshakeDataReceivedFromPeerFn(std::move(error)); // Avoid destruction outside of an ExecCtx (since this is non-cancelable).
}); self.reset();
});
} }
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) { void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_); MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) { if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked( HandshakeFailedLocked(
@ -473,8 +468,6 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
error = DoHandshakerNextLocked(handshake_buffer_, bytes_received_size); error = DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) { if (!error.ok()) {
HandshakeFailedLocked(std::move(error)); HandshakeFailedLocked(std::move(error));
} else {
handshaker.release(); // Avoid unref
} }
} }
@ -483,18 +476,18 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
// TODO(roth): This will no longer be necessary once we migrate to the // TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API. // EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler( void SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler(
void* arg, grpc_error_handle error) { grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg); args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
handshaker->args_->event_engine->Run( error = std::move(error)]() mutable {
[handshaker, error = std::move(error)]() mutable { ApplicationCallbackExecCtx callback_exec_ctx;
ApplicationCallbackExecCtx callback_exec_ctx; ExecCtx exec_ctx;
ExecCtx exec_ctx; self->OnHandshakeDataSentToPeerFn(std::move(error));
handshaker->OnHandshakeDataSentToPeerFn(std::move(error)); // Avoid destruction outside of an ExecCtx (since this is non-cancelable).
}); self.reset();
});
} }
void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) { void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_); MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) { if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked( HandshakeFailedLocked(
@ -505,10 +498,10 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
if (handshaker_result_ == nullptr) { if (handshaker_result_ == nullptr) {
grpc_endpoint_read( grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(), args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT( NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
&on_handshake_data_received_from_peer_, absl::Status status) {
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler, self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
this, grpc_schedule_on_exec_ctx), }),
/*urgent=*/true, /*min_progress_size=*/1); /*urgent=*/true, /*min_progress_size=*/1);
} else { } else {
error = CheckPeerLocked(); error = CheckPeerLocked();
@ -517,7 +510,6 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
return; return;
} }
} }
handshaker.release(); // Avoid unref
} }
// //
@ -528,7 +520,7 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
MutexLock lock(&mu_); MutexLock lock(&mu_);
if (!is_shutdown_) { if (!is_shutdown_) {
is_shutdown_ = true; is_shutdown_ = true;
connector_->cancel_check_peer(&on_peer_checked_, std::move(error)); connector_->cancel_check_peer(on_peer_checked_, std::move(error));
tsi_handshaker_shutdown(handshaker_); tsi_handshaker_shutdown(handshaker_);
args_->endpoint.reset(); args_->endpoint.reset();
} }
@ -537,7 +529,6 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
void SecurityHandshaker::DoHandshake( void SecurityHandshaker::DoHandshake(
HandshakerArgs* args, HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) { absl::AnyInvocable<void(absl::Status)> on_handshake_done) {
auto ref = Ref();
MutexLock lock(&mu_); MutexLock lock(&mu_);
args_ = args; args_ = args;
on_handshake_done_ = std::move(on_handshake_done); on_handshake_done_ = std::move(on_handshake_done);
@ -546,8 +537,6 @@ void SecurityHandshaker::DoHandshake(
DoHandshakerNextLocked(handshake_buffer_, bytes_received_size); DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) { if (!error.ok()) {
HandshakeFailedLocked(error); HandshakeFailedLocked(error);
} else {
ref.release(); // Avoid unref
} }
} }

@ -207,10 +207,8 @@ void BaseCallData::CapturedBatch::ResumeWith(Flusher* releaser) {
uintptr_t& refcnt = *RefCountField(batch); uintptr_t& refcnt = *RefCountField(batch);
if (refcnt == 0) { if (refcnt == 0) {
// refcnt==0 ==> cancelled // refcnt==0 ==> cancelled
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << releaser->call()->DebugTag() << releaser->call()->DebugTag() << "RESUME BATCH REQUEST CANCELLED";
<< "RESUME BATCH REQUEST CANCELLED";
}
return; return;
} }
if (--refcnt == 0) { if (--refcnt == 0) {
@ -266,10 +264,9 @@ BaseCallData::Flusher::~Flusher() {
auto* batch = static_cast<grpc_transport_stream_op_batch*>(p); auto* batch = static_cast<grpc_transport_stream_op_batch*>(p);
BaseCallData* call = BaseCallData* call =
static_cast<BaseCallData*>(batch->handler_private.extra_arg); static_cast<BaseCallData*>(batch->handler_private.extra_arg);
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << "FLUSHER:forward batch via closure: " << "FLUSHER:forward batch via closure: "
<< grpc_transport_stream_op_batch_string(batch, false); << grpc_transport_stream_op_batch_string(batch, false);
}
grpc_call_next_op(call->elem(), batch); grpc_call_next_op(call->elem(), batch);
GRPC_CALL_STACK_UNREF(call->call_stack(), "flusher_batch"); GRPC_CALL_STACK_UNREF(call->call_stack(), "flusher_batch");
}; };
@ -278,10 +275,9 @@ BaseCallData::Flusher::~Flusher() {
if (call_->call() != nullptr && call_->call()->traced()) { if (call_->call() != nullptr && call_->call()->traced()) {
batch->is_traced = true; batch->is_traced = true;
} }
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << "FLUSHER:queue batch to forward in closure: " << "FLUSHER:queue batch to forward in closure: "
<< grpc_transport_stream_op_batch_string(release_[i], false); << grpc_transport_stream_op_batch_string(release_[i], false);
}
batch->handler_private.extra_arg = call_; batch->handler_private.extra_arg = call_;
GRPC_CLOSURE_INIT(&batch->handler_private.closure, call_next_op, batch, GRPC_CLOSURE_INIT(&batch->handler_private.closure, call_next_op, batch,
nullptr); nullptr);
@ -290,10 +286,9 @@ BaseCallData::Flusher::~Flusher() {
"flusher_batch"); "flusher_batch");
} }
call_closures_.RunClosuresWithoutYielding(call_->call_combiner()); call_closures_.RunClosuresWithoutYielding(call_->call_combiner());
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << "FLUSHER:forward batch: " << "FLUSHER:forward batch: "
<< grpc_transport_stream_op_batch_string(release_[0], false); << grpc_transport_stream_op_batch_string(release_[0], false);
}
if (call_->call() != nullptr && call_->call()->traced()) { if (call_->call() != nullptr && call_->call()->traced()) {
release_[0]->is_traced = true; release_[0]->is_traced = true;
} }
@ -331,10 +326,8 @@ const char* BaseCallData::SendMessage::StateString(State state) {
} }
void BaseCallData::SendMessage::StartOp(CapturedBatch batch) { void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << base_->LogTag() << base_->LogTag() << " SendMessage.StartOp st=" << StateString(state_);
<< " SendMessage.StartOp st=" << StateString(state_);
}
switch (state_) { switch (state_) {
case State::kInitial: case State::kInitial:
state_ = State::kGotBatchNoPipe; state_ = State::kGotBatchNoPipe;
@ -359,10 +352,8 @@ void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
template <typename T> template <typename T>
void BaseCallData::SendMessage::GotPipe(T* pipe_end) { void BaseCallData::SendMessage::GotPipe(T* pipe_end) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << base_->LogTag() << base_->LogTag() << " SendMessage.GotPipe st=" << StateString(state_);
<< " SendMessage.GotPipe st=" << StateString(state_);
}
CHECK_NE(pipe_end, nullptr); CHECK_NE(pipe_end, nullptr);
switch (state_) { switch (state_) {
case State::kInitial: case State::kInitial:
@ -615,10 +606,9 @@ const char* BaseCallData::ReceiveMessage::StateString(State state) {
} }
void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) { void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << base_->LogTag() << base_->LogTag()
<< " ReceiveMessage.StartOp st=" << StateString(state_); << " ReceiveMessage.StartOp st=" << StateString(state_);
}
switch (state_) { switch (state_) {
case State::kInitial: case State::kInitial:
state_ = State::kForwardedBatchNoPipe; state_ = State::kForwardedBatchNoPipe;
@ -656,10 +646,9 @@ void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
template <typename T> template <typename T>
void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) { void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << base_->LogTag() << base_->LogTag()
<< " ReceiveMessage.GotPipe st=" << StateString(state_); << " ReceiveMessage.GotPipe st=" << StateString(state_);
}
switch (state_) { switch (state_) {
case State::kInitial: case State::kInitial:
state_ = State::kIdle; state_ = State::kIdle;
@ -901,10 +890,9 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
case State::kPulledFromPipe: { case State::kPulledFromPipe: {
CHECK(push_.has_value()); CHECK(push_.has_value());
if ((*push_)().ready()) { if ((*push_)().ready()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << base_->LogTag() << base_->LogTag()
<< " ReceiveMessage.WakeInsideCombiner push complete"; << " ReceiveMessage.WakeInsideCombiner push complete";
}
if (state_ == State::kCompletedWhilePulledFromPipe) { if (state_ == State::kCompletedWhilePulledFromPipe) {
interceptor()->Push()->Close(); interceptor()->Push()->Close();
state_ = State::kCancelled; state_ = State::kCancelled;
@ -1016,10 +1004,9 @@ class ClientCallData::PollContext {
void Run() { void Run() {
DCHECK(HasContext<Arena>()); DCHECK(HasContext<Arena>());
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << self_->LogTag() << " ClientCallData.PollContext.Run " << self_->LogTag() << " ClientCallData.PollContext.Run "
<< self_->DebugString(); << self_->DebugString();
}
CHECK(have_scoped_activity_); CHECK(have_scoped_activity_);
repoll_ = false; repoll_ = false;
if (self_->send_message() != nullptr) { if (self_->send_message() != nullptr) {
@ -1664,10 +1651,8 @@ void ClientCallData::HookRecvTrailingMetadata(CapturedBatch batch) {
// - return a wrapper around PollTrailingMetadata as the promise. // - return a wrapper around PollTrailingMetadata as the promise.
ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise( ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
CallArgs call_args) { CallArgs call_args) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << LogTag() << " ClientCallData.MakeNextPromise " << LogTag() << " ClientCallData.MakeNextPromise " << DebugString();
<< DebugString();
}
CHECK_NE(poll_ctx_, nullptr); CHECK_NE(poll_ctx_, nullptr);
CHECK(send_initial_state_ == SendInitialState::kQueued); CHECK(send_initial_state_ == SendInitialState::kQueued);
send_initial_metadata_batch_->payload->send_initial_metadata send_initial_metadata_batch_->payload->send_initial_metadata
@ -1727,10 +1712,8 @@ ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
// All polls: await receiving the trailing metadata, then return it to the // All polls: await receiving the trailing metadata, then return it to the
// application. // application.
Poll<ServerMetadataHandle> ClientCallData::PollTrailingMetadata() { Poll<ServerMetadataHandle> ClientCallData::PollTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << LogTag() << " ClientCallData.PollTrailingMetadata " << LogTag() << " ClientCallData.PollTrailingMetadata " << DebugString();
<< DebugString();
}
CHECK_NE(poll_ctx_, nullptr); CHECK_NE(poll_ctx_, nullptr);
if (send_initial_state_ == SendInitialState::kQueued) { if (send_initial_state_ == SendInitialState::kQueued) {
// First poll: pass the send_initial_metadata op down the stack. // First poll: pass the send_initial_metadata op down the stack.
@ -2275,10 +2258,9 @@ ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
// All polls: await sending the trailing metadata, then foward it down the // All polls: await sending the trailing metadata, then foward it down the
// stack. // stack.
Poll<ServerMetadataHandle> ServerCallData::PollTrailingMetadata() { Poll<ServerMetadataHandle> ServerCallData::PollTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << LogTag() << LogTag()
<< " PollTrailingMetadata: " << StateString(send_trailing_state_); << " PollTrailingMetadata: " << StateString(send_trailing_state_);
}
switch (send_trailing_state_) { switch (send_trailing_state_) {
case SendTrailingState::kInitial: case SendTrailingState::kInitial:
case SendTrailingState::kQueuedBehindSendMessage: case SendTrailingState::kQueuedBehindSendMessage:
@ -2306,10 +2288,9 @@ void ServerCallData::RecvTrailingMetadataReadyCallback(
} }
void ServerCallData::RecvTrailingMetadataReady(grpc_error_handle error) { void ServerCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) { GRPC_TRACE_LOG(channel, INFO)
LOG(INFO) << LogTag() << ": RecvTrailingMetadataReady error=" << error << LogTag() << ": RecvTrailingMetadataReady error=" << error
<< " md=" << recv_trailing_metadata_->DebugString(); << " md=" << recv_trailing_metadata_->DebugString();
}
Flusher flusher(this); Flusher flusher(this);
PollContext poll_ctx(this, &flusher); PollContext poll_ctx(this, &flusher);
Completed(error, recv_trailing_metadata_->get(GrpcTarPit()).has_value(), Completed(error, recv_trailing_metadata_->get(GrpcTarPit()).has_value(),

@ -97,6 +97,8 @@ absl::Status AresStatusToAbslStatus(int status, absl::string_view error_msg) {
return absl::UnimplementedError(error_msg); return absl::UnimplementedError(error_msg);
case ARES_ENOTFOUND: case ARES_ENOTFOUND:
return absl::NotFoundError(error_msg); return absl::NotFoundError(error_msg);
case ARES_ECONNREFUSED:
return absl::UnavailableError(error_msg);
default: default:
return absl::UnknownError(error_msg); return absl::UnknownError(error_msg);
} }

@ -55,14 +55,14 @@
- name: canary_client_privacy - name: canary_client_privacy
description: description:
If set, canary client privacy If set, canary client privacy
expiry: 2024/08/01 expiry: 2024/12/01
owner: alishananda@google.com owner: alishananda@google.com
test_tags: [] test_tags: []
allow_in_fuzzing_config: false allow_in_fuzzing_config: false
- name: client_privacy - name: client_privacy
description: description:
If set, client privacy If set, client privacy
expiry: 2024/08/01 expiry: 2024/12/01
owner: alishananda@google.com owner: alishananda@google.com
test_tags: [] test_tags: []
allow_in_fuzzing_config: false allow_in_fuzzing_config: false
@ -88,7 +88,7 @@
uses_polling: true uses_polling: true
- name: free_large_allocator - name: free_large_allocator
description: If set, return all free bytes from a "big" allocator description: If set, return all free bytes from a "big" allocator
expiry: 2024/08/01 expiry: 2024/12/01
owner: alishananda@google.com owner: alishananda@google.com
test_tags: [resource_quota_test] test_tags: [resource_quota_test]
- name: max_pings_wo_data_throttle - name: max_pings_wo_data_throttle
@ -138,7 +138,7 @@
- name: server_privacy - name: server_privacy
description: description:
If set, server privacy If set, server privacy
expiry: 2024/08/01 expiry: 2024/12/01
owner: alishananda@google.com owner: alishananda@google.com
test_tags: [] test_tags: []
allow_in_fuzzing_config: false allow_in_fuzzing_config: false

@ -136,10 +136,9 @@ class WorkSerializer::LegacyWorkSerializer final : public WorkSerializerImpl {
void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback, void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
const DebugLocation& location) { const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { GRPC_TRACE_LOG(work_serializer, INFO)
LOG(INFO) << "WorkSerializer::Run() " << this << " Scheduling callback [" << "WorkSerializer::Run() " << this << " Scheduling callback ["
<< location.file() << ":" << location.line() << "]"; << location.file() << ":" << location.line() << "]";
}
// Increment queue size for the new callback and owner count to attempt to // Increment queue size for the new callback and owner count to attempt to
// take ownership of the WorkSerializer. // take ownership of the WorkSerializer.
const uint64_t prev_ref_pair = const uint64_t prev_ref_pair =
@ -405,10 +404,9 @@ void WorkSerializer::DispatchingWorkSerializer::Orphan() {
// Implementation of WorkSerializerImpl::Run // Implementation of WorkSerializerImpl::Run
void WorkSerializer::DispatchingWorkSerializer::Run( void WorkSerializer::DispatchingWorkSerializer::Run(
std::function<void()> callback, const DebugLocation& location) { std::function<void()> callback, const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { GRPC_TRACE_LOG(work_serializer, INFO)
LOG(INFO) << "WorkSerializer[" << this << "] Scheduling callback [" << "WorkSerializer[" << this << "] Scheduling callback ["
<< location.file() << ":" << location.line() << "]"; << location.file() << ":" << location.line() << "]";
}
global_stats().IncrementWorkSerializerItemsEnqueued(); global_stats().IncrementWorkSerializerItemsEnqueued();
MutexLock lock(&mu_); MutexLock lock(&mu_);
if (!running_) { if (!running_) {
@ -438,10 +436,9 @@ void WorkSerializer::DispatchingWorkSerializer::Run() {
// Grab the last element of processing_ - which is the next item in our // Grab the last element of processing_ - which is the next item in our
// queue since processing_ is stored in reverse order. // queue since processing_ is stored in reverse order.
auto& cb = processing_.back(); auto& cb = processing_.back();
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { GRPC_TRACE_LOG(work_serializer, INFO)
LOG(INFO) << "WorkSerializer[" << this << "] Executing callback [" << "WorkSerializer[" << this << "] Executing callback ["
<< cb.location.file() << ":" << cb.location.line() << "]"; << cb.location.file() << ":" << cb.location.line() << "]";
}
// Run the work item. // Run the work item.
const auto start = std::chrono::steady_clock::now(); const auto start = std::chrono::steady_clock::now();
SetCurrentThread(); SetCurrentThread();

@ -1125,10 +1125,8 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
} }
goto done; goto done;
} else { } else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) { GRPC_TRACE_LOG(polling, INFO) << " .. non-root poller " << next_worker
LOG(INFO) << " .. non-root poller " << next_worker << " (root=" << root_worker << ")";
<< " (root=" << root_worker << ")";
}
SET_KICK_STATE(next_worker, KICKED); SET_KICK_STATE(next_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd); ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done; goto done;

@ -173,10 +173,9 @@ class EventEngineEndpointWrapper {
void FinishPendingWrite(absl::Status status) { void FinishPendingWrite(absl::Status status) {
auto* write_buffer = reinterpret_cast<SliceBuffer*>(&eeep_->write_buffer); auto* write_buffer = reinterpret_cast<SliceBuffer*>(&eeep_->write_buffer);
write_buffer->~SliceBuffer(); write_buffer->~SliceBuffer();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO)
LOG(INFO) << "TCP: " << this << " WRITE (peer=" << PeerAddress() << "TCP: " << this << " WRITE (peer=" << PeerAddress()
<< ") error=" << status; << ") error=" << status;
}
grpc_closure* cb = pending_write_cb_; grpc_closure* cb = pending_write_cb_;
pending_write_cb_ = nullptr; pending_write_cb_ = nullptr;
if (grpc_core::ExecCtx::Get() == nullptr) { if (grpc_core::ExecCtx::Get() == nullptr) {

@ -30,8 +30,12 @@
grpc_polling_entity grpc_polling_entity_create_from_pollset_set( grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set* pollset_set) { grpc_pollset_set* pollset_set) {
grpc_polling_entity pollent; grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set; if (pollset_set == nullptr) {
pollent.tag = GRPC_POLLS_POLLSET_SET; pollent.tag = GRPC_POLLS_NONE;
} else {
pollent.pollent.pollset_set = pollset_set;
pollent.tag = GRPC_POLLS_POLLSET_SET;
}
return pollent; return pollent;
} }
@ -73,6 +77,8 @@ void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) { } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr); CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set); grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else { } else {
grpc_core::Crash( grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag)); absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));
@ -93,6 +99,8 @@ void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) { } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr); CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set); grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else { } else {
grpc_core::Crash( grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag)); absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));

@ -141,10 +141,9 @@ done:
static void tc_on_alarm(void* acp, grpc_error_handle error) { static void tc_on_alarm(void* acp, grpc_error_handle error) {
int done; int done;
async_connect* ac = static_cast<async_connect*>(acp); async_connect* ac = static_cast<async_connect*>(acp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO)
LOG(INFO) << "CLIENT_CONNECT: " << ac->addr_str << "CLIENT_CONNECT: " << ac->addr_str
<< ": on_alarm: error=" << grpc_core::StatusToString(error); << ": on_alarm: error=" << grpc_core::StatusToString(error);
}
gpr_mu_lock(&ac->mu); gpr_mu_lock(&ac->mu);
if (ac->fd != nullptr) { if (ac->fd != nullptr) {
grpc_fd_shutdown(ac->fd, GRPC_ERROR_CREATE("connect() timed out")); grpc_fd_shutdown(ac->fd, GRPC_ERROR_CREATE("connect() timed out"));
@ -180,10 +179,9 @@ static void on_writable(void* acp, grpc_error_handle error) {
std::string addr_str = ac->addr_str; std::string addr_str = ac->addr_str;
grpc_fd* fd; grpc_fd* fd;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO)
LOG(INFO) << "CLIENT_CONNECT: " << ac->addr_str << "CLIENT_CONNECT: " << ac->addr_str
<< ": on_writable: error=" << grpc_core::StatusToString(error); << ": on_writable: error=" << grpc_core::StatusToString(error);
}
gpr_mu_lock(&ac->mu); gpr_mu_lock(&ac->mu);
CHECK(ac->fd); CHECK(ac->fd);
@ -381,10 +379,8 @@ int64_t grpc_tcp_client_create_from_prepared_fd(
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
ac->options = options; ac->options = options;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO) << "CLIENT_CONNECT: " << ac->addr_str
LOG(INFO) << "CLIENT_CONNECT: " << ac->addr_str << ": asynchronously connecting fd " << fdobj;
<< ": asynchronously connecting fd " << fdobj;
}
int shard_number = connection_id % (*g_connection_shards).size(); int shard_number = connection_id % (*g_connection_shards).size();
struct ConnectionShard* shard = &(*g_connection_shards)[shard_number]; struct ConnectionShard* shard = &(*g_connection_shards)[shard_number];

@ -669,10 +669,8 @@ static void drop_uncovered(grpc_tcp* /*tcp*/) {
old_count = g_uncovered_notifications_pending--; old_count = g_uncovered_notifications_pending--;
g_backup_poller_mu->Unlock(); g_backup_poller_mu->Unlock();
CHECK_GT(old_count, 1); CHECK_GT(old_count, 1);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " uncover cnt "
LOG(INFO) << "BACKUP_POLLER:" << p << " uncover cnt " << old_count << "->" << old_count << "->" << old_count - 1;
<< old_count - 1;
}
} }
// gRPC API considers a Write operation to be done the moment it clears ‘flow // gRPC API considers a Write operation to be done the moment it clears ‘flow
@ -705,10 +703,8 @@ static void cover_self(grpc_tcp* tcp) {
p = g_backup_poller; p = g_backup_poller;
g_backup_poller_mu->Unlock(); g_backup_poller_mu->Unlock();
} }
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " add " << tcp
LOG(INFO) << "BACKUP_POLLER:" << p << " add " << tcp << " cnt " << " cnt " << old_count - 1 << "->" << old_count;
<< old_count - 1 << "->" << old_count;
}
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd); grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
} }
@ -731,10 +727,8 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg, static void tcp_drop_uncovered_then_handle_write(void* arg,
grpc_error_handle error) { grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO)
LOG(INFO) << "TCP:" << arg << "TCP:" << arg << " got_write: " << grpc_core::StatusToString(error);
<< " got_write: " << grpc_core::StatusToString(error);
}
drop_uncovered(static_cast<grpc_tcp*>(arg)); drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error); tcp_handle_write(arg, error);
} }
@ -1129,10 +1123,8 @@ static void maybe_make_read_slices(grpc_tcp* tcp)
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) { static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg); grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO)
LOG(INFO) << "TCP:" << tcp << "TCP:" << tcp << " got_read: " << grpc_core::StatusToString(error);
<< " got_read: " << grpc_core::StatusToString(error);
}
tcp->read_mu.Lock(); tcp->read_mu.Lock();
grpc_error_handle tcp_read_error; grpc_error_handle tcp_read_error;
if (GPR_LIKELY(error.ok()) && tcp->memory_owner.is_valid()) { if (GPR_LIKELY(error.ok()) && tcp->memory_owner.is_valid()) {
@ -1472,10 +1464,9 @@ static bool process_errors(grpc_tcp* tcp) {
} else { } else {
// Got a control message that is not a timestamp or zerocopy. Don't know // Got a control message that is not a timestamp or zerocopy. Don't know
// how to handle this. // how to handle this.
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO)
LOG(INFO) << "unknown control message cmsg_level:" << cmsg->cmsg_level << "unknown control message cmsg_level:" << cmsg->cmsg_level
<< " cmsg_type:" << cmsg->cmsg_type; << " cmsg_type:" << cmsg->cmsg_type;
}
return processed_err; return processed_err;
} }
} }

@ -177,10 +177,9 @@ static grpc_error_handle CreateEventEngineListener(
<< addr_uri.status().ToString(); << addr_uri.status().ToString();
return; return;
} }
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO) << "SERVER_CONNECT: incoming external "
LOG(INFO) << "SERVER_CONNECT: incoming external connection: " "connection: "
<< addr_uri->c_str(); << addr_uri->c_str();
}
} }
read_notifier_pollset = read_notifier_pollset =
(*(s->pollsets))[static_cast<size_t>( (*(s->pollsets))[static_cast<size_t>(
@ -916,10 +915,8 @@ class ExternalConnectionHandler : public grpc_core::TcpServerFdHandler {
LOG(ERROR) << "Invalid address: " << addr_uri.status(); LOG(ERROR) << "Invalid address: " << addr_uri.status();
return; return;
} }
if (GRPC_TRACE_FLAG_ENABLED(tcp)) { GRPC_TRACE_LOG(tcp, INFO)
LOG(INFO) << "SERVER_CONNECT: incoming external connection: " << "SERVER_CONNECT: incoming external connection: " << *addr_uri;
<< *addr_uri;
}
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value()); std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true); grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);
read_notifier_pollset = read_notifier_pollset =

@ -766,10 +766,9 @@ double PressureTracker::AddSampleAndGetControlValue(double sample) {
} else { } else {
report = controller_.Update(current_estimate - kSetPoint); report = controller_.Update(current_estimate - kSetPoint);
} }
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { GRPC_TRACE_LOG(resource_quota, INFO)
LOG(INFO) << "RQ: pressure:" << current_estimate << " report:" << report << "RQ: pressure:" << current_estimate << " report:" << report
<< " controller:" << controller_.DebugString(); << " controller:" << controller_.DebugString();
}
report_.store(report, std::memory_order_relaxed); report_.store(report, std::memory_order_relaxed);
}); });
return report_.load(std::memory_order_relaxed); return report_.load(std::memory_order_relaxed);

@ -425,10 +425,8 @@ class GrpcMemoryAllocatorImpl final : public EventEngineMemoryAllocatorImpl {
void ReturnFree() { void ReturnFree() {
size_t ret = free_bytes_.exchange(0, std::memory_order_acq_rel); size_t ret = free_bytes_.exchange(0, std::memory_order_acq_rel);
if (ret == 0) return; if (ret == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { GRPC_TRACE_LOG(resource_quota, INFO)
LOG(INFO) << "Allocator " << this << " returning " << ret << "Allocator " << this << " returning " << ret << " bytes to quota";
<< " bytes to quota";
}
taken_bytes_.fetch_sub(ret, std::memory_order_relaxed); taken_bytes_.fetch_sub(ret, std::memory_order_relaxed);
memory_quota_->Return(ret); memory_quota_->Return(ret);
memory_quota_->MaybeMoveAllocator(this, /*old_free_bytes=*/ret, memory_quota_->MaybeMoveAllocator(this, /*old_free_bytes=*/ret,

@ -77,10 +77,9 @@ bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
AuthorizationEngine::Decision decision = AuthorizationEngine::Decision decision =
engines.deny_engine->Evaluate(args); engines.deny_engine->Evaluate(args);
if (decision.type == AuthorizationEngine::Decision::Type::kDeny) { if (decision.type == AuthorizationEngine::Decision::Type::kDeny) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_authz_api)) { GRPC_TRACE_LOG(grpc_authz_api, INFO)
LOG(INFO) << "chand=" << this << ": request denied by policy " << "chand=" << this << ": request denied by policy "
<< decision.matching_policy_name; << decision.matching_policy_name;
}
return false; return false;
} }
} }
@ -95,10 +94,8 @@ bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
return true; return true;
} }
} }
if (GRPC_TRACE_FLAG_ENABLED(grpc_authz_api)) { GRPC_TRACE_LOG(grpc_authz_api, INFO)
LOG(INFO) << "chand=" << this << "chand=" << this << ": request denied, no matching policy found.";
<< ": request denied, no matching policy found.";
}
return false; return false;
} }

@ -121,10 +121,9 @@ void grpc_plugin_credentials::PendingRequest::RequestMetadataReady(
GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP); GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP);
grpc_core::RefCountedPtr<grpc_plugin_credentials::PendingRequest> r( grpc_core::RefCountedPtr<grpc_plugin_credentials::PendingRequest> r(
static_cast<grpc_plugin_credentials::PendingRequest*>(request)); static_cast<grpc_plugin_credentials::PendingRequest*>(request));
if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) { GRPC_TRACE_LOG(plugin_credentials, INFO)
LOG(INFO) << "plugin_credentials[" << r->creds() << "]: request " << r.get() << "plugin_credentials[" << r->creds() << "]: request " << r.get()
<< ": plugin returned asynchronously"; << ": plugin returned asynchronously";
}
for (size_t i = 0; i < num_md; ++i) { for (size_t i = 0; i < num_md; ++i) {
grpc_metadata p; grpc_metadata p;
p.key = grpc_core::CSliceRef(md[i].key); p.key = grpc_core::CSliceRef(md[i].key);
@ -150,10 +149,9 @@ grpc_plugin_credentials::GetRequestMetadata(
RefAsSubclass<grpc_plugin_credentials>(), std::move(initial_metadata), RefAsSubclass<grpc_plugin_credentials>(), std::move(initial_metadata),
args); args);
// Invoke the plugin. The callback holds a ref to us. // Invoke the plugin. The callback holds a ref to us.
if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) { GRPC_TRACE_LOG(plugin_credentials, INFO)
LOG(INFO) << "plugin_credentials[" << this << "]: request " << request.get() << "plugin_credentials[" << this << "]: request " << request.get()
<< ": invoking plugin"; << ": invoking plugin";
}
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX]; grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
size_t num_creds_md = 0; size_t num_creds_md = 0;
grpc_status_code status = GRPC_STATUS_OK; grpc_status_code status = GRPC_STATUS_OK;
@ -168,17 +166,15 @@ grpc_plugin_credentials::GetRequestMetadata(
child_request.get(), creds_md, &num_creds_md, child_request.get(), creds_md, &num_creds_md,
&status, &error_details)) { &status, &error_details)) {
child_request.release(); child_request.release();
if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) { GRPC_TRACE_LOG(plugin_credentials, INFO)
LOG(INFO) << "plugin_credentials[" << this << "]: request " << "plugin_credentials[" << this << "]: request " << request.get()
<< request.get() << ": plugin will return asynchronously"; << ": plugin will return asynchronously";
}
return [request] { return request->PollAsyncResult(); }; return [request] { return request->PollAsyncResult(); };
} }
// Synchronous return. // Synchronous return.
if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) { GRPC_TRACE_LOG(plugin_credentials, INFO)
LOG(INFO) << "plugin_credentials[" << this << "]: request " << request.get() << "plugin_credentials[" << this << "]: request " << request.get()
<< ": plugin returned synchronously"; << ": plugin returned synchronously";
}
auto result = request->ProcessPluginResult(creds_md, num_creds_md, status, auto result = request->ProcessPluginResult(creds_md, num_creds_md, status,
error_details); error_details);
// Clean up. // Clean up.

@ -46,17 +46,15 @@ struct grpc_slice_refcount {
void Ref(grpc_core::DebugLocation location) { void Ref(grpc_core::DebugLocation location) {
auto prev_refs = ref_.fetch_add(1, std::memory_order_relaxed); auto prev_refs = ref_.fetch_add(1, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) { GRPC_TRACE_LOG(slice_refcount, INFO)
LOG(INFO).AtLocation(location.file(), location.line()) .AtLocation(location.file(), location.line())
<< "REF " << this << " " << prev_refs << "->" << prev_refs + 1; << "REF " << this << " " << prev_refs << "->" << prev_refs + 1;
}
} }
void Unref(grpc_core::DebugLocation location) { void Unref(grpc_core::DebugLocation location) {
auto prev_refs = ref_.fetch_sub(1, std::memory_order_acq_rel); auto prev_refs = ref_.fetch_sub(1, std::memory_order_acq_rel);
if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) { GRPC_TRACE_LOG(slice_refcount, INFO)
LOG(INFO).AtLocation(location.file(), location.line()) .AtLocation(location.file(), location.line())
<< "UNREF " << this << " " << prev_refs << "->" << prev_refs - 1; << "UNREF " << this << " " << prev_refs << "->" << prev_refs - 1;
}
if (prev_refs == 1) { if (prev_refs == 1) {
destroyer_fn_(this); destroyer_fn_(this);
} }

@ -337,9 +337,9 @@ void Call::HandleCompressionAlgorithmDisabled(
void Call::UpdateDeadline(Timestamp deadline) { void Call::UpdateDeadline(Timestamp deadline) {
ReleasableMutexLock lock(&deadline_mu_); ReleasableMutexLock lock(&deadline_mu_);
if (GRPC_TRACE_FLAG_ENABLED(call)) { if (GRPC_TRACE_FLAG_ENABLED(call)) {
VLOG(2) << "[call " << this LOG(INFO) << "[call " << this
<< "] UpdateDeadline from=" << deadline_.ToString() << "] UpdateDeadline from=" << deadline_.ToString()
<< " to=" << deadline.ToString(); << " to=" << deadline.ToString();
} }
if (deadline >= deadline_) return; if (deadline >= deadline_) return;
if (deadline < Timestamp::Now()) { if (deadline < Timestamp::Now()) {

@ -71,10 +71,9 @@ Timestamp BdpEstimator::CompletePing() {
} }
if (start_inter_ping_delay != inter_ping_delay_) { if (start_inter_ping_delay != inter_ping_delay_) {
stable_estimate_count_ = 0; stable_estimate_count_ = 0;
if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) { GRPC_TRACE_LOG(bdp_estimator, INFO)
LOG(INFO) << "bdp[" << name_ << "]:update_inter_time to " << "bdp[" << name_ << "]:update_inter_time to "
<< inter_ping_delay_.millis() << "ms"; << inter_ping_delay_.millis() << "ms";
}
} }
ping_state_ = PingState::UNSCHEDULED; ping_state_ = PingState::UNSCHEDULED;
accumulator_ = 0; accumulator_ = 0;

@ -49,10 +49,9 @@ class BdpEstimator {
// grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a // grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a
// transport (but not necessarily started) // transport (but not necessarily started)
void SchedulePing() { void SchedulePing() {
if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) { GRPC_TRACE_LOG(bdp_estimator, INFO)
LOG(INFO) << "bdp[" << name_ << "]:sched acc=" << accumulator_ << "bdp[" << name_ << "]:sched acc=" << accumulator_
<< " est=" << estimate_; << " est=" << estimate_;
}
CHECK(ping_state_ == PingState::UNSCHEDULED); CHECK(ping_state_ == PingState::UNSCHEDULED);
ping_state_ = PingState::SCHEDULED; ping_state_ = PingState::SCHEDULED;
accumulator_ = 0; accumulator_ = 0;
@ -62,10 +61,9 @@ class BdpEstimator {
// once // once
// the ping is on the wire // the ping is on the wire
void StartPing() { void StartPing() {
if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) { GRPC_TRACE_LOG(bdp_estimator, INFO)
LOG(INFO) << "bdp[" << name_ << "]:start acc=" << accumulator_ << "bdp[" << name_ << "]:start acc=" << accumulator_
<< " est=" << estimate_; << " est=" << estimate_;
}
CHECK(ping_state_ == PingState::SCHEDULED); CHECK(ping_state_ == PingState::SCHEDULED);
ping_state_ = PingState::STARTED; ping_state_ = PingState::STARTED;
ping_start_time_ = gpr_now(GPR_CLOCK_MONOTONIC); ping_start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);

@ -51,23 +51,24 @@ class CallSpine final : public Party {
std::move(client_initial_metadata), std::move(arena))); std::move(client_initial_metadata), std::move(arena)));
} }
~CallSpine() override {} ~CallSpine() override { CallOnDone(true); }
CallFilters& call_filters() { return call_filters_; } CallFilters& call_filters() { return call_filters_; }
// Add a callback to be called when server trailing metadata is received. // Add a callback to be called when server trailing metadata is received.
void OnDone(absl::AnyInvocable<void()> fn) { void OnDone(absl::AnyInvocable<void(bool)> fn) {
if (on_done_ == nullptr) { if (on_done_ == nullptr) {
on_done_ = std::move(fn); on_done_ = std::move(fn);
return; return;
} }
on_done_ = [first = std::move(fn), next = std::move(on_done_)]() mutable { on_done_ = [first = std::move(fn),
first(); next = std::move(on_done_)](bool cancelled) mutable {
next(); first(cancelled);
next(cancelled);
}; };
} }
void CallOnDone() { void CallOnDone(bool cancelled) {
if (on_done_ != nullptr) std::exchange(on_done_, nullptr)(); if (on_done_ != nullptr) std::exchange(on_done_, nullptr)(cancelled);
} }
auto PullServerInitialMetadata() { auto PullServerInitialMetadata() {
@ -75,7 +76,12 @@ class CallSpine final : public Party {
} }
auto PullServerTrailingMetadata() { auto PullServerTrailingMetadata() {
return call_filters().PullServerTrailingMetadata(); return Map(
call_filters().PullServerTrailingMetadata(),
[this](ServerMetadataHandle result) {
CallOnDone(result->get(GrpcCallWasCancelled()).value_or(false));
return result;
});
} }
auto PushClientToServerMessage(MessageHandle message) { auto PushClientToServerMessage(MessageHandle message) {
@ -190,7 +196,7 @@ class CallSpine final : public Party {
// Call filters/pipes part of the spine // Call filters/pipes part of the spine
CallFilters call_filters_; CallFilters call_filters_;
absl::AnyInvocable<void()> on_done_{nullptr}; absl::AnyInvocable<void(bool)> on_done_{nullptr};
}; };
class CallInitiator { class CallInitiator {
@ -227,7 +233,9 @@ class CallInitiator {
spine_->PushServerTrailingMetadata(std::move(status)); spine_->PushServerTrailingMetadata(std::move(status));
} }
void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); } void OnDone(absl::AnyInvocable<void(bool)> fn) {
spine_->OnDone(std::move(fn));
}
template <typename PromiseFactory> template <typename PromiseFactory>
void SpawnGuarded(absl::string_view name, PromiseFactory promise_factory) { void SpawnGuarded(absl::string_view name, PromiseFactory promise_factory) {
@ -274,7 +282,9 @@ class CallHandler {
spine_->PushServerTrailingMetadata(std::move(status)); spine_->PushServerTrailingMetadata(std::move(status));
} }
void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); } void OnDone(absl::AnyInvocable<void(bool)> fn) {
spine_->OnDone(std::move(fn));
}
template <typename Promise> template <typename Promise>
auto CancelIfFails(Promise promise) { auto CancelIfFails(Promise promise) {
@ -327,7 +337,9 @@ class UnstartedCallHandler {
spine_->PushServerTrailingMetadata(std::move(status)); spine_->PushServerTrailingMetadata(std::move(status));
} }
void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); } void OnDone(absl::AnyInvocable<void(bool)> fn) {
spine_->OnDone(std::move(fn));
}
template <typename Promise> template <typename Promise>
auto CancelIfFails(Promise promise) { auto CancelIfFails(Promise promise) {

@ -117,10 +117,9 @@ ConnectivityStateTracker::~ConnectivityStateTracker() {
void ConnectivityStateTracker::AddWatcher( void ConnectivityStateTracker::AddWatcher(
grpc_connectivity_state initial_state, grpc_connectivity_state initial_state,
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) { OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
if (GRPC_TRACE_FLAG_ENABLED(connectivity_state)) { GRPC_TRACE_LOG(connectivity_state, INFO)
LOG(INFO) << "ConnectivityStateTracker " << name_ << "[" << this << "ConnectivityStateTracker " << name_ << "[" << this
<< "]: add watcher " << watcher.get(); << "]: add watcher " << watcher.get();
}
grpc_connectivity_state current_state = grpc_connectivity_state current_state =
state_.load(std::memory_order_relaxed); state_.load(std::memory_order_relaxed);
if (initial_state != current_state) { if (initial_state != current_state) {
@ -141,10 +140,9 @@ void ConnectivityStateTracker::AddWatcher(
void ConnectivityStateTracker::RemoveWatcher( void ConnectivityStateTracker::RemoveWatcher(
ConnectivityStateWatcherInterface* watcher) { ConnectivityStateWatcherInterface* watcher) {
if (GRPC_TRACE_FLAG_ENABLED(connectivity_state)) { GRPC_TRACE_LOG(connectivity_state, INFO)
LOG(INFO) << "ConnectivityStateTracker " << name_ << "[" << this << "ConnectivityStateTracker " << name_ << "[" << this
<< "]: remove watcher " << watcher; << "]: remove watcher " << watcher;
}
watchers_.erase(watcher); watchers_.erase(watcher);
} }
@ -178,10 +176,9 @@ void ConnectivityStateTracker::SetState(grpc_connectivity_state state,
grpc_connectivity_state ConnectivityStateTracker::state() const { grpc_connectivity_state ConnectivityStateTracker::state() const {
grpc_connectivity_state state = state_.load(std::memory_order_relaxed); grpc_connectivity_state state = state_.load(std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(connectivity_state)) { GRPC_TRACE_LOG(connectivity_state, INFO)
LOG(INFO) << "ConnectivityStateTracker " << name_ << "[" << this << "ConnectivityStateTracker " << name_ << "[" << this
<< "]: get current state: " << ConnectivityStateName(state); << "]: get current state: " << ConnectivityStateName(state);
}
return state; return state;
} }

@ -948,10 +948,9 @@ void GrpcLb::BalancerCallState::Orphan() {
void GrpcLb::BalancerCallState::StartQuery() { void GrpcLb::BalancerCallState::StartQuery() {
CHECK_NE(lb_call_, nullptr); CHECK_NE(lb_call_, nullptr);
if (GRPC_TRACE_FLAG_ENABLED(glb)) { GRPC_TRACE_LOG(glb, INFO)
LOG(INFO) << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
<< ": Starting LB call " << lb_call_; << ": Starting LB call " << lb_call_;
}
// Create the ops. // Create the ops.
grpc_call_error call_error; grpc_call_error call_error;
grpc_op ops[3]; grpc_op ops[3];
@ -1527,10 +1526,9 @@ class GrpcLb::NullLbTokenEndpointIterator final
void ForEach(absl::FunctionRef<void(const EndpointAddresses&)> callback) void ForEach(absl::FunctionRef<void(const EndpointAddresses&)> callback)
const override { const override {
parent_it_->ForEach([&](const EndpointAddresses& endpoint) { parent_it_->ForEach([&](const EndpointAddresses& endpoint) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) { GRPC_TRACE_LOG(glb, INFO)
LOG(INFO) << "[grpclb " << this << "[grpclb " << this
<< "] fallback address: " << endpoint.ToString(); << "] fallback address: " << endpoint.ToString();
}
callback(EndpointAddresses(endpoint.addresses(), callback(EndpointAddresses(endpoint.addresses(),
endpoint.args().SetObject(empty_token_))); endpoint.args().SetObject(empty_token_)));
}); });
@ -1765,10 +1763,9 @@ OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
std::make_unique<Helper>(RefAsSubclass<GrpcLb>(DEBUG_LOCATION, "Helper")); std::make_unique<Helper>(RefAsSubclass<GrpcLb>(DEBUG_LOCATION, "Helper"));
OrphanablePtr<LoadBalancingPolicy> lb_policy = OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), &glb_trace); MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), &glb_trace);
if (GRPC_TRACE_FLAG_ENABLED(glb)) { GRPC_TRACE_LOG(glb, INFO)
LOG(INFO) << "[grpclb " << this << "] Created new child policy handler (" << "[grpclb " << this << "] Created new child policy handler ("
<< lb_policy.get() << ")"; << lb_policy.get() << ")";
}
// Add the gRPC LB's interested_parties pollset_set to that of the newly // Add the gRPC LB's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon // created child policy. This will make the child policy progress upon
// activity on gRPC LB, which in turn is tied to the application's call. // activity on gRPC LB, which in turn is tied to the application's call.
@ -1819,10 +1816,9 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
child_policy_ = CreateChildPolicyLocked(update_args.args); child_policy_ = CreateChildPolicyLocked(update_args.args);
} }
// Update the policy. // Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(glb)) { GRPC_TRACE_LOG(glb, INFO)
LOG(INFO) << "[grpclb " << this << "] Updating child policy handler " << "[grpclb " << this << "] Updating child policy handler "
<< child_policy_.get(); << child_policy_.get();
}
// TODO(roth): If we're in fallback mode and the child policy rejects the // TODO(roth): If we're in fallback mode and the child policy rejects the
// update, we should propagate that failure back to the resolver somehow. // update, we should propagate that failure back to the resolver somehow.
(void)child_policy_->UpdateLocked(std::move(update_args)); (void)child_policy_->UpdateLocked(std::move(update_args));
@ -1864,10 +1860,9 @@ void GrpcLb::OnSubchannelCacheTimerLocked() {
subchannel_cache_timer_handle_.reset(); subchannel_cache_timer_handle_.reset();
auto it = cached_subchannels_.begin(); auto it = cached_subchannels_.begin();
if (it != cached_subchannels_.end()) { if (it != cached_subchannels_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) { GRPC_TRACE_LOG(glb, INFO)
LOG(INFO) << "[grpclb " << this << "] removing " << it->second.size() << "[grpclb " << this << "] removing " << it->second.size()
<< " subchannels from cache"; << " subchannels from cache";
}
cached_subchannels_.erase(it); cached_subchannels_.erase(it);
} }
if (!cached_subchannels_.empty()) { if (!cached_subchannels_.empty()) {

@ -341,10 +341,9 @@ class HealthProducer::ConnectivityWatcher final
// //
void HealthProducer::Start(RefCountedPtr<Subchannel> subchannel) { void HealthProducer::Start(RefCountedPtr<Subchannel> subchannel) {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) { GRPC_TRACE_LOG(health_check_client, INFO)
LOG(INFO) << "HealthProducer " << this << ": starting with subchannel " << "HealthProducer " << this << ": starting with subchannel "
<< subchannel.get(); << subchannel.get();
}
subchannel_ = std::move(subchannel); subchannel_ = std::move(subchannel);
{ {
MutexLock lock(&mu_); MutexLock lock(&mu_);
@ -498,10 +497,9 @@ MakeHealthCheckWatcher(
health_check_service_name = health_check_service_name =
args.GetOwnedString(GRPC_ARG_HEALTH_CHECK_SERVICE_NAME); args.GetOwnedString(GRPC_ARG_HEALTH_CHECK_SERVICE_NAME);
} }
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) { GRPC_TRACE_LOG(health_check_client, INFO)
LOG(INFO) << "creating HealthWatcher -- health_check_service_name=\"" << "creating HealthWatcher -- health_check_service_name=\""
<< health_check_service_name.value_or("N/A") << "\""; << health_check_service_name.value_or("N/A") << "\"";
}
return std::make_unique<HealthWatcher>(std::move(work_serializer), return std::make_unique<HealthWatcher>(std::move(work_serializer),
std::move(health_check_service_name), std::move(health_check_service_name),
std::move(watcher)); std::move(watcher));

@ -274,10 +274,8 @@ void OrcaProducer::MaybeStartStreamLocked() {
void OrcaProducer::NotifyWatchers( void OrcaProducer::NotifyWatchers(
const BackendMetricData& backend_metric_data) { const BackendMetricData& backend_metric_data) {
if (GRPC_TRACE_FLAG_ENABLED(orca_client)) { GRPC_TRACE_LOG(orca_client, INFO)
LOG(INFO) << "OrcaProducer " << this << "OrcaProducer " << this << ": reporting backend metrics to watchers";
<< ": reporting backend metrics to watchers";
}
MutexLock lock(&mu_); MutexLock lock(&mu_);
for (OrcaWatcher* watcher : watchers_) { for (OrcaWatcher* watcher : watchers_) {
watcher->watcher()->OnBackendMetricReport(backend_metric_data); watcher->watcher()->OnBackendMetricReport(backend_metric_data);

@ -580,10 +580,9 @@ OutlierDetectionLb::OutlierDetectionLb(Args args)
} }
OutlierDetectionLb::~OutlierDetectionLb() { OutlierDetectionLb::~OutlierDetectionLb() {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << this << "[outlier_detection_lb " << this
<< "] destroying outlier_detection LB policy"; << "] destroying outlier_detection LB policy";
}
} }
void OutlierDetectionLb::ShutdownLocked() { void OutlierDetectionLb::ShutdownLocked() {
@ -622,10 +621,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
// Update outlier detection timer. // Update outlier detection timer.
if (!config_->CountingEnabled()) { if (!config_->CountingEnabled()) {
// No need for timer. Cancel the current timer, if any. // No need for timer. Cancel the current timer, if any.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << this << "[outlier_detection_lb " << this
<< "] counting disabled, cancelling timer"; << "] counting disabled, cancelling timer";
}
ejection_timer_.reset(); ejection_timer_.reset();
} else if (ejection_timer_ == nullptr) { } else if (ejection_timer_ == nullptr) {
// No timer running. Start it now. // No timer running. Start it now.
@ -643,10 +641,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
// with the same start time. // with the same start time.
// Note that if the new deadline is in the past, the timer will fire // Note that if the new deadline is in the past, the timer will fire
// immediately. // immediately.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << this << "[outlier_detection_lb " << this
<< "] interval changed, replacing timer"; << "] interval changed, replacing timer";
}
ejection_timer_ = MakeOrphanable<EjectionTimer>( ejection_timer_ = MakeOrphanable<EjectionTimer>(
RefAsSubclass<OutlierDetectionLb>(), ejection_timer_->StartTime()); RefAsSubclass<OutlierDetectionLb>(), ejection_timer_->StartTime());
} }
@ -663,10 +660,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
// Find the entry in the endpoint map. // Find the entry in the endpoint map.
auto it = endpoint_state_map_.find(key); auto it = endpoint_state_map_.find(key);
if (it == endpoint_state_map_.end()) { if (it == endpoint_state_map_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << this << "[outlier_detection_lb " << this
<< "] adding endpoint entry for " << key.ToString(); << "] adding endpoint entry for " << key.ToString();
}
// The endpoint is not present in the map, so we'll need to add it. // The endpoint is not present in the map, so we'll need to add it.
// Start by getting a pointer to the entry for each address in the // Start by getting a pointer to the entry for each address in the
// subchannel map, creating the entry if needed. // subchannel map, creating the entry if needed.
@ -722,10 +718,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
for (auto it = endpoint_state_map_.begin(); for (auto it = endpoint_state_map_.begin();
it != endpoint_state_map_.end();) { it != endpoint_state_map_.end();) {
if (current_endpoints.find(it->first) == current_endpoints.end()) { if (current_endpoints.find(it->first) == current_endpoints.end()) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << this << "[outlier_detection_lb " << this
<< "] removing endpoint map entry " << it->first.ToString(); << "] removing endpoint map entry " << it->first.ToString();
}
it = endpoint_state_map_.erase(it); it = endpoint_state_map_.erase(it);
} else { } else {
++it; ++it;
@ -742,10 +737,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note); update_args.resolution_note = std::move(args.resolution_note);
update_args.config = config_->child_policy(); update_args.config = config_->child_policy();
update_args.args = std::move(args.args); update_args.args = std::move(args.args);
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << this << "[outlier_detection_lb " << this << "] Updating child policy handler "
<< "] Updating child policy handler " << child_policy_.get(); << child_policy_.get();
}
return child_policy_->UpdateLocked(std::move(update_args)); return child_policy_->UpdateLocked(std::move(update_args));
} }
@ -774,10 +768,9 @@ OrphanablePtr<LoadBalancingPolicy> OutlierDetectionLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy = OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&outlier_detection_lb_trace); &outlier_detection_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << this << "[outlier_detection_lb " << this
<< "] Created new child policy handler " << lb_policy.get(); << "] Created new child policy handler " << lb_policy.get();
}
// Add our interested_parties pollset_set to that of the newly created // Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call. // this policy, which in turn is tied to the application's call.
@ -842,10 +835,9 @@ OutlierDetectionLb::EjectionTimer::EjectionTimer(
RefCountedPtr<OutlierDetectionLb> parent, Timestamp start_time) RefCountedPtr<OutlierDetectionLb> parent, Timestamp start_time)
: parent_(std::move(parent)), start_time_(start_time) { : parent_(std::move(parent)), start_time_(start_time) {
auto interval = parent_->config_->outlier_detection_config().interval; auto interval = parent_->config_->outlier_detection_config().interval;
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << parent_.get() << "[outlier_detection_lb " << parent_.get()
<< "] ejection timer will run in " << interval.ToString(); << "] ejection timer will run in " << interval.ToString();
}
timer_handle_ = parent_->channel_control_helper()->GetEventEngine()->RunAfter( timer_handle_ = parent_->channel_control_helper()->GetEventEngine()->RunAfter(
interval, [self = Ref(DEBUG_LOCATION, "EjectionTimer")]() mutable { interval, [self = Ref(DEBUG_LOCATION, "EjectionTimer")]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx; ApplicationCallbackExecCtx callback_exec_ctx;
@ -868,10 +860,9 @@ void OutlierDetectionLb::EjectionTimer::Orphan() {
void OutlierDetectionLb::EjectionTimer::OnTimerLocked() { void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
if (!timer_handle_.has_value()) return; if (!timer_handle_.has_value()) return;
timer_handle_.reset(); timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << parent_.get() << "[outlier_detection_lb " << parent_.get()
<< "] ejection timer running"; << "] ejection timer running";
}
std::map<EndpointState*, double> success_rate_ejection_candidates; std::map<EndpointState*, double> success_rate_ejection_candidates;
std::map<EndpointState*, double> failure_percentage_ejection_candidates; std::map<EndpointState*, double> failure_percentage_ejection_candidates;
size_t ejected_host_count = 0; size_t ejected_host_count = 0;
@ -967,10 +958,9 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
(current_percent < config.max_ejection_percent))) { (current_percent < config.max_ejection_percent))) {
// Eject and record the timestamp for use when ejecting addresses in // Eject and record the timestamp for use when ejecting addresses in
// this iteration. // this iteration.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << parent_.get() << "[outlier_detection_lb " << parent_.get()
<< "] ejecting candidate"; << "] ejecting candidate";
}
candidate.first->Eject(time_now); candidate.first->Eject(time_now);
++ejected_host_count; ++ejected_host_count;
} }
@ -1014,10 +1004,9 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
(current_percent < config.max_ejection_percent))) { (current_percent < config.max_ejection_percent))) {
// Eject and record the timestamp for use when ejecting addresses in // Eject and record the timestamp for use when ejecting addresses in
// this iteration. // this iteration.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) { GRPC_TRACE_LOG(outlier_detection_lb, INFO)
LOG(INFO) << "[outlier_detection_lb " << parent_.get() << "[outlier_detection_lb " << parent_.get()
<< "] ejecting candidate"; << "] ejecting candidate";
}
candidate.first->Eject(time_now); candidate.first->Eject(time_now);
++ejected_host_count; ++ejected_host_count;
} }

@ -631,10 +631,10 @@ void PickFirst::GoIdle() {
void PickFirst::HealthWatcher::OnConnectivityStateChange( void PickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) { grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return; if (policy_->health_watcher_ != this) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: " << "[PF " << policy_.get()
<< ConnectivityStateName(new_state) << " (" << status << ")"; << "] health watch state update: " << ConnectivityStateName(new_state)
} << " (" << status << ")";
switch (new_state) { switch (new_state) {
case GRPC_CHANNEL_READY: case GRPC_CHANNEL_READY:
policy_->channel_control_helper()->UpdateState( policy_->channel_control_helper()->UpdateState(
@ -672,10 +672,9 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
: subchannel_data_(subchannel_data), : subchannel_data_(subchannel_data),
pick_first_(subchannel_data_->subchannel_list_->policy_), pick_first_(subchannel_data_->subchannel_list_->policy_),
subchannel_(std::move(subchannel)) { subchannel_(std::move(subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this << "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get() << "): starting watch"; << " (subchannel " << subchannel_.get() << "): starting watch";
}
auto watcher = std::make_unique<Watcher>(Ref(DEBUG_LOCATION, "Watcher")); auto watcher = std::make_unique<Watcher>(Ref(DEBUG_LOCATION, "Watcher"));
watcher_ = watcher.get(); watcher_ = watcher.get();
subchannel_->WatchConnectivityState(std::move(watcher)); subchannel_->WatchConnectivityState(std::move(watcher));
@ -696,10 +695,9 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
} }
void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() { void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "Pick First " << pick_first_.get() << " selected subchannel " << "Pick First " << pick_first_.get() << " selected subchannel "
<< subchannel_.get(); << subchannel_.get();
}
CHECK_NE(subchannel_data_, nullptr); CHECK_NE(subchannel_data_, nullptr);
pick_first_->UnsetSelectedSubchannel(); // Cancel health watch, if any. pick_first_->UnsetSelectedSubchannel(); // Cancel health watch, if any.
pick_first_->selected_ = std::move(subchannel_data_->subchannel_state_); pick_first_->selected_ = std::move(subchannel_data_->subchannel_state_);
@ -1006,10 +1004,9 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING) args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING)
.Remove( .Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) { GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list " << "[PF " << policy_.get() << "] Creating subchannel list " << this
<< this << " - channel args: " << args_.ToString(); << " - channel args: " << args_.ToString();
}
if (addresses == nullptr) return; if (addresses == nullptr) return;
// Create a subchannel for each address. // Create a subchannel for each address.
addresses->ForEach([&](const EndpointAddresses& address) { addresses->ForEach([&](const EndpointAddresses& address) {
@ -1037,17 +1034,13 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
} }
PickFirst::SubchannelList::~SubchannelList() { PickFirst::SubchannelList::~SubchannelList() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list " << "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
<< this;
}
} }
void PickFirst::SubchannelList::Orphan() { void PickFirst::SubchannelList::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list " << "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
<< this;
}
CHECK(!shutting_down_); CHECK(!shutting_down_);
shutting_down_ = true; shutting_down_ = true;
// Cancel Happy Eyeballs timer, if any. // Cancel Happy Eyeballs timer, if any.
@ -1089,10 +1082,9 @@ void PickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// We didn't find another subchannel not in state TRANSIENT_FAILURE, // We didn't find another subchannel not in state TRANSIENT_FAILURE,
// so report TRANSIENT_FAILURE and switch to a mode in which we try to // so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel. // connect to all addresses in parallel.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this << "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels"; << " failed to connect to all subchannels";
}
// Re-resolve and report TRANSIENT_FAILURE. // Re-resolve and report TRANSIENT_FAILURE.
policy_->channel_control_helper()->RequestReresolution(); policy_->channel_control_helper()->RequestReresolution();
absl::Status status = absl::UnavailableError( absl::Status status = absl::UnavailableError(
@ -1564,10 +1556,10 @@ void OldPickFirst::UnsetSelectedSubchannel() {
void OldPickFirst::HealthWatcher::OnConnectivityStateChange( void OldPickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) { grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return; if (policy_->health_watcher_ != this) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: " << "[PF " << policy_.get()
<< ConnectivityStateName(new_state) << " (" << status << ")"; << "] health watch state update: " << ConnectivityStateName(new_state)
} << " (" << status << ")";
switch (new_state) { switch (new_state) {
case GRPC_CHANNEL_READY: case GRPC_CHANNEL_READY:
policy_->channel_control_helper()->UpdateState( policy_->channel_control_helper()->UpdateState(
@ -1912,10 +1904,8 @@ void OldPickFirst::SubchannelList::SubchannelData::
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_); p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
} }
// Cases 1 and 2. // Cases 1 and 2.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "Pick First " << p << " selected subchannel " << "Pick First " << p << " selected subchannel " << subchannel_.get();
<< subchannel_.get();
}
p->selected_ = this; p->selected_ = this;
// If health checking is enabled, start the health watch, but don't // If health checking is enabled, start the health watch, but don't
// report a new picker -- we want to stay in CONNECTING while we wait // report a new picker -- we want to stay in CONNECTING while we wait
@ -1957,10 +1947,9 @@ OldPickFirst::SubchannelList::SubchannelList(
args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING) args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING)
.Remove( .Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) { GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list " << "[PF " << policy_.get() << "] Creating subchannel list " << this
<< this << " - channel args: " << args_.ToString(); << " - channel args: " << args_.ToString();
}
if (addresses == nullptr) return; if (addresses == nullptr) return;
// Create a subchannel for each address. // Create a subchannel for each address.
addresses->ForEach([&](const EndpointAddresses& address) { addresses->ForEach([&](const EndpointAddresses& address) {
@ -1987,17 +1976,13 @@ OldPickFirst::SubchannelList::SubchannelList(
} }
OldPickFirst::SubchannelList::~SubchannelList() { OldPickFirst::SubchannelList::~SubchannelList() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list " << "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
<< this;
}
} }
void OldPickFirst::SubchannelList::Orphan() { void OldPickFirst::SubchannelList::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list " << "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
<< this;
}
CHECK(!shutting_down_); CHECK(!shutting_down_);
shutting_down_ = true; shutting_down_ = true;
for (auto& sd : subchannels_) { for (auto& sd : subchannels_) {
@ -2041,10 +2026,9 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// We didn't find another subchannel not in state TRANSIENT_FAILURE, // We didn't find another subchannel not in state TRANSIENT_FAILURE,
// so report TRANSIENT_FAILURE and switch to a mode in which we try to // so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel. // connect to all addresses in parallel.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) { GRPC_TRACE_LOG(pick_first, INFO)
LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this << "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels"; << " failed to connect to all subchannels";
}
// In case 2, swap to the new subchannel list. This means reporting // In case 2, swap to the new subchannel list. This means reporting
// TRANSIENT_FAILURE and dropping the existing (working) connection, // TRANSIENT_FAILURE and dropping the existing (working) connection,
// but we can't ignore what the control plane has told us. // but we can't ignore what the control plane has told us.

@ -402,10 +402,9 @@ void PriorityLb::ChoosePriorityLocked() {
++priority) { ++priority) {
// If the child for the priority does not exist yet, create it. // If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority]; const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) { GRPC_TRACE_LOG(priority_lb, INFO)
LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority << "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name; << ", child " << child_name;
}
auto& child = children_[child_name]; auto& child = children_[child_name];
// Create child if needed. // Create child if needed.
if (child == nullptr) { if (child == nullptr) {
@ -461,10 +460,9 @@ void PriorityLb::ChoosePriorityLocked() {
++priority) { ++priority) {
// If the child for the priority does not exist yet, create it. // If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority]; const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) { GRPC_TRACE_LOG(priority_lb, INFO)
LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority << "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name; << ", child " << child_name;
}
auto& child = children_[child_name]; auto& child = children_[child_name];
CHECK(child != nullptr); CHECK(child != nullptr);
if (child->connectivity_state() == GRPC_CHANNEL_CONNECTING) { if (child->connectivity_state() == GRPC_CHANNEL_CONNECTING) {
@ -626,19 +624,17 @@ void PriorityLb::ChildPriority::FailoverTimer::OnTimerLocked() {
PriorityLb::ChildPriority::ChildPriority( PriorityLb::ChildPriority::ChildPriority(
RefCountedPtr<PriorityLb> priority_policy, std::string name) RefCountedPtr<PriorityLb> priority_policy, std::string name)
: priority_policy_(std::move(priority_policy)), name_(std::move(name)) { : priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) { GRPC_TRACE_LOG(priority_lb, INFO)
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "[priority_lb " << priority_policy_.get() << "] creating child "
<< "] creating child " << name_ << " (" << this << ")"; << name_ << " (" << this << ")";
}
// Start the failover timer. // Start the failover timer.
failover_timer_ = MakeOrphanable<FailoverTimer>(Ref()); failover_timer_ = MakeOrphanable<FailoverTimer>(Ref());
} }
void PriorityLb::ChildPriority::Orphan() { void PriorityLb::ChildPriority::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) { GRPC_TRACE_LOG(priority_lb, INFO)
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child " << "[priority_lb " << priority_policy_.get() << "] child " << name_
<< name_ << " (" << this << "): orphaned"; << " (" << this << "): orphaned";
}
failover_timer_.reset(); failover_timer_.reset();
deactivation_timer_.reset(); deactivation_timer_.reset();
// Remove the child policy's interested_parties pollset_set from the // Remove the child policy's interested_parties pollset_set from the
@ -665,10 +661,9 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
RefCountedPtr<LoadBalancingPolicy::Config> config, RefCountedPtr<LoadBalancingPolicy::Config> config,
bool ignore_reresolution_requests) { bool ignore_reresolution_requests) {
if (priority_policy_->shutting_down_) return absl::OkStatus(); if (priority_policy_->shutting_down_) return absl::OkStatus();
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) { GRPC_TRACE_LOG(priority_lb, INFO)
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child " << "[priority_lb " << priority_policy_.get() << "] child " << name_
<< name_ << " (" << this << "): start update"; << " (" << this << "): start update";
}
ignore_reresolution_requests_ = ignore_reresolution_requests; ignore_reresolution_requests_ = ignore_reresolution_requests;
// Create policy if needed. // Create policy if needed.
if (child_policy_ == nullptr) { if (child_policy_ == nullptr) {

@ -655,10 +655,9 @@ absl::Status RingHash::UpdateLocked(UpdateArgs args) {
} }
}); });
} else { } else {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) { GRPC_TRACE_LOG(ring_hash_lb, INFO)
LOG(INFO) << "[RH " << this << "] received update with addresses error: " << "[RH " << this << "] received update with addresses error: "
<< args.addresses.status(); << args.addresses.status();
}
// If we already have an endpoint list, then keep using the existing // If we already have an endpoint list, then keep using the existing
// list, but still report back that the update was not accepted. // list, but still report back that the update was not accepted.
if (!endpoints_.empty()) return args.addresses.status(); if (!endpoints_.empty()) return args.addresses.status();

@ -1039,10 +1039,9 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
BuildKeyMap(config_->key_builder_map(), args.path, BuildKeyMap(config_->key_builder_map(), args.path,
lb_policy_->channel_control_helper()->GetAuthority(), lb_policy_->channel_control_helper()->GetAuthority(),
args.initial_metadata)}; args.initial_metadata)};
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": request keys: " << key.ToString(); << ": request keys: " << key.ToString();
}
Timestamp now = Timestamp::Now(); Timestamp now = Timestamp::Now();
MutexLock lock(&lb_policy_->mu_); MutexLock lock(&lb_policy_->mu_);
if (lb_policy_->is_shutdown_) { if (lb_policy_->is_shutdown_) {
@ -1077,10 +1076,9 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
if (entry != nullptr) { if (entry != nullptr) {
// If the entry has non-expired data, use it. // If the entry has non-expired data, use it.
if (entry->data_expiration_time() >= now) { if (entry->data_expiration_time() >= now) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": using cache entry " << entry; << ": using cache entry " << entry;
}
return entry->Pick(args); return entry->Pick(args);
} }
// If the entry is in backoff, then use the default target if set, // If the entry is in backoff, then use the default target if set,
@ -1093,29 +1091,26 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
} }
} }
// RLS call pending. Queue the pick. // RLS call pending. Queue the pick.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": RLS request pending; queuing pick"; << ": RLS request pending; queuing pick";
}
return PickResult::Queue(); return PickResult::Queue();
} }
LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail( LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail(
const char* reason, PickArgs args, absl::Status status) { const char* reason, PickArgs args, absl::Status status) {
if (default_child_policy_ != nullptr) { if (default_child_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": " << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
<< reason << "; using default target"; << reason << "; using default target";
}
auto pick_result = default_child_policy_->Pick(args); auto pick_result = default_child_policy_->Pick(args);
lb_policy_->MaybeExportPickCount(kMetricDefaultTargetPicks, lb_policy_->MaybeExportPickCount(kMetricDefaultTargetPicks,
config_->default_target(), pick_result); config_->default_target(), pick_result);
return pick_result; return pick_result;
} }
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": " << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": " << reason
<< reason << "; failing pick"; << "; failing pick";
}
auto& stats_plugins = auto& stats_plugins =
lb_policy_->channel_control_helper()->GetStatsPluginGroup(); lb_policy_->channel_control_helper()->GetStatsPluginGroup();
stats_plugins.AddCounter(kMetricFailedPicks, 1, stats_plugins.AddCounter(kMetricFailedPicks, 1,
@ -1204,10 +1199,9 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
lb_policy_->cache_.lru_list_.end(), key)) {} lb_policy_->cache_.lru_list_.end(), key)) {}
void RlsLb::Cache::Entry::Orphan() { void RlsLb::Cache::Entry::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this << "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
<< " " << lru_iterator_->ToString() << ": cache entry evicted"; << lru_iterator_->ToString() << ": cache entry evicted";
}
is_shutdown_ = true; is_shutdown_ = true;
lb_policy_->cache_.lru_list_.erase(lru_iterator_); lb_policy_->cache_.lru_list_.erase(lru_iterator_);
lru_iterator_ = lb_policy_->cache_.lru_list_.end(); // Just in case. lru_iterator_ = lb_policy_->cache_.lru_list_.end(); // Just in case.
@ -1403,26 +1397,22 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
lb_policy_->RefAsSubclass<RlsLb>(DEBUG_LOCATION, "CacheEntry"), key); lb_policy_->RefAsSubclass<RlsLb>(DEBUG_LOCATION, "CacheEntry"), key);
map_.emplace(key, OrphanablePtr<Entry>(entry)); map_.emplace(key, OrphanablePtr<Entry>(entry));
size_ += entry_size; size_ += entry_size;
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString() << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": cache entry added, entry=" << entry; << ": cache entry added, entry=" << entry;
}
return entry; return entry;
} }
// Entry found, so use it. // Entry found, so use it.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString() << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": found cache entry " << it->second.get(); << ": found cache entry " << it->second.get();
}
it->second->MarkUsed(); it->second->MarkUsed();
return it->second.get(); return it->second.get();
} }
void RlsLb::Cache::Resize(size_t bytes) { void RlsLb::Cache::Resize(size_t bytes) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << " bytes";
<< " bytes";
}
size_limit_ = bytes; size_limit_ = bytes;
MaybeShrinkSize(size_limit_); MaybeShrinkSize(size_limit_);
} }
@ -1507,10 +1497,9 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
auto map_it = map_.find(*lru_it); auto map_it = map_.find(*lru_it);
CHECK(map_it != map_.end()); CHECK(map_it != map_.end());
if (!map_it->second->CanEvict()) break; if (!map_it->second->CanEvict()) break;
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_ << "] LRU eviction: removing entry " << "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
<< map_it->second.get() << " " << lru_it->ToString(); << map_it->second.get() << " " << lru_it->ToString();
}
size_ -= map_it->second->Size(); size_ -= map_it->second->Size();
map_.erase(map_it); map_.erase(map_it);
} }
@ -1648,10 +1637,9 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
} }
void RlsLb::RlsChannel::Orphan() { void RlsLb::RlsChannel::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
<< ", channel=" << channel_.get() << ": shutdown"; << ", channel=" << channel_.get() << ": shutdown";
}
is_shutdown_ = true; is_shutdown_ = true;
if (channel_ != nullptr) { if (channel_ != nullptr) {
// Remove channelz linkage. // Remove channelz linkage.
@ -1715,10 +1703,9 @@ RlsLb::RlsRequest::RlsRequest(
backoff_state_(std::move(backoff_state)), backoff_state_(std::move(backoff_state)),
reason_(reason), reason_(reason),
stale_header_data_(std::move(stale_header_data)) { stale_header_data_(std::move(stale_header_data)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< ": RLS request created for key " << key_.ToString(); << ": RLS request created for key " << key_.ToString();
}
GRPC_CLOSURE_INIT(&call_complete_cb_, OnRlsCallComplete, this, nullptr); GRPC_CLOSURE_INIT(&call_complete_cb_, OnRlsCallComplete, this, nullptr);
ExecCtx::Run( ExecCtx::Run(
DEBUG_LOCATION, DEBUG_LOCATION,
@ -1731,10 +1718,9 @@ RlsLb::RlsRequest::~RlsRequest() { CHECK_EQ(call_, nullptr); }
void RlsLb::RlsRequest::Orphan() { void RlsLb::RlsRequest::Orphan() {
if (call_ != nullptr) { if (call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this << "[rlslb " << lb_policy_.get() << "] rls_request=" << this << " "
<< " " << key_.ToString() << ": cancelling RLS call"; << key_.ToString() << ": cancelling RLS call";
}
grpc_call_cancel_internal(call_); grpc_call_cancel_internal(call_);
} }
Unref(DEBUG_LOCATION, "Orphan"); Unref(DEBUG_LOCATION, "Orphan");
@ -2031,10 +2017,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
config_->default_target()); config_->default_target());
created_default_child = true; created_default_child = true;
} else { } else {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << this << "[rlslb " << this << "] using existing child for default target";
<< "] using existing child for default target";
}
default_child_policy_ = default_child_policy_ =
it->second->Ref(DEBUG_LOCATION, "DefaultChildPolicy"); it->second->Ref(DEBUG_LOCATION, "DefaultChildPolicy");
} }
@ -2063,10 +2047,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
p.second->StartUpdate(); p.second->StartUpdate();
} }
} else if (created_default_child) { } else if (created_default_child) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << this << "[rlslb " << this << "] starting default child policy update";
<< "] starting default child policy update";
}
default_child_policy_->StartUpdate(); default_child_policy_->StartUpdate();
} }
} }
@ -2084,10 +2066,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
} }
} }
} else if (created_default_child) { } else if (created_default_child) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << this << "[rlslb " << this << "] finishing default child policy update";
<< "] finishing default child policy update";
}
absl::Status status = default_child_policy_->MaybeFinishUpdate(); absl::Status status = default_child_policy_->MaybeFinishUpdate();
if (!status.ok()) { if (!status.ok()) {
errors.emplace_back(absl::StrCat("target ", config_->default_target(), errors.emplace_back(absl::StrCat("target ", config_->default_target(),
@ -2186,10 +2166,9 @@ void RlsLb::UpdatePickerLocked() {
if (is_shutdown_) return; if (is_shutdown_) return;
for (auto& p : child_policy_map_) { for (auto& p : child_policy_map_) {
grpc_connectivity_state child_state = p.second->connectivity_state(); grpc_connectivity_state child_state = p.second->connectivity_state();
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO)
LOG(INFO) << "[rlslb " << this << "] target " << p.second->target() << "[rlslb " << this << "] target " << p.second->target()
<< " in state " << ConnectivityStateName(child_state); << " in state " << ConnectivityStateName(child_state);
}
if (child_state == GRPC_CHANNEL_READY) { if (child_state == GRPC_CHANNEL_READY) {
state = GRPC_CHANNEL_READY; state = GRPC_CHANNEL_READY;
break; break;
@ -2208,10 +2187,8 @@ void RlsLb::UpdatePickerLocked() {
} }
} }
} }
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) { GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] reporting state "
LOG(INFO) << "[rlslb " << this << "] reporting state " << ConnectivityStateName(state);
<< ConnectivityStateName(state);
}
absl::Status status; absl::Status status;
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
status = absl::UnavailableError("no children available"); status = absl::UnavailableError("no children available");

@ -246,10 +246,9 @@ absl::Status RoundRobin::UpdateLocked(UpdateArgs args) {
} }
addresses = args.addresses->get(); addresses = args.addresses->get();
} else { } else {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) { GRPC_TRACE_LOG(round_robin, INFO)
LOG(INFO) << "[RR " << this << "] received update with address error: " << "[RR " << this
<< args.addresses.status(); << "] received update with address error: " << args.addresses.status();
}
// If we already have a child list, then keep using the existing // If we already have a child list, then keep using the existing
// list, but still report back that the update was not accepted. // list, but still report back that the update was not accepted.
if (endpoint_list_ != nullptr) return args.addresses.status(); if (endpoint_list_ != nullptr) return args.addresses.status();
@ -313,10 +312,9 @@ void RoundRobin::RoundRobinEndpointList::RoundRobinEndpoint::OnStateUpdate(
<< status << ")"; << status << ")";
} }
if (new_state == GRPC_CHANNEL_IDLE) { if (new_state == GRPC_CHANNEL_IDLE) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) { GRPC_TRACE_LOG(round_robin, INFO)
LOG(INFO) << "[RR " << round_robin << "] child " << this << "[RR " << round_robin << "] child " << this
<< " reported IDLE; requesting connection"; << " reported IDLE; requesting connection";
}
ExitIdleLocked(); ExitIdleLocked();
} }
// If state changed, update state counters. // If state changed, update state counters.
@ -396,10 +394,9 @@ void RoundRobin::RoundRobinEndpointList::
// 2) ANY child is CONNECTING => policy is CONNECTING. // 2) ANY child is CONNECTING => policy is CONNECTING.
// 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE. // 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
if (num_ready_ > 0) { if (num_ready_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) { GRPC_TRACE_LOG(round_robin, INFO)
LOG(INFO) << "[RR " << round_robin << "] reporting READY with child list " << "[RR " << round_robin << "] reporting READY with child list "
<< this; << this;
}
std::vector<RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>> pickers; std::vector<RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>> pickers;
for (const auto& endpoint : endpoints()) { for (const auto& endpoint : endpoints()) {
auto state = endpoint->connectivity_state(); auto state = endpoint->connectivity_state();
@ -412,10 +409,9 @@ void RoundRobin::RoundRobinEndpointList::
GRPC_CHANNEL_READY, absl::OkStatus(), GRPC_CHANNEL_READY, absl::OkStatus(),
MakeRefCounted<Picker>(round_robin, std::move(pickers))); MakeRefCounted<Picker>(round_robin, std::move(pickers)));
} else if (num_connecting_ > 0) { } else if (num_connecting_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) { GRPC_TRACE_LOG(round_robin, INFO)
LOG(INFO) << "[RR " << round_robin << "[RR " << round_robin << "] reporting CONNECTING with child list "
<< "] reporting CONNECTING with child list " << this; << this;
}
round_robin->channel_control_helper()->UpdateState( round_robin->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, absl::Status(), GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr)); MakeRefCounted<QueuePicker>(nullptr));

@ -567,18 +567,14 @@ WeightedRoundRobin::Picker::Picker(RefCountedPtr<WeightedRoundRobin> wrr,
} }
WeightedRoundRobin::Picker::~Picker() { WeightedRoundRobin::Picker::~Picker() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this << "[WRR " << wrr_.get() << " picker " << this << "] destroying picker";
<< "] destroying picker";
}
} }
void WeightedRoundRobin::Picker::Orphaned() { void WeightedRoundRobin::Picker::Orphaned() {
MutexLock lock(&timer_mu_); MutexLock lock(&timer_mu_);
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this << "[WRR " << wrr_.get() << " picker " << this << "] cancelling timer";
<< "] cancelling timer";
}
wrr_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_); wrr_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset(); timer_handle_.reset();
wrr_.reset(); wrr_.reset();
@ -644,25 +640,22 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
stats_plugins.AddCounter(kMetricEndpointWeightStale, num_stale, stats_plugins.AddCounter(kMetricEndpointWeightStale, num_stale,
{wrr_->channel_control_helper()->GetTarget()}, {wrr_->channel_control_helper()->GetTarget()},
{wrr_->locality_name_}); {wrr_->locality_name_});
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this << "[WRR " << wrr_.get() << " picker " << this
<< "] new weights: " << absl::StrJoin(weights, " "); << "] new weights: " << absl::StrJoin(weights, " ");
}
auto scheduler_or = StaticStrideScheduler::Make( auto scheduler_or = StaticStrideScheduler::Make(
weights, [this]() { return wrr_->scheduler_state_.fetch_add(1); }); weights, [this]() { return wrr_->scheduler_state_.fetch_add(1); });
std::shared_ptr<StaticStrideScheduler> scheduler; std::shared_ptr<StaticStrideScheduler> scheduler;
if (scheduler_or.has_value()) { if (scheduler_or.has_value()) {
scheduler = scheduler =
std::make_shared<StaticStrideScheduler>(std::move(*scheduler_or)); std::make_shared<StaticStrideScheduler>(std::move(*scheduler_or));
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this << "[WRR " << wrr_.get() << " picker " << this
<< "] new scheduler: " << scheduler.get(); << "] new scheduler: " << scheduler.get();
}
} else { } else {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this << "[WRR " << wrr_.get() << " picker " << this
<< "] no scheduler, falling back to RR"; << "] no scheduler, falling back to RR";
}
stats_plugins.AddCounter(kMetricRrFallback, 1, stats_plugins.AddCounter(kMetricRrFallback, 1,
{wrr_->channel_control_helper()->GetTarget()}, {wrr_->channel_control_helper()->GetTarget()},
{wrr_->locality_name_}); {wrr_->locality_name_});
@ -690,10 +683,9 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
{ {
MutexLock lock(&self->timer_mu_); MutexLock lock(&self->timer_mu_);
if (self->timer_handle_.has_value()) { if (self->timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << self->wrr_.get() << " picker " << "[WRR " << self->wrr_.get() << " picker " << self.get()
<< self.get() << "] timer fired"; << "] timer fired";
}
self->BuildSchedulerAndStartTimerLocked(); self->BuildSchedulerAndStartTimerLocked();
} }
} }
@ -715,10 +707,9 @@ WeightedRoundRobin::WeightedRoundRobin(Args args)
locality_name_(channel_args() locality_name_(channel_args()
.GetString(GRPC_ARG_LB_WEIGHTED_TARGET_CHILD) .GetString(GRPC_ARG_LB_WEIGHTED_TARGET_CHILD)
.value_or("")) { .value_or("")) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << this << "] Created -- locality_name=\"" << "[WRR " << this << "] Created -- locality_name=\""
<< std::string(locality_name_) << "\""; << std::string(locality_name_) << "\"";
}
} }
WeightedRoundRobin::~WeightedRoundRobin() { WeightedRoundRobin::~WeightedRoundRobin() {
@ -778,10 +769,9 @@ absl::Status WeightedRoundRobin::UpdateLocked(UpdateArgs args) {
std::make_shared<EndpointAddressesListIterator>(EndpointAddressesList( std::make_shared<EndpointAddressesListIterator>(EndpointAddressesList(
ordered_addresses.begin(), ordered_addresses.end())); ordered_addresses.begin(), ordered_addresses.end()));
} else { } else {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << this << "] received update with address error: " << "[WRR " << this << "] received update with address error: "
<< args.addresses.status().ToString(); << args.addresses.status().ToString();
}
// If we already have an endpoint list, then keep using the existing // If we already have an endpoint list, then keep using the existing
// list, but still report back that the update was not accepted. // list, but still report back that the update was not accepted.
if (endpoint_list_ != nullptr) return args.addresses.status(); if (endpoint_list_ != nullptr) return args.addresses.status();
@ -893,10 +883,9 @@ void WeightedRoundRobin::WrrEndpointList::WrrEndpoint::OnStateUpdate(
<< status << ")"; << status << ")";
} }
if (new_state == GRPC_CHANNEL_IDLE) { if (new_state == GRPC_CHANNEL_IDLE) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr << "] child " << this << "[WRR " << wrr << "] child " << this
<< " reported IDLE; requesting connection"; << " reported IDLE; requesting connection";
}
ExitIdleLocked(); ExitIdleLocked();
} else if (new_state == GRPC_CHANNEL_READY) { } else if (new_state == GRPC_CHANNEL_READY) {
// If we transition back to READY state, restart the blackout period. // If we transition back to READY state, restart the blackout period.
@ -990,18 +979,15 @@ void WeightedRoundRobin::WrrEndpointList::
// 2) ANY child is CONNECTING => policy is CONNECTING. // 2) ANY child is CONNECTING => policy is CONNECTING.
// 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE. // 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
if (num_ready_ > 0) { if (num_ready_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr << "] reporting READY with endpoint list " << "[WRR " << wrr << "] reporting READY with endpoint list " << this;
<< this;
}
wrr->channel_control_helper()->UpdateState( wrr->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, absl::Status(), GRPC_CHANNEL_READY, absl::Status(),
MakeRefCounted<Picker>(wrr->RefAsSubclass<WeightedRoundRobin>(), this)); MakeRefCounted<Picker>(wrr->RefAsSubclass<WeightedRoundRobin>(), this));
} else if (num_connecting_ > 0) { } else if (num_connecting_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) { GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
LOG(INFO) << "[WRR " << wrr << "[WRR " << wrr << "] reporting CONNECTING with endpoint list "
<< "] reporting CONNECTING with endpoint list " << this; << this;
}
wrr->channel_control_helper()->UpdateState( wrr->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, absl::Status(), GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr)); MakeRefCounted<QueuePicker>(nullptr));

@ -290,10 +290,9 @@ WeightedTargetLb::WeightedTargetLb(Args args)
} }
WeightedTargetLb::~WeightedTargetLb() { WeightedTargetLb::~WeightedTargetLb() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) { GRPC_TRACE_LOG(weighted_target_lb, INFO)
LOG(INFO) << "[weighted_target_lb " << this << "[weighted_target_lb " << this
<< "] destroying weighted_target LB policy"; << "] destroying weighted_target LB policy";
}
} }
void WeightedTargetLb::ShutdownLocked() { void WeightedTargetLb::ShutdownLocked() {
@ -382,10 +381,9 @@ void WeightedTargetLb::UpdateStateLocked() {
// all children. This avoids unnecessary picker churn while an update // all children. This avoids unnecessary picker churn while an update
// is being propagated to our children. // is being propagated to our children.
if (update_in_progress_) return; if (update_in_progress_) return;
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) { GRPC_TRACE_LOG(weighted_target_lb, INFO)
LOG(INFO) << "[weighted_target_lb " << this << "[weighted_target_lb " << this
<< "] scanning children to determine connectivity state"; << "] scanning children to determine connectivity state";
}
// Construct lists of child pickers with associated weights, one for // Construct lists of child pickers with associated weights, one for
// children that are in state READY and another for children that are // children that are in state READY and another for children that are
// in state TRANSIENT_FAILURE. Each child is represented by a portion of // in state TRANSIENT_FAILURE. Each child is represented by a portion of
@ -450,10 +448,9 @@ void WeightedTargetLb::UpdateStateLocked() {
} else { } else {
connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE; connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
} }
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) { GRPC_TRACE_LOG(weighted_target_lb, INFO)
LOG(INFO) << "[weighted_target_lb " << this << "] connectivity changed to " << "[weighted_target_lb " << this << "] connectivity changed to "
<< ConnectivityStateName(connectivity_state); << ConnectivityStateName(connectivity_state);
}
RefCountedPtr<SubchannelPicker> picker; RefCountedPtr<SubchannelPicker> picker;
absl::Status status; absl::Status status;
switch (connectivity_state) { switch (connectivity_state) {
@ -525,10 +522,9 @@ WeightedTargetLb::WeightedChild::WeightedChild(
: weighted_target_policy_(std::move(weighted_target_policy)), : weighted_target_policy_(std::move(weighted_target_policy)),
name_(name), name_(name),
picker_(MakeRefCounted<QueuePicker>(nullptr)) { picker_(MakeRefCounted<QueuePicker>(nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) { GRPC_TRACE_LOG(weighted_target_lb, INFO)
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get() << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] created WeightedChild " << this << " for " << name_; << "] created WeightedChild " << this << " for " << name_;
}
} }
WeightedTargetLb::WeightedChild::~WeightedChild() { WeightedTargetLb::WeightedChild::~WeightedChild() {
@ -657,10 +653,9 @@ void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
void WeightedTargetLb::WeightedChild::DeactivateLocked() { void WeightedTargetLb::WeightedChild::DeactivateLocked() {
// If already deactivated, don't do that again. // If already deactivated, don't do that again.
if (weight_ == 0) return; if (weight_ == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) { GRPC_TRACE_LOG(weighted_target_lb, INFO)
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get() << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_ << ": deactivating"; << "] WeightedChild " << this << " " << name_ << ": deactivating";
}
// Set the child weight to 0 so that future picker won't contain this child. // Set the child weight to 0 so that future picker won't contain this child.
weight_ = 0; weight_ = 0;
// Start a timer to delete the child. // Start a timer to delete the child.

@ -662,10 +662,9 @@ Json CdsLb::CreateChildPolicyConfigForLeafCluster(
{"outlier_detection_experimental", {"outlier_detection_experimental",
Json::FromObject(std::move(outlier_detection_config))}, Json::FromObject(std::move(outlier_detection_config))},
})}); })});
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) { GRPC_TRACE_LOG(cds_lb, INFO)
LOG(INFO) << "[cdslb " << this << "] generated config for child policy: " << "[cdslb " << this << "] generated config for child policy: "
<< JsonDump(outlier_detection_policy, /*indent=*/1); << JsonDump(outlier_detection_policy, /*indent=*/1);
}
return outlier_detection_policy; return outlier_detection_policy;
} }
@ -695,10 +694,9 @@ Json CdsLb::CreateChildPolicyConfigForAggregateCluster(
{"priorities", Json::FromArray(std::move(priority_priorities))}, {"priorities", Json::FromArray(std::move(priority_priorities))},
})}, })},
})}); })});
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) { GRPC_TRACE_LOG(cds_lb, INFO)
LOG(INFO) << "[cdslb " << this << "] generated config for child policy: " << "[cdslb " << this << "] generated config for child policy: "
<< JsonDump(json, /*indent=*/1); << JsonDump(json, /*indent=*/1);
}
return json; return json;
} }
@ -714,10 +712,8 @@ void CdsLb::ResetState() {
} }
void CdsLb::ReportTransientFailure(absl::Status status) { void CdsLb::ReportTransientFailure(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) { GRPC_TRACE_LOG(cds_lb, INFO)
LOG(INFO) << "[cdslb " << this << "[cdslb " << this << "] reporting TRANSIENT_FAILURE: " << status;
<< "] reporting TRANSIENT_FAILURE: " << status;
}
ResetState(); ResetState();
channel_control_helper()->UpdateState( channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status, GRPC_CHANNEL_TRANSIENT_FAILURE, status,

@ -404,10 +404,9 @@ XdsClusterImplLb::Picker::Picker(XdsClusterImplLb* xds_cluster_impl_lb,
drop_config_(xds_cluster_impl_lb->drop_config_), drop_config_(xds_cluster_impl_lb->drop_config_),
drop_stats_(xds_cluster_impl_lb->drop_stats_), drop_stats_(xds_cluster_impl_lb->drop_stats_),
picker_(std::move(picker)) { picker_(std::move(picker)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) { GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
LOG(INFO) << "[xds_cluster_impl_lb " << xds_cluster_impl_lb << "[xds_cluster_impl_lb " << xds_cluster_impl_lb
<< "] constructed new picker " << this; << "] constructed new picker " << this;
}
} }
LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick( LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
@ -500,17 +499,15 @@ LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
XdsClusterImplLb::XdsClusterImplLb(RefCountedPtr<GrpcXdsClient> xds_client, XdsClusterImplLb::XdsClusterImplLb(RefCountedPtr<GrpcXdsClient> xds_client,
Args args) Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) { : LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) { GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
LOG(INFO) << "[xds_cluster_impl_lb " << this << "[xds_cluster_impl_lb " << this << "] created -- using xds client "
<< "] created -- using xds client " << xds_client_.get(); << xds_client_.get();
}
} }
XdsClusterImplLb::~XdsClusterImplLb() { XdsClusterImplLb::~XdsClusterImplLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) { GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
LOG(INFO) << "[xds_cluster_impl_lb " << this << "[xds_cluster_impl_lb " << this
<< "] destroying xds_cluster_impl LB policy"; << "] destroying xds_cluster_impl LB policy";
}
} }
void XdsClusterImplLb::ShutdownLocked() { void XdsClusterImplLb::ShutdownLocked() {
@ -537,10 +534,9 @@ void XdsClusterImplLb::ResetState() {
} }
void XdsClusterImplLb::ReportTransientFailure(absl::Status status) { void XdsClusterImplLb::ReportTransientFailure(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) { GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
LOG(INFO) << "[xds_cluster_impl_lb " << this << "[xds_cluster_impl_lb " << this
<< "] reporting TRANSIENT_FAILURE: " << status; << "] reporting TRANSIENT_FAILURE: " << status;
}
ResetState(); ResetState();
channel_control_helper()->UpdateState( channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status, GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -769,10 +765,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsClusterImplLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy = OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_cluster_impl_lb_trace); &xds_cluster_impl_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) { GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
LOG(INFO) << "[xds_cluster_impl_lb " << this << "[xds_cluster_impl_lb " << this
<< "] Created new child policy handler " << lb_policy.get(); << "] Created new child policy handler " << lb_policy.get();
}
// Add our interested_parties pollset_set to that of the newly created // Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call. // this policy, which in turn is tied to the application's call.
@ -796,10 +791,9 @@ absl::Status XdsClusterImplLb::UpdateChildPolicyLocked(
update_args.args = update_args.args =
args.Set(GRPC_ARG_XDS_CLUSTER_NAME, config_->cluster_name()); args.Set(GRPC_ARG_XDS_CLUSTER_NAME, config_->cluster_name());
// Update the policy. // Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) { GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
LOG(INFO) << "[xds_cluster_impl_lb " << this << "[xds_cluster_impl_lb " << this << "] Updating child policy handler "
<< "] Updating child policy handler " << child_policy_.get(); << child_policy_.get();
}
return child_policy_->UpdateLocked(std::move(update_args)); return child_policy_->UpdateLocked(std::move(update_args));
} }

@ -249,10 +249,9 @@ XdsClusterManagerLb::XdsClusterManagerLb(Args args)
: LoadBalancingPolicy(std::move(args)) {} : LoadBalancingPolicy(std::move(args)) {}
XdsClusterManagerLb::~XdsClusterManagerLb() { XdsClusterManagerLb::~XdsClusterManagerLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) { GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
LOG(INFO) << "[xds_cluster_manager_lb " << this << "[xds_cluster_manager_lb " << this
<< "] destroying xds_cluster_manager LB policy"; << "] destroying xds_cluster_manager LB policy";
}
} }
void XdsClusterManagerLb::ShutdownLocked() { void XdsClusterManagerLb::ShutdownLocked() {
@ -406,17 +405,15 @@ XdsClusterManagerLb::ClusterChild::ClusterChild(
: xds_cluster_manager_policy_(std::move(xds_cluster_manager_policy)), : xds_cluster_manager_policy_(std::move(xds_cluster_manager_policy)),
name_(name), name_(name),
picker_(MakeRefCounted<QueuePicker>(nullptr)) { picker_(MakeRefCounted<QueuePicker>(nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) { GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get() << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] created ClusterChild " << this << " for " << name_; << "] created ClusterChild " << this << " for " << name_;
}
} }
XdsClusterManagerLb::ClusterChild::~ClusterChild() { XdsClusterManagerLb::ClusterChild::~ClusterChild() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) { GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get() << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << ": destroying child"; << "] ClusterChild " << this << ": destroying child";
}
xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild"); xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild");
} }

@ -464,10 +464,9 @@ XdsOverrideHostLb::Picker::Picker(
: policy_(std::move(xds_override_host_lb)), : policy_(std::move(xds_override_host_lb)),
picker_(std::move(picker)), picker_(std::move(picker)),
override_host_health_status_set_(override_host_health_status_set) { override_host_health_status_set_(override_host_health_status_set) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "[xds_override_host_lb " << policy_.get()
<< "] constructed new picker " << this; << "] constructed new picker " << this;
}
} }
absl::optional<LoadBalancingPolicy::PickResult> absl::optional<LoadBalancingPolicy::PickResult>
@ -605,10 +604,9 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
// Min time between timer runs is 5s so that we don't kill ourselves // Min time between timer runs is 5s so that we don't kill ourselves
// with lock contention and CPU usage due to sweeps over the map. // with lock contention and CPU usage due to sweeps over the map.
duration = std::max(duration, Duration::Seconds(5)); duration = std::max(duration, Duration::Seconds(5));
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer " << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< this << ": subchannel cleanup pass will run in " << duration; << ": subchannel cleanup pass will run in " << duration;
}
timer_handle_ = policy_->channel_control_helper()->GetEventEngine()->RunAfter( timer_handle_ = policy_->channel_control_helper()->GetEventEngine()->RunAfter(
duration, [self = RefAsSubclass<IdleTimer>()]() mutable { duration, [self = RefAsSubclass<IdleTimer>()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx; ApplicationCallbackExecCtx callback_exec_ctx;
@ -622,10 +620,9 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
void XdsOverrideHostLb::IdleTimer::Orphan() { void XdsOverrideHostLb::IdleTimer::Orphan() {
if (timer_handle_.has_value()) { if (timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer " << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< this << ": cancelling"; << ": cancelling";
}
policy_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_); policy_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset(); timer_handle_.reset();
} }
@ -635,10 +632,9 @@ void XdsOverrideHostLb::IdleTimer::Orphan() {
void XdsOverrideHostLb::IdleTimer::OnTimerLocked() { void XdsOverrideHostLb::IdleTimer::OnTimerLocked() {
if (timer_handle_.has_value()) { if (timer_handle_.has_value()) {
timer_handle_.reset(); timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer " << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< this << ": timer fired"; << ": timer fired";
}
policy_->CleanupSubchannels(); policy_->CleanupSubchannels();
} }
} }
@ -655,10 +651,9 @@ XdsOverrideHostLb::XdsOverrideHostLb(Args args)
} }
XdsOverrideHostLb::~XdsOverrideHostLb() { XdsOverrideHostLb::~XdsOverrideHostLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this
<< "] destroying xds_override_host LB policy"; << "] destroying xds_override_host LB policy";
}
} }
void XdsOverrideHostLb::ShutdownLocked() { void XdsOverrideHostLb::ShutdownLocked() {
@ -695,10 +690,9 @@ void XdsOverrideHostLb::ResetState() {
} }
void XdsOverrideHostLb::ReportTransientFailure(absl::Status status) { void XdsOverrideHostLb::ReportTransientFailure(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this
<< "] reporting TRANSIENT_FAILURE: " << status; << "] reporting TRANSIENT_FAILURE: " << status;
}
ResetState(); ResetState();
channel_control_helper()->UpdateState( channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status, GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -790,10 +784,9 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
args.addresses = args.addresses =
std::make_shared<ChildEndpointIterator>(std::move(*args.addresses)); std::make_shared<ChildEndpointIterator>(std::move(*args.addresses));
} else { } else {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this
<< "] address error: " << args.addresses.status(); << "] address error: " << args.addresses.status();
}
} }
// Create child policy if needed. // Create child policy if needed.
if (child_policy_ == nullptr) { if (child_policy_ == nullptr) {
@ -805,10 +798,9 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note); update_args.resolution_note = std::move(args.resolution_note);
update_args.config = new_config->child_config(); update_args.config = new_config->child_config();
update_args.args = args_; update_args.args = args_;
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this << "] Updating child policy handler "
<< "] Updating child policy handler " << child_policy_.get(); << child_policy_.get();
}
return child_policy_->UpdateLocked(std::move(update_args)); return child_policy_->UpdateLocked(std::move(update_args));
} }
@ -837,10 +829,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsOverrideHostLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy = OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_override_host_lb_trace); &xds_override_host_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this
<< "] Created new child policy handler " << lb_policy.get(); << "] Created new child policy handler " << lb_policy.get();
}
// Add our interested_parties pollset_set to that of the newly created // Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call. // this policy, which in turn is tied to the application's call.
@ -877,10 +868,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
for (const auto& address : endpoint.addresses()) { for (const auto& address : endpoint.addresses()) {
auto key = grpc_sockaddr_to_string(&address, /*normalize=*/false); auto key = grpc_sockaddr_to_string(&address, /*normalize=*/false);
if (!key.ok()) { if (!key.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this
<< "] no key for endpoint address; not adding to map"; << "] no key for endpoint address; not adding to map";
}
} else { } else {
addresses.push_back(*std::move(key)); addresses.push_back(*std::move(key));
} }
@ -907,10 +897,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
MutexLock lock(&mu_); MutexLock lock(&mu_);
for (auto it = subchannel_map_.begin(); it != subchannel_map_.end();) { for (auto it = subchannel_map_.begin(); it != subchannel_map_.end();) {
if (addresses_for_map.find(it->first) == addresses_for_map.end()) { if (addresses_for_map.find(it->first) == addresses_for_map.end()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "] removing map key " << "[xds_override_host_lb " << this << "] removing map key "
<< it->first; << it->first;
}
it->second->UnsetSubchannel(&subchannel_refs_to_drop); it->second->UnsetSubchannel(&subchannel_refs_to_drop);
it = subchannel_map_.erase(it); it = subchannel_map_.erase(it);
} else { } else {
@ -922,10 +911,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
auto& address_info = p.second; auto& address_info = p.second;
auto it = subchannel_map_.find(address); auto it = subchannel_map_.find(address);
if (it == subchannel_map_.end()) { if (it == subchannel_map_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "] adding map key " << "[xds_override_host_lb " << this << "] adding map key "
<< address; << address;
}
it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>()) it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>())
.first; .first;
} }
@ -973,10 +961,9 @@ XdsOverrideHostLb::AdoptSubchannel(
} }
void XdsOverrideHostLb::CreateSubchannelForAddress(absl::string_view address) { void XdsOverrideHostLb::CreateSubchannelForAddress(absl::string_view address) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this << "] creating owned subchannel for "
<< "] creating owned subchannel for " << address; << address;
}
auto addr = StringToSockaddr(address); auto addr = StringToSockaddr(address);
CHECK(addr.ok()); CHECK(addr.ok());
// Note: We don't currently have any cases where per_address_args need to // Note: We don't currently have any cases where per_address_args need to
@ -1016,10 +1003,9 @@ void XdsOverrideHostLb::CleanupSubchannels() {
if (p.second->last_used_time() <= idle_threshold) { if (p.second->last_used_time() <= idle_threshold) {
auto subchannel = p.second->TakeOwnedSubchannel(); auto subchannel = p.second->TakeOwnedSubchannel();
if (subchannel != nullptr) { if (subchannel != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << this << "[xds_override_host_lb " << this
<< "] dropping subchannel for " << p.first; << "] dropping subchannel for " << p.first;
}
subchannel_refs_to_drop.push_back(std::move(subchannel)); subchannel_refs_to_drop.push_back(std::move(subchannel));
} }
} else { } else {
@ -1093,10 +1079,9 @@ void XdsOverrideHostLb::SubchannelWrapper::CancelConnectivityStateWatch(
} }
void XdsOverrideHostLb::SubchannelWrapper::Orphaned() { void XdsOverrideHostLb::SubchannelWrapper::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "[xds_override_host_lb " << policy_.get() << "] subchannel wrapper "
<< "] subchannel wrapper " << this << " orphaned"; << this << " orphaned";
}
if (!IsWorkSerializerDispatchEnabled()) { if (!IsWorkSerializerDispatchEnabled()) {
wrapped_subchannel()->CancelConnectivityStateWatch(watcher_); wrapped_subchannel()->CancelConnectivityStateWatch(watcher_);
if (subchannel_entry_ != nullptr) { if (subchannel_entry_ != nullptr) {
@ -1206,20 +1191,19 @@ void XdsOverrideHostLb::SubchannelEntry::OnSubchannelWrapperOrphan(
auto* subchannel = GetSubchannel(); auto* subchannel = GetSubchannel();
if (subchannel != wrapper) return; if (subchannel != wrapper) return;
if (last_used_time_ < (Timestamp::Now() - connection_idle_timeout)) { if (last_used_time_ < (Timestamp::Now() - connection_idle_timeout)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb] removing unowned subchannel wrapper " << "[xds_override_host_lb] removing unowned subchannel "
<< subchannel; "wrapper "
} << subchannel;
subchannel_ = nullptr; subchannel_ = nullptr;
} else { } else {
// The subchannel is being released by the child policy, but it // The subchannel is being released by the child policy, but it
// is still within its idle timeout, so we make a new copy of // is still within its idle timeout, so we make a new copy of
// the wrapper with the same underlying subchannel, and we hold // the wrapper with the same underlying subchannel, and we hold
// our own ref to it. // our own ref to it.
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) { GRPC_TRACE_LOG(xds_override_host_lb, INFO)
LOG(INFO) << "[xds_override_host_lb] subchannel wrapper " << subchannel << "[xds_override_host_lb] subchannel wrapper " << subchannel
<< ": cloning to gain ownership"; << ": cloning to gain ownership";
}
subchannel_ = wrapper->Clone(); subchannel_ = wrapper->Clone();
} }
} }

@ -239,10 +239,9 @@ absl::Status XdsWrrLocalityLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note); update_args.resolution_note = std::move(args.resolution_note);
update_args.args = std::move(args.args); update_args.args = std::move(args.args);
// Update the policy. // Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) { GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
LOG(INFO) << "[xds_wrr_locality_lb " << this << "] updating child policy " << "[xds_wrr_locality_lb " << this << "] updating child policy "
<< child_policy_.get(); << child_policy_.get();
}
return child_policy_->UpdateLocked(std::move(update_args)); return child_policy_->UpdateLocked(std::move(update_args));
} }
@ -256,10 +255,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsWrrLocalityLb::CreateChildPolicyLocked(
auto lb_policy = auto lb_policy =
CoreConfiguration::Get().lb_policy_registry().CreateLoadBalancingPolicy( CoreConfiguration::Get().lb_policy_registry().CreateLoadBalancingPolicy(
"weighted_target_experimental", std::move(lb_policy_args)); "weighted_target_experimental", std::move(lb_policy_args));
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) { GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
LOG(INFO) << "[xds_wrr_locality_lb " << this << "[xds_wrr_locality_lb " << this << "] created new child policy "
<< "] created new child policy " << lb_policy.get(); << lb_policy.get();
}
// Add our interested_parties pollset_set to that of the newly created // Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// this LB policy, which in turn is tied to the application's call. // this LB policy, which in turn is tied to the application's call.

@ -181,6 +181,22 @@ class GrpcAresQuery final {
const std::string name_; const std::string name_;
}; };
static absl::Status AresStatusToAbslStatus(int status,
absl::string_view error_msg) {
switch (status) {
case ARES_ECANCELLED:
return absl::CancelledError(error_msg);
case ARES_ENOTIMP:
return absl::UnimplementedError(error_msg);
case ARES_ENOTFOUND:
return absl::NotFoundError(error_msg);
case ARES_ECONNREFUSED:
return absl::UnavailableError(error_msg);
default:
return absl::UnknownError(error_msg);
}
}
static grpc_ares_ev_driver* grpc_ares_ev_driver_ref( static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
grpc_ares_ev_driver* ev_driver) grpc_ares_ev_driver* ev_driver)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) { ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
@ -715,8 +731,8 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
hr->qtype, hr->host, hr->is_balancer, ares_strerror(status)); hr->qtype, hr->host, hr->is_balancer, ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_hostbyname_done_locked: %s", r, GRPC_CARES_TRACE_LOG("request:%p on_hostbyname_done_locked: %s", r,
error_msg.c_str()); error_msg.c_str());
grpc_error_handle error = GRPC_ERROR_CREATE(error_msg); r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
r->error = grpc_error_add_child(error, r->error); r->error);
} }
destroy_hostbyname_request_locked(hr); destroy_hostbyname_request_locked(hr);
} }
@ -761,8 +777,8 @@ static void on_srv_query_done_locked(void* arg, int status, int /*timeouts*/,
ares_strerror(status)); ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_srv_query_done_locked: %s", r, GRPC_CARES_TRACE_LOG("request:%p on_srv_query_done_locked: %s", r,
error_msg.c_str()); error_msg.c_str());
grpc_error_handle error = GRPC_ERROR_CREATE(error_msg); r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
r->error = grpc_error_add_child(error, r->error); r->error);
} }
delete q; delete q;
} }
@ -780,7 +796,6 @@ static void on_txt_done_locked(void* arg, int status, int /*timeouts*/,
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1; const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext* result = nullptr; struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr; struct ares_txt_ext* reply = nullptr;
grpc_error_handle error;
if (status != ARES_SUCCESS) goto fail; if (status != ARES_SUCCESS) goto fail;
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked name=%s ARES_SUCCESS", r, GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked name=%s ARES_SUCCESS", r,
q->name().c_str()); q->name().c_str());
@ -824,8 +839,8 @@ fail:
q->name(), ares_strerror(status)); q->name(), ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked %s", r, GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked %s", r,
error_msg.c_str()); error_msg.c_str());
error = GRPC_ERROR_CREATE(error_msg); r->error =
r->error = grpc_error_add_child(error, r->error); grpc_error_add_child(AresStatusToAbslStatus(status, error_msg), r->error);
} }
grpc_error_handle set_request_dns_server(grpc_ares_request* r, grpc_error_handle set_request_dns_server(grpc_ares_request* r,

@ -106,6 +106,20 @@ Histogram_10000_20 operator-(const Histogram_10000_20& left,
} }
return result; return result;
} }
void HistogramCollector_1800000_40::Collect(
Histogram_1800000_40* result) const {
for (int i = 0; i < 40; i++) {
result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed);
}
}
Histogram_1800000_40 operator-(const Histogram_1800000_40& left,
const Histogram_1800000_40& right) {
Histogram_1800000_40 result;
for (int i = 0; i < 40; i++) {
result.buckets_[i] = left.buckets_[i] - right.buckets_[i];
}
return result;
}
const absl::string_view const absl::string_view
GlobalStats::counter_name[static_cast<int>(Counter::COUNT)] = { GlobalStats::counter_name[static_cast<int>(Counter::COUNT)] = {
"client_calls_created", "client_calls_created",
@ -123,6 +137,8 @@ const absl::string_view
"http2_writes_begun", "http2_writes_begun",
"http2_transport_stalls", "http2_transport_stalls",
"http2_stream_stalls", "http2_stream_stalls",
"http2_hpack_hits",
"http2_hpack_misses",
"cq_pluck_creates", "cq_pluck_creates",
"cq_next_creates", "cq_next_creates",
"cq_callback_creates", "cq_callback_creates",
@ -161,6 +177,8 @@ const absl::string_view GlobalStats::counter_doc[static_cast<int>(
"control window", "control window",
"Number of times sending was completely stalled by the stream flow control " "Number of times sending was completely stalled by the stream flow control "
"window", "window",
"Number of HPACK cache hits",
"Number of HPACK cache misses (entries added but never used)",
"Number of completion queues created for cq_pluck (indicates sync api " "Number of completion queues created for cq_pluck (indicates sync api "
"usage)", "usage)",
"Number of completion queues created for cq_next (indicates cq async api " "Number of completion queues created for cq_next (indicates cq async api "
@ -192,6 +210,7 @@ const absl::string_view
"tcp_read_offer_iov_size", "tcp_read_offer_iov_size",
"http2_send_message_size", "http2_send_message_size",
"http2_metadata_size", "http2_metadata_size",
"http2_hpack_entry_lifetime",
"wrr_subchannel_list_size", "wrr_subchannel_list_size",
"wrr_subchannel_ready_size", "wrr_subchannel_ready_size",
"work_serializer_run_time_ms", "work_serializer_run_time_ms",
@ -223,6 +242,7 @@ const absl::string_view GlobalStats::histogram_doc[static_cast<int>(
"Number of byte segments offered to each syscall_read", "Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport", "Size of messages received by HTTP2 transport",
"Number of bytes consumed by metadata, according to HPACK accounting rules", "Number of bytes consumed by metadata, according to HPACK accounting rules",
"Lifetime of HPACK entries in the cache (in milliseconds)",
"Number of subchannels in a subchannel list at picker creation time", "Number of subchannels in a subchannel list at picker creation time",
"Number of READY subchannels in a subchannel list at picker creation time", "Number of READY subchannels in a subchannel list at picker creation time",
"Number of milliseconds work serializers run for", "Number of milliseconds work serializers run for",
@ -278,6 +298,15 @@ const int kStatsTable10[21] = {0, 1, 2, 4, 7, 12, 19,
const uint8_t kStatsTable11[23] = {3, 3, 4, 5, 5, 6, 7, 8, const uint8_t kStatsTable11[23] = {3, 3, 4, 5, 5, 6, 7, 8,
9, 9, 10, 11, 12, 12, 13, 14, 9, 9, 10, 11, 12, 12, 13, 14,
15, 15, 16, 17, 18, 18, 19}; 15, 15, 16, 17, 18, 18, 19};
const int kStatsTable12[41] = {
0, 1, 2, 3, 5, 8, 12, 18, 26,
37, 53, 76, 108, 153, 217, 308, 436, 617,
873, 1235, 1748, 2473, 3499, 4950, 7003, 9907, 14015,
19825, 28044, 39670, 56116, 79379, 112286, 158835, 224680, 317821,
449574, 635945, 899575, 1272492, 1800000};
const uint8_t kStatsTable13[37] = {
4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
} // namespace } // namespace
int Histogram_100000_20::BucketFor(int value) { int Histogram_100000_20::BucketFor(int value) {
if (value < 3) { if (value < 3) {
@ -405,6 +434,29 @@ int Histogram_10000_20::BucketFor(int value) {
} }
} }
} }
int Histogram_1800000_40::BucketFor(int value) {
if (value < 4) {
if (value < 0) {
return 0;
} else {
return value;
}
} else {
if (value < 1048577) {
DblUint val;
val.dbl = value;
const int bucket =
kStatsTable13[((val.uint - 4616189618054758400ull) >> 51)];
return bucket - (value < kStatsTable12[bucket]);
} else {
if (value < 1272492) {
return 38;
} else {
return 39;
}
}
}
}
GlobalStats::GlobalStats() GlobalStats::GlobalStats()
: client_calls_created{0}, : client_calls_created{0},
server_calls_created{0}, server_calls_created{0},
@ -421,6 +473,8 @@ GlobalStats::GlobalStats()
http2_writes_begun{0}, http2_writes_begun{0},
http2_transport_stalls{0}, http2_transport_stalls{0},
http2_stream_stalls{0}, http2_stream_stalls{0},
http2_hpack_hits{0},
http2_hpack_misses{0},
cq_pluck_creates{0}, cq_pluck_creates{0},
cq_next_creates{0}, cq_next_creates{0},
cq_callback_creates{0}, cq_callback_creates{0},
@ -466,6 +520,9 @@ HistogramView GlobalStats::histogram(Histogram which) const {
case Histogram::kHttp2MetadataSize: case Histogram::kHttp2MetadataSize:
return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26, return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26,
http2_metadata_size.buckets()}; http2_metadata_size.buckets()};
case Histogram::kHttp2HpackEntryLifetime:
return HistogramView{&Histogram_1800000_40::BucketFor, kStatsTable12, 40,
http2_hpack_entry_lifetime.buckets()};
case Histogram::kWrrSubchannelListSize: case Histogram::kWrrSubchannelListSize:
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20, return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20,
wrr_subchannel_list_size.buckets()}; wrr_subchannel_list_size.buckets()};
@ -560,6 +617,10 @@ std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
data.http2_transport_stalls.load(std::memory_order_relaxed); data.http2_transport_stalls.load(std::memory_order_relaxed);
result->http2_stream_stalls += result->http2_stream_stalls +=
data.http2_stream_stalls.load(std::memory_order_relaxed); data.http2_stream_stalls.load(std::memory_order_relaxed);
result->http2_hpack_hits +=
data.http2_hpack_hits.load(std::memory_order_relaxed);
result->http2_hpack_misses +=
data.http2_hpack_misses.load(std::memory_order_relaxed);
result->cq_pluck_creates += result->cq_pluck_creates +=
data.cq_pluck_creates.load(std::memory_order_relaxed); data.cq_pluck_creates.load(std::memory_order_relaxed);
result->cq_next_creates += result->cq_next_creates +=
@ -598,6 +659,8 @@ std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
data.tcp_read_offer_iov_size.Collect(&result->tcp_read_offer_iov_size); data.tcp_read_offer_iov_size.Collect(&result->tcp_read_offer_iov_size);
data.http2_send_message_size.Collect(&result->http2_send_message_size); data.http2_send_message_size.Collect(&result->http2_send_message_size);
data.http2_metadata_size.Collect(&result->http2_metadata_size); data.http2_metadata_size.Collect(&result->http2_metadata_size);
data.http2_hpack_entry_lifetime.Collect(
&result->http2_hpack_entry_lifetime);
data.wrr_subchannel_list_size.Collect(&result->wrr_subchannel_list_size); data.wrr_subchannel_list_size.Collect(&result->wrr_subchannel_list_size);
data.wrr_subchannel_ready_size.Collect(&result->wrr_subchannel_ready_size); data.wrr_subchannel_ready_size.Collect(&result->wrr_subchannel_ready_size);
data.work_serializer_run_time_ms.Collect( data.work_serializer_run_time_ms.Collect(
@ -664,6 +727,8 @@ std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
result->http2_transport_stalls = result->http2_transport_stalls =
http2_transport_stalls - other.http2_transport_stalls; http2_transport_stalls - other.http2_transport_stalls;
result->http2_stream_stalls = http2_stream_stalls - other.http2_stream_stalls; result->http2_stream_stalls = http2_stream_stalls - other.http2_stream_stalls;
result->http2_hpack_hits = http2_hpack_hits - other.http2_hpack_hits;
result->http2_hpack_misses = http2_hpack_misses - other.http2_hpack_misses;
result->cq_pluck_creates = cq_pluck_creates - other.cq_pluck_creates; result->cq_pluck_creates = cq_pluck_creates - other.cq_pluck_creates;
result->cq_next_creates = cq_next_creates - other.cq_next_creates; result->cq_next_creates = cq_next_creates - other.cq_next_creates;
result->cq_callback_creates = cq_callback_creates - other.cq_callback_creates; result->cq_callback_creates = cq_callback_creates - other.cq_callback_creates;
@ -695,6 +760,8 @@ std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
result->http2_send_message_size = result->http2_send_message_size =
http2_send_message_size - other.http2_send_message_size; http2_send_message_size - other.http2_send_message_size;
result->http2_metadata_size = http2_metadata_size - other.http2_metadata_size; result->http2_metadata_size = http2_metadata_size - other.http2_metadata_size;
result->http2_hpack_entry_lifetime =
http2_hpack_entry_lifetime - other.http2_hpack_entry_lifetime;
result->wrr_subchannel_list_size = result->wrr_subchannel_list_size =
wrr_subchannel_list_size - other.wrr_subchannel_list_size; wrr_subchannel_list_size - other.wrr_subchannel_list_size;
result->wrr_subchannel_ready_size = result->wrr_subchannel_ready_size =

@ -35,6 +35,7 @@ class Histogram_100000_20 {
public: public:
static int BucketFor(int value); static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; } const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_100000_20 operator-(const Histogram_100000_20& left, friend Histogram_100000_20 operator-(const Histogram_100000_20& left,
const Histogram_100000_20& right); const Histogram_100000_20& right);
@ -58,6 +59,7 @@ class Histogram_65536_26 {
public: public:
static int BucketFor(int value); static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; } const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 26; }
friend Histogram_65536_26 operator-(const Histogram_65536_26& left, friend Histogram_65536_26 operator-(const Histogram_65536_26& left,
const Histogram_65536_26& right); const Histogram_65536_26& right);
@ -81,6 +83,7 @@ class Histogram_100_20 {
public: public:
static int BucketFor(int value); static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; } const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_100_20 operator-(const Histogram_100_20& left, friend Histogram_100_20 operator-(const Histogram_100_20& left,
const Histogram_100_20& right); const Histogram_100_20& right);
@ -104,6 +107,7 @@ class Histogram_16777216_20 {
public: public:
static int BucketFor(int value); static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; } const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_16777216_20 operator-(const Histogram_16777216_20& left, friend Histogram_16777216_20 operator-(const Histogram_16777216_20& left,
const Histogram_16777216_20& right); const Histogram_16777216_20& right);
@ -127,6 +131,7 @@ class Histogram_80_10 {
public: public:
static int BucketFor(int value); static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; } const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 10; }
friend Histogram_80_10 operator-(const Histogram_80_10& left, friend Histogram_80_10 operator-(const Histogram_80_10& left,
const Histogram_80_10& right); const Histogram_80_10& right);
@ -150,6 +155,7 @@ class Histogram_10000_20 {
public: public:
static int BucketFor(int value); static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; } const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_10000_20 operator-(const Histogram_10000_20& left, friend Histogram_10000_20 operator-(const Histogram_10000_20& left,
const Histogram_10000_20& right); const Histogram_10000_20& right);
@ -168,6 +174,30 @@ class HistogramCollector_10000_20 {
private: private:
std::atomic<uint64_t> buckets_[20]{}; std::atomic<uint64_t> buckets_[20]{};
}; };
class HistogramCollector_1800000_40;
class Histogram_1800000_40 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 40; }
friend Histogram_1800000_40 operator-(const Histogram_1800000_40& left,
const Histogram_1800000_40& right);
private:
friend class HistogramCollector_1800000_40;
uint64_t buckets_[40]{};
};
class HistogramCollector_1800000_40 {
public:
void Increment(int value) {
buckets_[Histogram_1800000_40::BucketFor(value)].fetch_add(
1, std::memory_order_relaxed);
}
void Collect(Histogram_1800000_40* result) const;
private:
std::atomic<uint64_t> buckets_[40]{};
};
struct GlobalStats { struct GlobalStats {
enum class Counter { enum class Counter {
kClientCallsCreated, kClientCallsCreated,
@ -185,6 +215,8 @@ struct GlobalStats {
kHttp2WritesBegun, kHttp2WritesBegun,
kHttp2TransportStalls, kHttp2TransportStalls,
kHttp2StreamStalls, kHttp2StreamStalls,
kHttp2HpackHits,
kHttp2HpackMisses,
kCqPluckCreates, kCqPluckCreates,
kCqNextCreates, kCqNextCreates,
kCqCallbackCreates, kCqCallbackCreates,
@ -213,6 +245,7 @@ struct GlobalStats {
kTcpReadOfferIovSize, kTcpReadOfferIovSize,
kHttp2SendMessageSize, kHttp2SendMessageSize,
kHttp2MetadataSize, kHttp2MetadataSize,
kHttp2HpackEntryLifetime,
kWrrSubchannelListSize, kWrrSubchannelListSize,
kWrrSubchannelReadySize, kWrrSubchannelReadySize,
kWorkSerializerRunTimeMs, kWorkSerializerRunTimeMs,
@ -259,6 +292,8 @@ struct GlobalStats {
uint64_t http2_writes_begun; uint64_t http2_writes_begun;
uint64_t http2_transport_stalls; uint64_t http2_transport_stalls;
uint64_t http2_stream_stalls; uint64_t http2_stream_stalls;
uint64_t http2_hpack_hits;
uint64_t http2_hpack_misses;
uint64_t cq_pluck_creates; uint64_t cq_pluck_creates;
uint64_t cq_next_creates; uint64_t cq_next_creates;
uint64_t cq_callback_creates; uint64_t cq_callback_creates;
@ -287,6 +322,7 @@ struct GlobalStats {
Histogram_80_10 tcp_read_offer_iov_size; Histogram_80_10 tcp_read_offer_iov_size;
Histogram_16777216_20 http2_send_message_size; Histogram_16777216_20 http2_send_message_size;
Histogram_65536_26 http2_metadata_size; Histogram_65536_26 http2_metadata_size;
Histogram_1800000_40 http2_hpack_entry_lifetime;
Histogram_10000_20 wrr_subchannel_list_size; Histogram_10000_20 wrr_subchannel_list_size;
Histogram_10000_20 wrr_subchannel_ready_size; Histogram_10000_20 wrr_subchannel_ready_size;
Histogram_100000_20 work_serializer_run_time_ms; Histogram_100000_20 work_serializer_run_time_ms;
@ -367,6 +403,12 @@ class GlobalStatsCollector {
data_.this_cpu().http2_stream_stalls.fetch_add(1, data_.this_cpu().http2_stream_stalls.fetch_add(1,
std::memory_order_relaxed); std::memory_order_relaxed);
} }
void IncrementHttp2HpackHits() {
data_.this_cpu().http2_hpack_hits.fetch_add(1, std::memory_order_relaxed);
}
void IncrementHttp2HpackMisses() {
data_.this_cpu().http2_hpack_misses.fetch_add(1, std::memory_order_relaxed);
}
void IncrementCqPluckCreates() { void IncrementCqPluckCreates() {
data_.this_cpu().cq_pluck_creates.fetch_add(1, std::memory_order_relaxed); data_.this_cpu().cq_pluck_creates.fetch_add(1, std::memory_order_relaxed);
} }
@ -447,6 +489,9 @@ class GlobalStatsCollector {
void IncrementHttp2MetadataSize(int value) { void IncrementHttp2MetadataSize(int value) {
data_.this_cpu().http2_metadata_size.Increment(value); data_.this_cpu().http2_metadata_size.Increment(value);
} }
void IncrementHttp2HpackEntryLifetime(int value) {
data_.this_cpu().http2_hpack_entry_lifetime.Increment(value);
}
void IncrementWrrSubchannelListSize(int value) { void IncrementWrrSubchannelListSize(int value) {
data_.this_cpu().wrr_subchannel_list_size.Increment(value); data_.this_cpu().wrr_subchannel_list_size.Increment(value);
} }
@ -526,6 +571,8 @@ class GlobalStatsCollector {
std::atomic<uint64_t> http2_writes_begun{0}; std::atomic<uint64_t> http2_writes_begun{0};
std::atomic<uint64_t> http2_transport_stalls{0}; std::atomic<uint64_t> http2_transport_stalls{0};
std::atomic<uint64_t> http2_stream_stalls{0}; std::atomic<uint64_t> http2_stream_stalls{0};
std::atomic<uint64_t> http2_hpack_hits{0};
std::atomic<uint64_t> http2_hpack_misses{0};
std::atomic<uint64_t> cq_pluck_creates{0}; std::atomic<uint64_t> cq_pluck_creates{0};
std::atomic<uint64_t> cq_next_creates{0}; std::atomic<uint64_t> cq_next_creates{0};
std::atomic<uint64_t> cq_callback_creates{0}; std::atomic<uint64_t> cq_callback_creates{0};
@ -551,6 +598,7 @@ class GlobalStatsCollector {
HistogramCollector_80_10 tcp_read_offer_iov_size; HistogramCollector_80_10 tcp_read_offer_iov_size;
HistogramCollector_16777216_20 http2_send_message_size; HistogramCollector_16777216_20 http2_send_message_size;
HistogramCollector_65536_26 http2_metadata_size; HistogramCollector_65536_26 http2_metadata_size;
HistogramCollector_1800000_40 http2_hpack_entry_lifetime;
HistogramCollector_10000_20 wrr_subchannel_list_size; HistogramCollector_10000_20 wrr_subchannel_list_size;
HistogramCollector_10000_20 wrr_subchannel_ready_size; HistogramCollector_10000_20 wrr_subchannel_ready_size;
HistogramCollector_100000_20 work_serializer_run_time_ms; HistogramCollector_100000_20 work_serializer_run_time_ms;

@ -80,6 +80,14 @@
max: 65536 max: 65536
buckets: 26 buckets: 26
doc: Number of bytes consumed by metadata, according to HPACK accounting rules doc: Number of bytes consumed by metadata, according to HPACK accounting rules
- counter: http2_hpack_hits
doc: Number of HPACK cache hits
- counter: http2_hpack_misses
doc: Number of HPACK cache misses (entries added but never used)
- histogram: http2_hpack_entry_lifetime
doc: Lifetime of HPACK entries in the cache (in milliseconds)
max: 1800000
buckets: 40
# completion queues # completion queues
- counter: cq_pluck_creates - counter: cq_pluck_creates
doc: Number of completion queues created for cq_pluck (indicates sync api usage) doc: Number of completion queues created for cq_pluck (indicates sync api usage)

@ -28,6 +28,7 @@
#include <grpc/support/port_platform.h> #include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/crash.h" #include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/dump_args.h"
#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/transport_security_grpc.h" #include "src/core/tsi/transport_security_grpc.h"
@ -210,6 +211,8 @@ static tsi_result tsi_fake_frame_decode(const unsigned char* incoming_bytes,
frame->offset += to_read_size; frame->offset += to_read_size;
available_size -= to_read_size; available_size -= to_read_size;
frame->size = load32_little_endian(frame->data); frame->size = load32_little_endian(frame->data);
if (frame->size < 4) return TSI_DATA_CORRUPTED;
if (frame->size > 16 * 1024 * 1024) return TSI_DATA_CORRUPTED;
tsi_fake_frame_ensure_size(frame); tsi_fake_frame_ensure_size(frame);
} }

@ -0,0 +1,86 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
#define GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
#include <memory>
#include <utility>
#include "absl/log/check.h"
#include "absl/numeric/bits.h"
namespace grpc_core {
// Like std::unique_ptr, but also includes a small bitset stored in the lower
// bits of the underlying T*.
// Like std::unique_ptr<T>, but packs a kBits-wide bitset into the low bits
// of the stored T*. Valid only when alignof(T) leaves at least kBits low
// pointer bits free (DCHECKed in the destructor).
template <typename T, size_t kBits>
class UniquePtrWithBitset {
 public:
  UniquePtrWithBitset() : p_(0) {}
  // NOLINTNEXTLINE(google-explicit-constructor)
  UniquePtrWithBitset(std::nullptr_t) : p_(0) {}
  explicit UniquePtrWithBitset(T* p) : p_(reinterpret_cast<uintptr_t>(p)) {}
  // NOLINTNEXTLINE(google-explicit-constructor)
  UniquePtrWithBitset(std::unique_ptr<T>&& p)
      : UniquePtrWithBitset(p.release()) {}
  ~UniquePtrWithBitset() {
    // T's alignment must guarantee the kBits low bits of any T* are zero.
    DCHECK_LE(kBits, static_cast<size_t>(absl::countr_zero(alignof(T))));
    delete get();
  }
  UniquePtrWithBitset(const UniquePtrWithBitset&) = delete;
  UniquePtrWithBitset& operator=(const UniquePtrWithBitset&) = delete;
  UniquePtrWithBitset(UniquePtrWithBitset&& other) noexcept
      : p_(std::exchange(other.p_, 0)) {}
  UniquePtrWithBitset& operator=(UniquePtrWithBitset&& other) noexcept {
    if (this != &other) {
      // BUGFIX: destroy the currently-held object before taking over
      // other's; plain assignment leaked the previous pointee. The self
      // check also prevents self-move from nulling out a live pointer.
      delete get();
      p_ = std::exchange(other.p_, 0);
    }
    return *this;
  }
  // Pointer with the bitset masked off.
  T* get() const { return reinterpret_cast<T*>(p_ & ~kBitMask); }
  T* operator->() const { return get(); }
  T& operator*() const { return *get(); }
  explicit operator bool() const { return get() != nullptr; }
  // Replaces the held pointer; the bitset is preserved across the reset.
  void reset(T* p = nullptr) {
    uintptr_t bits = p_ & kBitMask;
    delete get();
    p_ = reinterpret_cast<uintptr_t>(p) | bits;
  }
  void SetBit(size_t bit) {
    DCHECK_LT(bit, kBits);
    // Shift a uintptr_t, not an int, so the mask has pointer width.
    p_ |= uintptr_t{1} << bit;
  }
  void ClearBit(size_t bit) {
    DCHECK_LT(bit, kBits);
    p_ &= ~(uintptr_t{1} << bit);
  }
  bool TestBit(size_t bit) const {
    DCHECK_LT(bit, kBits);
    return (p_ & (uintptr_t{1} << bit)) != 0;
  }
  // Equality compares pointer AND bitset in one shot.
  friend bool operator==(const UniquePtrWithBitset& a,
                         const UniquePtrWithBitset& b) {
    return a.p_ == b.p_;
  }

 private:
  // Mask selecting the kBits low bits that hold the bitset.
  static constexpr uintptr_t kBitMask = (uintptr_t{1} << kBits) - 1;
  uintptr_t p_;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H

@ -1564,8 +1564,9 @@ XdsClient::XdsClient(
} }
CHECK(bootstrap_ != nullptr); CHECK(bootstrap_ != nullptr);
if (bootstrap_->node() != nullptr) { if (bootstrap_->node() != nullptr) {
LOG(INFO) << "[xds_client " << this GRPC_TRACE_LOG(xds_client, INFO)
<< "] xDS node ID: " << bootstrap_->node()->id(); << "[xds_client " << this
<< "] xDS node ID: " << bootstrap_->node()->id();
} }
} }

@ -44,8 +44,8 @@ module GRPC
include Core::CallOps include Core::CallOps
extend Forwardable extend Forwardable
attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert
def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=, def_delegators :@call, :cancel, :cancel_with_status, :metadata,
:trailing_metadata, :status :write_flag, :write_flag=, :trailing_metadata, :status
# client_invoke begins a client invocation. # client_invoke begins a client invocation.
# #
@ -620,6 +620,8 @@ module GRPC
# @param metadata [Hash] metadata to be sent to the server. If a value is # @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent # a list, multiple metadata for its key are sent
def start_call(metadata = {}) def start_call(metadata = {})
# TODO(apolcyn): we should cancel and clean up the call in case this
# send initial MD op fails.
merge_metadata_to_send(metadata) && send_initial_metadata merge_metadata_to_send(metadata) && send_initial_metadata
end end
@ -665,9 +667,10 @@ module GRPC
# Operation limits access to an ActiveCall's methods for use as # Operation limits access to an ActiveCall's methods for use as
# a Operation on the client. # a Operation on the client.
Operation = view_class(:cancel, :cancelled?, :deadline, :execute, # TODO(apolcyn): expose peer getter
:metadata, :status, :start_call, :wait, :write_flag, Operation = view_class(:cancel, :cancel_with_status, :cancelled?, :deadline,
:write_flag=, :trailing_metadata) :execute, :metadata, :status, :start_call, :wait,
:write_flag, :write_flag=, :trailing_metadata)
# InterceptableView further limits access to an ActiveCall's methods # InterceptableView further limits access to an ActiveCall's methods
# for use in interceptors on the client, exposing only the deadline # for use in interceptors on the client, exposing only the deadline

@ -90,88 +90,101 @@ describe GRPC::Core::Call do
describe '#status' do describe '#status' do
it 'can save the status and read it back' do it 'can save the status and read it back' do
call = make_test_call make_test_call do |call|
sts = Struct::Status.new(OK, 'OK') sts = Struct::Status.new(OK, 'OK')
expect { call.status = sts }.not_to raise_error expect { call.status = sts }.not_to raise_error
expect(call.status).to eq(sts) expect(call.status).to eq(sts)
end
end end
it 'must be set to a status' do it 'must be set to a status' do
call = make_test_call make_test_call do |call|
bad_sts = Object.new bad_sts = Object.new
expect { call.status = bad_sts }.to raise_error(TypeError) expect { call.status = bad_sts }.to raise_error(TypeError)
end
end end
it 'can be set to nil' do it 'can be set to nil' do
call = make_test_call make_test_call do |call|
expect { call.status = nil }.not_to raise_error expect { call.status = nil }.not_to raise_error
end
end end
end end
describe '#metadata' do describe '#metadata' do
it 'can save the metadata hash and read it back' do it 'can save the metadata hash and read it back' do
call = make_test_call make_test_call do |call|
md = { 'k1' => 'v1', 'k2' => 'v2' } md = { 'k1' => 'v1', 'k2' => 'v2' }
expect { call.metadata = md }.not_to raise_error expect { call.metadata = md }.not_to raise_error
expect(call.metadata).to be(md) expect(call.metadata).to be(md)
end
end end
it 'must be set with a hash' do it 'must be set with a hash' do
call = make_test_call make_test_call do |call|
bad_md = Object.new bad_md = Object.new
expect { call.metadata = bad_md }.to raise_error(TypeError) expect { call.metadata = bad_md }.to raise_error(TypeError)
end
end end
it 'can be set to nil' do it 'can be set to nil' do
call = make_test_call make_test_call do |call|
expect { call.metadata = nil }.not_to raise_error expect { call.metadata = nil }.not_to raise_error
end
end end
end end
describe '#set_credentials!' do describe '#set_credentials!' do
it 'can set a valid CallCredentials object' do it 'can set a valid CallCredentials object' do
call = make_test_call make_test_call do |call|
auth_proc = proc { { 'plugin_key' => 'plugin_value' } } auth_proc = proc { { 'plugin_key' => 'plugin_value' } }
creds = GRPC::Core::CallCredentials.new auth_proc creds = GRPC::Core::CallCredentials.new auth_proc
expect { call.set_credentials! creds }.not_to raise_error expect { call.set_credentials! creds }.not_to raise_error
end
end end
end end
describe '#cancel' do describe '#cancel' do
it 'completes ok' do it 'completes ok' do
call = make_test_call make_test_call do |call|
expect { call.cancel }.not_to raise_error expect { call.cancel }.not_to raise_error
end
end end
it 'completes ok when the call is closed' do it 'completes ok when the call is closed' do
call = make_test_call make_test_call do |call|
call.close call.close
expect { call.cancel }.not_to raise_error expect { call.cancel }.not_to raise_error
end
end end
end end
describe '#cancel_with_status' do describe '#cancel_with_status' do
it 'completes ok' do it 'completes ok' do
call = make_test_call make_test_call do |call|
expect do expect do
call.cancel_with_status(0, 'test status') call.cancel_with_status(0, 'test status')
end.not_to raise_error end.not_to raise_error
expect do expect do
call.cancel_with_status(0, nil) call.cancel_with_status(0, nil)
end.to raise_error(TypeError) end.to raise_error(TypeError)
end
end end
it 'completes ok when the call is closed' do it 'completes ok when the call is closed' do
call = make_test_call make_test_call do |call|
call.close call.close
expect do expect do
call.cancel_with_status(0, 'test status') call.cancel_with_status(0, 'test status')
end.not_to raise_error end.not_to raise_error
end
end end
end end
def make_test_call def make_test_call
@ch.create_call(nil, nil, 'phony_method', nil, deadline) call = @ch.create_call(nil, nil, 'phony_method', nil, deadline)
yield call
call.close
end end
def deadline def deadline

@ -118,7 +118,8 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5 deadline = Time.now + 5
blk = proc do blk = proc do
ch.create_call(nil, nil, 'phony_method', nil, deadline) call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
call.close
end end
expect(&blk).to_not raise_error expect(&blk).to_not raise_error
end end
@ -132,8 +133,9 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5 deadline = Time.now + 5
blk = proc do blk = proc do
ch.create_call(nil, nil, 'phony_method', nil, deadline) call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
STDERR.puts "#{Time.now}: created call" STDERR.puts "#{Time.now}: created call"
call.close
end end
expect(&blk).to raise_error(RuntimeError) expect(&blk).to raise_error(RuntimeError)
STDERR.puts "#{Time.now}: finished: raises an error if called on a closed channel" STDERR.puts "#{Time.now}: finished: raises an error if called on a closed channel"

@ -16,36 +16,8 @@ require 'spec_helper'
include GRPC::Core include GRPC::Core
shared_context 'setup: tags' do
let(:sent_message) { 'sent message' }
let(:reply_text) { 'the reply' }
def deadline
Time.now + 5
end
def server_allows_client_to_proceed(metadata = {})
recvd_rpc = @server.request_call
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
ops = { CallOps::SEND_INITIAL_METADATA => metadata }
server_batch = server_call.run_batch(ops)
expect(server_batch.send_metadata).to be true
server_call
end
def new_client_call
@ch.create_call(nil, nil, '/method', nil, deadline)
end
def ok_status
Struct::Status.new(StatusCodes::OK, 'OK')
end
end
shared_examples 'basic GRPC message delivery is OK' do shared_examples 'basic GRPC message delivery is OK' do
include GRPC::Core include GRPC::Core
include_context 'setup: tags'
context 'the test channel' do context 'the test channel' do
it 'should have a target' do it 'should have a target' do
@ -53,272 +25,45 @@ shared_examples 'basic GRPC message delivery is OK' do
end end
end end
context 'a client call' do it 'unary calls work' do
it 'should have a peer' do run_services_on_server(@server, services: [EchoService]) do
expect(new_client_call.peer).to be_a(String) call = @stub.an_rpc(EchoMsg.new, return_op: true)
end expect(call.execute).to be_a(EchoMsg)
end
it 'calls have peer info' do
call = new_client_call
expect(call.peer).to be_a(String)
end
it 'servers receive requests from clients and can respond' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'responses written by servers are received by the client' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq(reply_text)
expect(final_client_batch.status.code).to eq(0)
end
it 'compressed messages can be sent and received' do
call = new_client_call
server_call = nil
long_request_str = '0' * 2000
long_response_str = '1' * 2000
md = { 'grpc-internal-encoding-request' => 'gzip' }
server_thread = Thread.new do
server_call = server_allows_client_to_proceed(md)
end end
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_MESSAGE => long_request_str,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(long_request_str)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => long_response_str,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq long_response_str
expect(final_client_batch.status.code).to eq(0)
end end
it 'servers can ignore a client write and send a status' do it 'unary calls work when enabling compression' do
call = new_client_call run_services_on_server(@server, services: [EchoService]) do
server_call = nil long_request_str = '0' * 2000
md = { 'grpc-internal-encoding-request' => 'gzip' }
server_thread = Thread.new do call = @stub.an_rpc(EchoMsg.new(msg: long_request_str),
server_call = server_allows_client_to_proceed return_op: true,
metadata: md)
response = call.execute
expect(response).to be_a(EchoMsg)
expect(response.msg).to eq(long_request_str)
end end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
the_status = Struct::Status.new(StatusCodes::OK, 'OK')
server_thread.join
server_ops = {
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq nil
expect(server_batch.send_status).to be true
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'completes calls by sending status to client and server' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
# confirm the server can read the inbound message and respond
the_status = Struct::Status.new(StatusCodes::OK, 'OK', {})
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq sent_message
server_ops = {
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_status).to be true
expect(server_batch.send_message).to be true
# confirm the client can receive the server response and status.
client_ops = {
CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.send_close).to be true
expect(final_client_batch.message).to eq reply_text
expect(final_client_batch.status).to eq the_status
# confirm the server can receive the client close.
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
final_server_batch = server_call.run_batch(server_ops)
expect(final_server_batch.send_close).to be true
end end
def client_cancel_test(cancel_proc, expected_code, def client_cancel_test(cancel_proc, expected_code,
expected_details) expected_details)
call = new_client_call call = @stub.an_rpc(EchoMsg.new, return_op: true)
server_call = nil run_services_on_server(@server, services: [EchoService]) do
# start the call, but don't send a message yet
server_thread = Thread.new do call.start_call
server_call = server_allows_client_to_proceed # cancel the call
cancel_proc.call(call)
# check the client's status
failed = false
begin
call.execute
rescue GRPC::BadStatus => e
failed = true
expect(e.code).to be expected_code
expect(e.details).to eq expected_details
end
expect(failed).to be(true)
end end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::RECV_INITIAL_METADATA => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.metadata).to eq({})
cancel_proc.call(call)
server_thread.join
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
client_ops = {
CallOps::RECV_STATUS_ON_CLIENT => {}
}
client_batch = call.run_batch(client_ops)
expect(client_batch.status.code).to be expected_code
expect(client_batch.status.details).to eq expected_details
end end
it 'clients can cancel a call on the server' do it 'clients can cancel a call on the server' do
@ -344,8 +89,6 @@ shared_examples 'basic GRPC message delivery is OK' do
end end
shared_examples 'GRPC metadata delivery works OK' do shared_examples 'GRPC metadata delivery works OK' do
include_context 'setup: tags'
describe 'from client => server' do describe 'from client => server' do
before(:example) do before(:example) do
n = 7 # arbitrary number of metadata n = 7 # arbitrary number of metadata
@ -364,53 +107,31 @@ shared_examples 'GRPC metadata delivery works OK' do
it 'raises an exception if a metadata key is invalid' do it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md| @bad_keys.each do |md|
call = new_client_call # NOTE: no need to run a server in this test b/c the failure
client_ops = { # happens while validating metadata to send.
CallOps::SEND_INITIAL_METADATA => md failed = false
} begin
blk = proc do @stub.an_rpc(EchoMsg.new, metadata: md)
call.run_batch(client_ops) rescue TypeError => e
failed = true
expect(e.message).to eq('grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
end end
expect(&blk).to raise_error expect(failed).to be(true)
end end
end end
it 'sends all the metadata pairs when keys and values are valid' do it 'sends all the metadata pairs when keys and values are valid' do
@valid_metadata.each do |md| service = EchoService.new
recvd_rpc = nil run_services_on_server(@server, services: [service]) do
rcv_thread = Thread.new do @valid_metadata.each_with_index do |md, i|
recvd_rpc = @server.request_call expect(@stub.an_rpc(EchoMsg.new, metadata: md)).to be_a(EchoMsg)
# confirm the server can receive the client metadata
# finish the call
expect(service.received_md.length).to eq(i + 1)
md.each do |k, v|
expect(service.received_md[i][k.to_s]).to eq(v)
end
end end
call = new_client_call
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
# finish the call
final_server_batch = recvd_rpc.call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end end
end end
end end
@ -432,120 +153,61 @@ shared_examples 'GRPC metadata delivery works OK' do
end end
it 'raises an exception if a metadata key is invalid' do it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md| service = EchoService.new
recvd_rpc = nil run_services_on_server(@server, services: [service]) do
rcv_thread = Thread.new do @bad_keys.each do |md|
recvd_rpc = @server.request_call proceed = Queue.new
end server_exception = nil
service.on_call_started = proc do |call|
call = new_client_call call.send_initial_metadata(md)
# client signals that it's done sending metadata to allow server to rescue TypeError => e
# respond server_exception = e
client_ops = { ensure
CallOps::SEND_INITIAL_METADATA => nil proceed.push(1)
} end
call.run_batch(client_ops) client_exception = nil
client_call = @stub.an_rpc(EchoMsg.new, return_op: true)
# server gets the invocation thr = Thread.new do
rcv_thread.join client_call.execute
expect(recvd_rpc).to_not eq nil rescue GRPC::BadStatus => e
server_ops = { client_exception = e
CallOps::SEND_INITIAL_METADATA => md end
} proceed.pop
blk = proc do # TODO(apolcyn): we shouldn't need this cancel here. It's
recvd_rpc.call.run_batch(server_ops) # only currently needed b/c the server does not seem to properly
# terminate the RPC if it fails to send initial metadata. That
# should be fixed, in which case this cancellation can be removed.
client_call.cancel
thr.join
p client_exception
expect(client_exception.nil?).to be(false)
expect(server_exception.nil?).to be(false)
expect(server_exception.message).to eq(
'grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
end end
expect(&blk).to raise_error
# cancel the call so the server can shut down immediately
call.cancel
end end
end end
it 'sends an empty hash if no metadata is added' do it 'sends an empty hash if no metadata is added' do
recvd_rpc = nil run_services_on_server(@server, services: [EchoService]) do
rcv_thread = Thread.new do call = @stub.an_rpc(EchoMsg.new, return_op: true)
recvd_rpc = @server.request_call expect(call.execute).to be_a(EchoMsg)
expect(call.metadata).to eq({})
end end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
# receive close and send status to finish the call
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
# receive status to finish the call
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end end
it 'sends all the pairs when keys and values are valid' do it 'sends all the pairs when keys and values are valid' do
@valid_metadata.each do |md| service = EchoService.new
recvd_rpc = nil run_services_on_server(@server, services: [service]) do
rcv_thread = Thread.new do @valid_metadata.each do |md|
recvd_rpc = @server.request_call service.on_call_started = proc do |call|
call.send_initial_metadata(md)
end
call = @stub.an_rpc(EchoMsg.new, return_op: true)
call.execute
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(call.metadata).to eq(replace_symbols)
end end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(final_client_batch.metadata).to eq(replace_symbols)
expect(final_client_batch.status.code).to eq(0)
end end
end end
end end
@ -554,16 +216,11 @@ end
describe 'the http client/server' do describe 'the http client/server' do
before(:example) do before(:example) do
server_host = '0.0.0.0:0' server_host = '0.0.0.0:0'
@server = new_core_server_for_testing(nil) @server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, :this_port_is_insecure) server_port = @server.add_http2_port(server_host, :this_port_is_insecure)
@server.start
@ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure) @ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure)
end @stub = EchoStub.new(
"0.0.0.0:#{server_port}", nil, channel_override: @ch)
after(:example) do
@ch.close
@server.shutdown_and_notify(deadline)
@server.close
end end
it_behaves_like 'basic GRPC message delivery is OK' do it_behaves_like 'basic GRPC message delivery is OK' do
@ -574,8 +231,6 @@ describe 'the http client/server' do
end end
describe 'the secure http client/server' do describe 'the secure http client/server' do
include_context 'setup: tags'
def load_test_certs def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata') test_root = File.join(File.dirname(__FILE__), 'testdata')
files = ['ca.pem', 'server1.key', 'server1.pem'] files = ['ca.pem', 'server1.key', 'server1.pem']
@ -587,17 +242,14 @@ describe 'the secure http client/server' do
server_host = '0.0.0.0:0' server_host = '0.0.0.0:0'
server_creds = GRPC::Core::ServerCredentials.new( server_creds = GRPC::Core::ServerCredentials.new(
nil, [{ private_key: certs[1], cert_chain: certs[2] }], false) nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
@server = new_core_server_for_testing(nil) @server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, server_creds) server_port = @server.add_http2_port(server_host, server_creds)
@server.start
args = { Channel::SSL_TARGET => 'foo.test.google.fr' } args = { Channel::SSL_TARGET => 'foo.test.google.fr' }
@ch = Channel.new("0.0.0.0:#{server_port}", args, @ch = Channel.new(
GRPC::Core::ChannelCredentials.new(certs[0], nil, nil)) "0.0.0.0:#{server_port}", args,
end GRPC::Core::ChannelCredentials.new(certs[0], nil, nil))
@stub = EchoStub.new(
after(:example) do "0.0.0.0:#{server_port}", nil, channel_override: @ch)
@server.shutdown_and_notify(deadline)
@server.close
end end
it_behaves_like 'basic GRPC message delivery is OK' do it_behaves_like 'basic GRPC message delivery is OK' do
@ -606,59 +258,25 @@ describe 'the secure http client/server' do
it_behaves_like 'GRPC metadata delivery works OK' do it_behaves_like 'GRPC metadata delivery works OK' do
end end
def credentials_update_test(creds_update_md) it 'modifies metadata with CallCredentials' do
auth_proc = proc { creds_update_md } # create call creds
auth_proc = proc { { 'k1' => 'v1' } }
call_creds = GRPC::Core::CallCredentials.new(auth_proc) call_creds = GRPC::Core::CallCredentials.new(auth_proc)
# create arbitrary custom metadata
initial_md_key = 'k2' custom_md = { 'k2' => 'v2' }
initial_md_val = 'v2' # perform an RPC
initial_md = { initial_md_key => initial_md_val } echo_service = EchoService.new
expected_md = creds_update_md.clone run_services_on_server(@server, services: [echo_service]) do
fail 'bad test param' unless expected_md[initial_md_key].nil? expect(@stub.an_rpc(EchoMsg.new,
expected_md[initial_md_key] = initial_md_val credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
recvd_rpc = nil end
rcv_thread = Thread.new do # call creds metadata should be merged with custom MD
recvd_rpc = @server.request_call expect(echo_service.received_md.length).to eq(1)
expected_md = { 'k1' => 'v1', 'k2' => 'v2' }
expected_md.each do |k, v|
expect(echo_service.received_md[0][k]).to eq(v)
end end
call = new_client_call
call.set_credentials! call_creds
client_batch = call.run_batch(
CallOps::SEND_INITIAL_METADATA => initial_md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[expected_md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
credentials_update_test_finish_call(call, recvd_rpc.call)
end
def credentials_update_test_finish_call(client_call, server_call)
final_server_batch = server_call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = client_call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'modifies metadata with CallCredentials' do
credentials_update_test('k1' => 'updated-v1')
end end
it 'modifies large metadata with CallCredentials' do it 'modifies large metadata with CallCredentials' do
@ -666,11 +284,34 @@ describe 'the secure http client/server' do
'00000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111', '11111111111111111111111111111111111111111111111111111111111111',
) )
md = { # create call creds
k3: val_array, auth_proc = proc do
k4: '0000000000000000000000000000000000000000000000000000000000', {
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey5: 'v1' k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
}
end
call_creds = GRPC::Core::CallCredentials.new(auth_proc)
# create arbitrary custom metadata
custom_md = { k1: 'v1' }
# perform an RPC
echo_service = EchoService.new
run_services_on_server(@server, services: [echo_service]) do
expect(@stub.an_rpc(EchoMsg.new,
credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
end
# call creds metadata should be merged with custom MD
expect(echo_service.received_md.length).to eq(1)
expected_md = {
k1: 'v1',
k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
} }
credentials_update_test(md) expected_md.each do |k, v|
expect(echo_service.received_md[0][k.to_s]).to eq(v)
end
end end
end end

@ -55,17 +55,20 @@ describe GRPC::ActiveCall do
end end
@ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil, @ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil,
:this_channel_is_insecure) :this_channel_is_insecure)
@call = make_test_call
end end
after(:each) do after(:each) do
@server.shutdown_and_notify(deadline) @server.shutdown_and_notify(deadline)
@server.close @server.close
@server_thread.join @server_thread.join
# Don't rely on GC to unref the call, since that can prevent
# the channel connectivity state polling thread from shutting down.
@call.close
end end
describe 'restricted view methods' do describe 'restricted view methods' do
before(:each) do before(:each) do
@call = make_test_call
ActiveCall.client_invoke(@call) ActiveCall.client_invoke(@call)
@client_call = ActiveCall.new(@call, @pass_through, @client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
@ -117,9 +120,8 @@ describe GRPC::ActiveCall do
describe '#remote_send' do describe '#remote_send' do
it 'allows a client to send a payload to the server', test: true do it 'allows a client to send a payload to the server', test: true do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -137,15 +139,14 @@ describe GRPC::ActiveCall do
expect(server_call.remote_read).to eq(msg) expect(server_call.remote_read).to eq(msg)
# finish the call # finish the call
server_call.send_initial_metadata server_call.send_initial_metadata
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil) @call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call) send_and_receive_close_and_status(@call, recvd_call)
end end
it 'marshals the payload using the marshal func' do it 'marshals the payload using the marshal func' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call)
marshal = proc { |x| 'marshalled:' + x } marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, marshal, @pass_through, deadline) client_call = ActiveCall.new(@call, marshal, @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -161,23 +162,22 @@ describe GRPC::ActiveCall do
metadata_received: true) metadata_received: true)
expect(server_call.remote_read).to eq('marshalled:' + msg) expect(server_call.remote_read).to eq('marshalled:' + msg)
# finish the call # finish the call
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil) @call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call) send_and_receive_close_and_status(@call, recvd_call)
end end
TEST_WRITE_FLAGS = [WriteFlags::BUFFER_HINT, WriteFlags::NO_COMPRESS] TEST_WRITE_FLAGS = [WriteFlags::BUFFER_HINT, WriteFlags::NO_COMPRESS]
TEST_WRITE_FLAGS.each do |f| TEST_WRITE_FLAGS.each do |f|
it "successfully makes calls with write_flag set to #{f}" do it "successfully makes calls with write_flag set to #{f}" do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call)
marshal = proc { |x| 'marshalled:' + x } marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, marshal, client_call = ActiveCall.new(@call, marshal,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.write_flag = f client_call.write_flag = f
client_call.remote_send(msg) client_call.remote_send(msg)
# flush the message in case writes are set to buffered # flush the message in case writes are set to buffered
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1 @call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
# confirm that the message was marshalled # confirm that the message was marshalled
recvd_rpc = @received_rpcs_queue.pop recvd_rpc = @received_rpcs_queue.pop
@ -199,9 +199,8 @@ describe GRPC::ActiveCall do
describe 'sending initial metadata', send_initial_metadata: true do describe 'sending initial metadata', send_initial_metadata: true do
it 'sends metadata before sending a message if it hasnt been sent yet' do it 'sends metadata before sending a message if it hasnt been sent yet' do
call = make_test_call
@client_call = ActiveCall.new( @client_call = ActiveCall.new(
call, @call,
@pass_through, @pass_through,
@pass_through, @pass_through,
deadline, deadline,
@ -213,13 +212,13 @@ describe GRPC::ActiveCall do
message = 'phony message' message = 'phony message'
expect(call).to( expect(@call).to(
receive(:run_batch) receive(:run_batch)
.with( .with(
hash_including( hash_including(
CallOps::SEND_INITIAL_METADATA => metadata)).once) CallOps::SEND_INITIAL_METADATA => metadata)).once)
expect(call).to( expect(@call).to(
receive(:run_batch).with(hash_including( receive(:run_batch).with(hash_including(
CallOps::SEND_MESSAGE => message)).once) CallOps::SEND_MESSAGE => message)).once)
@client_call.remote_send(message) @client_call.remote_send(message)
@ -228,14 +227,12 @@ describe GRPC::ActiveCall do
end end
it 'doesnt send metadata if it thinks its already been sent' do it 'doesnt send metadata if it thinks its already been sent' do
call = make_test_call @client_call = ActiveCall.new(@call,
@client_call = ActiveCall.new(call,
@pass_through, @pass_through,
@pass_through, @pass_through,
deadline) deadline)
expect(@client_call.metadata_sent).to eql(true) expect(@client_call.metadata_sent).to eql(true)
expect(call).to( expect(@call).to(
receive(:run_batch).with(hash_including( receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA)).never) CallOps::SEND_INITIAL_METADATA)).never)
@ -243,9 +240,7 @@ describe GRPC::ActiveCall do
end end
it 'sends metadata if it is explicitly sent and ok to do so' do it 'sends metadata if it is explicitly sent and ok to do so' do
call = make_test_call @client_call = ActiveCall.new(@call,
@client_call = ActiveCall.new(call,
@pass_through, @pass_through,
@pass_through, @pass_through,
deadline, deadline,
@ -257,7 +252,7 @@ describe GRPC::ActiveCall do
@client_call.merge_metadata_to_send(metadata) @client_call.merge_metadata_to_send(metadata)
expect(@client_call.metadata_to_send).to eq(metadata) expect(@client_call.metadata_to_send).to eq(metadata)
expect(call).to( expect(@call).to(
receive(:run_batch).with(hash_including( receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA => CallOps::SEND_INITIAL_METADATA =>
metadata)).once) metadata)).once)
@ -265,9 +260,7 @@ describe GRPC::ActiveCall do
end end
it 'explicit sending does nothing if metadata has already been sent' do it 'explicit sending does nothing if metadata has already been sent' do
call = make_test_call @client_call = ActiveCall.new(@call,
@client_call = ActiveCall.new(call,
@pass_through, @pass_through,
@pass_through, @pass_through,
deadline) deadline)
@ -284,7 +277,6 @@ describe GRPC::ActiveCall do
describe '#merge_metadata_to_send', merge_metadata_to_send: true do describe '#merge_metadata_to_send', merge_metadata_to_send: true do
it 'adds to existing metadata when there is existing metadata to send' do it 'adds to existing metadata when there is existing metadata to send' do
call = make_test_call
starting_metadata = { starting_metadata = {
k1: 'key1_val', k1: 'key1_val',
k2: 'key2_val', k2: 'key2_val',
@ -292,7 +284,7 @@ describe GRPC::ActiveCall do
} }
@client_call = ActiveCall.new( @client_call = ActiveCall.new(
call, @call,
@pass_through, @pass_through, @pass_through, @pass_through,
deadline, deadline,
started: false, started: false,
@ -318,9 +310,8 @@ describe GRPC::ActiveCall do
end end
it 'fails when initial metadata has already been sent' do it 'fails when initial metadata has already been sent' do
call = make_test_call
@client_call = ActiveCall.new( @client_call = ActiveCall.new(
call, @call,
@pass_through, @pass_through,
@pass_through, @pass_through,
deadline, deadline,
@ -338,9 +329,8 @@ describe GRPC::ActiveCall do
describe '#client_invoke' do describe '#client_invoke' do
it 'sends metadata to the server when present' do it 'sends metadata to the server when present' do
call = make_test_call
metadata = { k1: 'v1', k2: 'v2' } metadata = { k1: 'v1', k2: 'v2' }
ActiveCall.client_invoke(call, metadata) ActiveCall.client_invoke(@call, metadata)
recvd_rpc = @received_rpcs_queue.pop recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call recvd_call = recvd_rpc.call
expect(recvd_call).to_not be_nil expect(recvd_call).to_not be_nil
@ -349,15 +339,14 @@ describe GRPC::ActiveCall do
expect(recvd_rpc.metadata['k2']).to eq('v2') expect(recvd_rpc.metadata['k2']).to eq('v2')
# finish the call # finish the call
recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => {}) recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => {})
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil) @call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call) send_and_receive_close_and_status(@call, recvd_call)
end end
end end
describe '#send_status', send_status: true do describe '#send_status', send_status: true do
it 'works when no metadata or messages have been sent yet' do it 'works when no metadata or messages have been sent yet' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call)
recvd_rpc = @received_rpcs_queue.pop recvd_rpc = @received_rpcs_queue.pop
server_call = ActiveCall.new( server_call = ActiveCall.new(
@ -375,9 +364,8 @@ describe GRPC::ActiveCall do
describe '#remote_read', remote_read: true do describe '#remote_read', remote_read: true do
it 'reads the response sent by a server' do it 'reads the response sent by a server' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -385,13 +373,12 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response') server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('server_response') expect(client_call.remote_read).to eq('server_response')
send_and_receive_close_and_status( send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call)) @call, inner_call_of_active_call(server_call))
end end
it 'saves no metadata when the server adds no metadata' do it 'saves no metadata when the server adds no metadata' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -401,13 +388,12 @@ describe GRPC::ActiveCall do
client_call.remote_read client_call.remote_read
expect(client_call.metadata).to eq({}) expect(client_call.metadata).to eq({})
send_and_receive_close_and_status( send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call)) @call, inner_call_of_active_call(server_call))
end end
it 'saves metadata add by the server' do it 'saves metadata add by the server' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -418,12 +404,11 @@ describe GRPC::ActiveCall do
expected = { 'k1' => 'v1', 'k2' => 'v2' } expected = { 'k1' => 'v1', 'k2' => 'v2' }
expect(client_call.metadata).to eq(expected) expect(client_call.metadata).to eq(expected)
send_and_receive_close_and_status( send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call)) @call, inner_call_of_active_call(server_call))
end end
it 'get a status from server when nothing else sent from server' do it 'get a status from server when nothing else sent from server' do
client_call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(client_call)
recvd_rpc = @received_rpcs_queue.pop recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call recvd_call = recvd_rpc.call
@ -438,22 +423,21 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'OK') server_call.send_status(OK, 'OK')
# Check that we can receive initial metadata and a status # Check that we can receive initial metadata and a status
client_call.run_batch( @call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil) CallOps::RECV_INITIAL_METADATA => nil)
batch_result = client_call.run_batch( batch_result = @call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil) CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(OK) expect(batch_result.status.code).to eq(OK)
end end
it 'get a nil msg before a status when an OK status is sent' do it 'get a nil msg before a status when an OK status is sent' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) @call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response') server_call.remote_send('server_response')
server_call.send_status(OK, 'OK') server_call.send_status(OK, 'OK')
@ -463,10 +447,9 @@ describe GRPC::ActiveCall do
end end
it 'unmarshals the response using the unmarshal func' do it 'unmarshals the response using the unmarshal func' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call)
unmarshal = proc { |x| 'unmarshalled:' + x } unmarshal = proc { |x| 'unmarshalled:' + x }
client_call = ActiveCall.new(call, @pass_through, client_call = ActiveCall.new(@call, @pass_through,
unmarshal, deadline) unmarshal, deadline)
# confirm the client receives the unmarshalled message # confirm the client receives the unmarshalled message
@ -476,14 +459,13 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response') server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('unmarshalled:server_response') expect(client_call.remote_read).to eq('unmarshalled:server_response')
send_and_receive_close_and_status( send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call)) @call, inner_call_of_active_call(server_call))
end end
end end
describe '#each_remote_read' do describe '#each_remote_read' do
it 'creates an Enumerator' do it 'creates an Enumerator' do
call = make_test_call client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
expect(client_call.each_remote_read).to be_a(Enumerator) expect(client_call.each_remote_read).to be_a(Enumerator)
# finish the call # finish the call
@ -491,9 +473,8 @@ describe GRPC::ActiveCall do
end end
it 'the returned enumerator can read n responses' do it 'the returned enumerator can read n responses' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
reply = 'server_response' reply = 'server_response'
@ -506,18 +487,17 @@ describe GRPC::ActiveCall do
expect(e.next).to eq(reply) expect(e.next).to eq(reply)
end end
send_and_receive_close_and_status( send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call)) @call, inner_call_of_active_call(server_call))
end end
it 'the returns an enumerator that stops after an OK Status' do it 'the returns an enumerator that stops after an OK Status' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
reply = 'server_response' reply = 'server_response'
client_call.remote_send(msg) client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) @call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
e = client_call.each_remote_read e = client_call.each_remote_read
n = 3 # arbitrary value > 1 n = 3 # arbitrary value > 1
@ -532,14 +512,13 @@ describe GRPC::ActiveCall do
describe '#closing the call from the client' do describe '#closing the call from the client' do
it 'finishes ok if the server sends a status response' do it 'finishes ok if the server sends a status response' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
expect do expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) @call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error end.to_not raise_error
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response') server_call.remote_send('server_response')
@ -549,9 +528,8 @@ describe GRPC::ActiveCall do
end end
it 'finishes ok if the server sends an early status response' do it 'finishes ok if the server sends an early status response' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -560,15 +538,14 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK') server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response') expect(client_call.remote_read).to eq('server_response')
expect do expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) @call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error end.to_not raise_error
expect { client_call.receive_and_check_status }.to_not raise_error expect { client_call.receive_and_check_status }.to_not raise_error
end end
it 'finishes ok if SEND_CLOSE and RECV_STATUS has been sent' do it 'finishes ok if SEND_CLOSE and RECV_STATUS has been sent' do
call = make_test_call ActiveCall.client_invoke(@call)
ActiveCall.client_invoke(call) client_call = ActiveCall.new(@call, @pass_through,
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline) @pass_through, deadline)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -577,7 +554,7 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK') server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response') expect(client_call.remote_read).to eq('server_response')
expect do expect do
call.run_batch( @call.run_batch(
CallOps::SEND_CLOSE_FROM_CLIENT => nil, CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil) CallOps::RECV_STATUS_ON_CLIENT => nil)
end.to_not raise_error end.to_not raise_error
@ -631,6 +608,7 @@ describe GRPC::ActiveCall do
batch_result = @client_call.run_batch( batch_result = @client_call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil) CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(@server_status) expect(batch_result.status.code).to eq(@server_status)
@client_call.close
end end
it 'sends the initial metadata implicitly if not already sent' do it 'sends the initial metadata implicitly if not already sent' do

@ -41,14 +41,17 @@ class EchoService
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg) rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
rpc :a_client_streaming_rpc_unimplemented, stream(EchoMsg), EchoMsg rpc :a_client_streaming_rpc_unimplemented, stream(EchoMsg), EchoMsg
attr_reader :received_md attr_reader :received_md
attr_accessor :on_call_started
def initialize(**kw) def initialize(**kw)
@trailing_metadata = kw @trailing_metadata = kw
@received_md = [] @received_md = []
@on_call_started = nil
end end
def an_rpc(req, call) def an_rpc(req, call)
GRPC.logger.info('echo service received a request') GRPC.logger.info('echo service received a request')
on_call_started&.call(call)
call.output_metadata.update(@trailing_metadata) call.output_metadata.update(@trailing_metadata)
@received_md << call.metadata unless call.metadata.nil? @received_md << call.metadata unless call.metadata.nil?
req req

@ -168,6 +168,25 @@ grpc_proto_fuzzer(
], ],
) )
grpc_proto_fuzzer(
name = "server_fuzzer_chttp2_fake_creds",
srcs = ["server_fuzzer_chttp2_fake_creds.cc"],
corpus = "server_fuzzer_chttp2_fake_creds_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":server_fuzzer",
"//:grpc",
],
)
grpc_proto_fuzzer( grpc_proto_fuzzer(
name = "server_fuzzer_chaotic_good", name = "server_fuzzer_chaotic_good",
srcs = ["server_fuzzer_chaotic_good.cc"], srcs = ["server_fuzzer_chaotic_good.cc"],
@ -187,3 +206,60 @@ grpc_proto_fuzzer(
"//src/core:chaotic_good_server", "//src/core:chaotic_good_server",
], ],
) )
grpc_cc_library(
name = "connector_fuzzer",
srcs = ["connector_fuzzer.cc"],
hdrs = ["connector_fuzzer.h"],
external_deps = ["absl/log:check"],
deps = [
"fuzzer_input_proto",
"fuzzing_common",
"network_input",
"//:gpr",
"//:grpc",
"//src/core:channel_args",
"//test/core/event_engine/fuzzing_event_engine",
"//test/core/test_util:fuzz_config_vars",
"//test/core/test_util:grpc_test_util",
"//test/core/test_util:grpc_test_util_base",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2",
srcs = ["connector_fuzzer_chttp2.cc"],
corpus = "connector_fuzzer_chttp2_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2_fakesec",
srcs = ["connector_fuzzer_chttp2_fakesec.cc"],
corpus = "connector_fuzzer_chttp2_fakesec_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)

@ -0,0 +1,189 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
#include "src/core/lib/address_utils/parse_address.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/env.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/timer_manager.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
#include "test/core/end2end/fuzzers/network_input.h"
#include "test/core/test_util/fuzz_config_vars.h"
#include "test/core/test_util/test_config.h"
bool squelch = true;
bool leak_check = true;
using ::grpc_event_engine::experimental::ChannelArgsEndpointConfig;
using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::FuzzingEventEngine;
using ::grpc_event_engine::experimental::GetDefaultEventEngine;
using ::grpc_event_engine::experimental::MockEndpointController;
using ::grpc_event_engine::experimental::SetEventEngineFactory;
using ::grpc_event_engine::experimental::URIToResolvedAddress;
namespace grpc_core {
namespace {
// Test harness that drives a SubchannelConnector against fuzzer-scripted
// network traffic running on a FuzzingEventEngine.
class ConnectorFuzzer {
 public:
  ConnectorFuzzer(
      const fuzzer_input::Msg& msg,
      absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
          make_security_connector,
      absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector)
      : make_security_connector_(make_security_connector),
        engine_([actions = msg.event_engine_actions()]() {
          // Install the fuzzing event engine factory before anything can
          // grab the default engine, then fetch that engine.
          SetEventEngineFactory([actions]() -> std::unique_ptr<EventEngine> {
            return std::make_unique<FuzzingEventEngine>(
                FuzzingEventEngine::Options(), actions);
          });
          return std::dynamic_pointer_cast<FuzzingEventEngine>(
              GetDefaultEventEngine());
        }()),
        mock_endpoint_controller_(MockEndpointController::Create(engine_)),
        connector_(make_connector()) {
    CHECK(engine_);
    for (const auto& input : msg.network_input()) {
      network_inputs_.push(input);
    }
    // Keep timers and the executor single-threaded so the run stays
    // deterministic under the fuzzing event engine.
    grpc_timer_manager_set_start_threaded(false);
    grpc_init();
    ExecCtx exec_ctx;
    Executor::SetThreadingAll(false);
    listener_ =
        engine_
            ->CreateListener(
                [this](std::unique_ptr<EventEngine::Endpoint> endpoint,
                       MemoryAllocator) {
                  // Feed the next scripted traffic segment to each accepted
                  // connection; connections beyond the script get nothing.
                  if (network_inputs_.empty()) return;
                  ScheduleWrites(network_inputs_.front(), std::move(endpoint),
                                 engine_.get());
                  network_inputs_.pop();
                },
                [](absl::Status) {}, ChannelArgsEndpointConfig(ChannelArgs{}),
                std::make_unique<MemoryQuota>("foo"))
            .value();
    // Optionally schedule a Shutdown() on the connector; delay_ms <= 0
    // disables the shutdown entirely.
    if (msg.has_shutdown_connector() &&
        msg.shutdown_connector().delay_ms() > 0) {
      auto shutdown_connector = msg.shutdown_connector();
      const auto delay = Duration::Milliseconds(shutdown_connector.delay_ms());
      engine_->RunAfterExactly(delay, [this, shutdown_connector = std::move(
                                                 shutdown_connector)]() {
        if (connector_ == nullptr) return;
        connector_->Shutdown(absl::Status(
            static_cast<absl::StatusCode>(shutdown_connector.shutdown_status()),
            shutdown_connector.shutdown_message()));
      });
    }
    // Abbreviated runtime for interpreting API actions, since we simply don't
    // support many here.
    uint64_t when_ms = 0;
    for (const auto& action : msg.api_actions()) {
      switch (action.type_case()) {
        default:
          break;
        case api_fuzzer::Action::kSleepMs:
          when_ms += action.sleep_ms();
          break;
        case api_fuzzer::Action::kResizeResourceQuota:
          engine_->RunAfterExactly(
              Duration::Milliseconds(when_ms),
              [this, new_size = action.resize_resource_quota()]() {
                resource_quota_->memory_quota()->SetSize(new_size);
              });
          when_ms += 1;
          break;
      }
    }
  }

  ~ConnectorFuzzer() {
    // Tear down listener/connector/endpoint first, then drain all pending
    // events before shutting gRPC down.
    listener_.reset();
    connector_.reset();
    mock_endpoint_controller_.reset();
    engine_->TickUntilIdle();
    grpc_shutdown_blocking();
    engine_->UnsetGlobalHooks();
  }

  // Binds the listener, initiates one Connect() attempt, and ticks the
  // engine until the attempt resolves (success or failure).
  void Run() {
    grpc_resolved_address addr;
    CHECK(grpc_parse_uri(URI::Parse("ipv4:127.0.0.1:1234").value(), &addr));
    CHECK_OK(
        listener_->Bind(URIToResolvedAddress("ipv4:127.0.0.1:1234").value()));
    CHECK_OK(listener_->Start());
    // Hold the mock's C endpoint for the duration of the run.
    OrphanablePtr<grpc_endpoint> endpoint(
        mock_endpoint_controller_->TakeCEndpoint());
    SubchannelConnector::Result result;
    bool done = false;
    auto channel_args = ChannelArgs{}.SetObject<EventEngine>(engine_).SetObject(
        resource_quota_);
    auto security_connector = make_security_connector_();
    if (security_connector != nullptr) {
      channel_args = channel_args.SetObject(std::move(security_connector));
    }
    connector_->Connect(
        SubchannelConnector::Args{&addr, nullptr,
                                  Timestamp::Now() + Duration::Seconds(20),
                                  channel_args},
        &result, NewClosure([&done, &result](grpc_error_handle status) {
          done = true;
          if (status.ok()) result.transport->Orphan();
        }));
    // Drive both the event engine and iomgr timers until the connect
    // callback fires.
    while (!done) {
      engine_->Tick();
      grpc_timer_manager_tick();
    }
  }

 private:
  RefCountedPtr<ResourceQuota> resource_quota_ =
      MakeRefCounted<ResourceQuota>("fuzzer");
  absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
      make_security_connector_;
  std::shared_ptr<FuzzingEventEngine> engine_;
  // Scripted traffic; one entry is consumed per accepted connection.
  std::queue<fuzzer_input::NetworkInput> network_inputs_;
  std::shared_ptr<MockEndpointController> mock_endpoint_controller_;
  std::unique_ptr<EventEngine::Listener> listener_;
  OrphanablePtr<SubchannelConnector> connector_;
};
} // namespace
// Runs a single connector-fuzzer iteration for the given input message,
// using the supplied factories to build the (optional) security connector
// and the subchannel connector under test.
void RunConnectorFuzzer(
    const fuzzer_input::Msg& msg,
    absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
        make_security_connector,
    absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector) {
  // Silence logging unless fuzzer tracing was explicitly requested.
  const bool tracing_requested = GetEnv("GRPC_TRACE_FUZZER").has_value();
  if (squelch && !tracing_requested) grpc_disable_all_absl_logs();
  // Force-enable the event engine experiments exactly once per process.
  static const bool experiments_forced = []() {
    ForceEnableExperiment("event_engine_client", true);
    ForceEnableExperiment("event_engine_listener", true);
    return true;
  }();
  (void)experiments_forced;  // avoid unused variable warning
  ApplyFuzzConfigVars(msg.config_vars());
  TestOnlyReloadExperimentsFromConfigVariables();
  ConnectorFuzzer(msg, make_security_connector, make_connector).Run();
}
} // namespace grpc_core

@ -0,0 +1,34 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#define GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#include "absl/functional/function_ref.h"
#include "src/core/client_channel/connector.h"
#include "src/core/lib/security/security_connector/security_connector.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
namespace grpc_core {
// Runs one connector-fuzzer iteration: feeds msg's scripted network traffic
// to the connector produced by make_connector, optionally securing it with
// the connector returned by make_security_connector (may return null for
// insecure runs).
void RunConnectorFuzzer(
    const fuzzer_input::Msg& msg,
    absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
        make_security_connector,
    absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector);
}  // namespace grpc_core
#endif // GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H

@ -0,0 +1,30 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
// Fuzzer entry point: exercises the chttp2 connector with no channel
// security connector (insecure connection path).
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
  grpc_core::RunConnectorFuzzer(
      msg,
      []() {
        // Null security connector selects the insecure path.
        return grpc_core::RefCountedPtr<grpc_channel_security_connector>();
      },
      []() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -0,0 +1,36 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/security/security_connector/fake/fake_security_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
// Fuzzer entry point: exercises the chttp2 connector secured with the fake
// transport security handshake.
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
  grpc_core::RunConnectorFuzzer(
      msg,
      []() {
        // "foobar" is the target name baked into the fake security
        // connector for this fuzz target.
        return grpc_fake_channel_security_connector_create(
            grpc_core::RefCountedPtr<grpc_channel_credentials>(
                grpc_fake_transport_security_credentials_create()),
            nullptr, "foobar", grpc_core::ChannelArgs{});
      },
      []() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -172,6 +172,20 @@ message ChaoticGoodFrame {
message ChaoticGoodSettings {} message ChaoticGoodSettings {}
// A single frame for the fake transport, as scripted by the fuzzer.
message FakeTransportFrame {
  // Canned payloads; the input generator serializes each as its literal
  // enum name (e.g. CLIENT_INIT becomes the bytes "CLIENT_INIT").
  enum MessageString {
    CLIENT_INIT = 0;
    SERVER_INIT = 1;
    CLIENT_FINISHED = 2;
    SERVER_FINISHED = 3;
  }
  oneof payload {
    // Arbitrary frame bytes supplied directly by the fuzzer.
    bytes raw_bytes = 1;
    // One of the canned strings above.
    MessageString message_string = 2;
  }
}
message InputSegment { message InputSegment {
int32 delay_ms = 1; int32 delay_ms = 1;
oneof payload { oneof payload {
@ -187,6 +201,7 @@ message InputSegment {
H2ClientPrefix client_prefix = 11; H2ClientPrefix client_prefix = 11;
uint32 repeated_zeros = 12; uint32 repeated_zeros = 12;
ChaoticGoodFrame chaotic_good = 13; ChaoticGoodFrame chaotic_good = 13;
FakeTransportFrame fake_transport_frame = 14;
} }
} }
@ -204,10 +219,18 @@ message NetworkInput {
} }
} }
// Connector fuzzer only: schedules a Shutdown() call on the connector
// under test.
message ShutdownConnector {
  // Delay before shutdown, in milliseconds; values <= 0 disable it.
  int32 delay_ms = 1;
  // Cast to absl::StatusCode and passed to Shutdown().
  int32 shutdown_status = 2;
  // Status message passed to Shutdown().
  string shutdown_message = 3;
}
message Msg { message Msg {
repeated NetworkInput network_input = 1; repeated NetworkInput network_input = 1;
repeated api_fuzzer.Action api_actions = 2; repeated api_fuzzer.Action api_actions = 2;
fuzzing_event_engine.Actions event_engine_actions = 3; fuzzing_event_engine.Actions event_engine_actions = 3;
grpc.testing.FuzzConfigVars config_vars = 4; grpc.testing.FuzzConfigVars config_vars = 4;
grpc.testing.FuzzingChannelArgs channel_args = 5; grpc.testing.FuzzingChannelArgs channel_args = 5;
ShutdownConnector shutdown_connector = 6;
} }

@ -267,6 +267,13 @@ SliceBuffer ChaoticGoodFrame(const fuzzer_input::ChaoticGoodFrame& frame) {
return out; return out;
} }
// Serializes a 32-bit value into buf[0..3], least-significant byte first
// (little endian). buf must have room for 4 bytes.
void store32_little_endian(uint32_t value, unsigned char* buf) {
  for (int byte = 0; byte < 4; ++byte) {
    buf[byte] = static_cast<unsigned char>((value >> (8 * byte)) & 0xFF);
  }
}
grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) { grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
switch (segment.payload_case()) { switch (segment.payload_case()) {
case fuzzer_input::InputSegment::kRawBytes: case fuzzer_input::InputSegment::kRawBytes:
@ -333,6 +340,38 @@ grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
.JoinIntoSlice() .JoinIntoSlice()
.TakeCSlice(); .TakeCSlice();
} break; } break;
case fuzzer_input::InputSegment::kFakeTransportFrame: {
auto generate = [](absl::string_view payload) {
uint32_t length = payload.length();
std::vector<unsigned char> bytes;
bytes.resize(4);
store32_little_endian(length + 4, bytes.data());
for (auto c : payload) {
bytes.push_back(static_cast<unsigned char>(c));
}
return grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(bytes.data()), bytes.size());
};
switch (segment.fake_transport_frame().payload_case()) {
case fuzzer_input::FakeTransportFrame::kRawBytes:
return generate(segment.fake_transport_frame().raw_bytes());
case fuzzer_input::FakeTransportFrame::kMessageString:
switch (segment.fake_transport_frame().message_string()) {
default:
return generate("UNKNOWN");
case fuzzer_input::FakeTransportFrame::CLIENT_INIT:
return generate("CLIENT_INIT");
case fuzzer_input::FakeTransportFrame::SERVER_INIT:
return generate("SERVER_INIT");
case fuzzer_input::FakeTransportFrame::CLIENT_FINISHED:
return generate("CLIENT_FINISHED");
case fuzzer_input::FakeTransportFrame::SERVER_FINISHED:
return generate("SERVER_FINISHED");
}
case fuzzer_input::FakeTransportFrame::PAYLOAD_NOT_SET:
return generate("");
}
}
case fuzzer_input::InputSegment::PAYLOAD_NOT_SET: case fuzzer_input::InputSegment::PAYLOAD_NOT_SET:
break; break;
} }
@ -545,4 +584,15 @@ Duration ScheduleConnection(
return delay; return delay;
} }
// Schedules the scripted traffic in network_input to be written to the
// given endpoint, and starts a read loop that keeps draining the peer.
void ScheduleWrites(
    const fuzzer_input::NetworkInput& network_input,
    std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
        endpoint,
    grpc_event_engine::experimental::FuzzingEventEngine* event_engine) {
  auto write_schedule = MakeSchedule(network_input);
  // Move to shared ownership so the read loop and the write scheduler can
  // both keep the endpoint alive.
  std::shared_ptr<EventEngine::Endpoint> shared_endpoint(std::move(endpoint));
  ReadForever(shared_endpoint);
  ScheduleWritesForReads(shared_endpoint, event_engine,
                         std::move(write_schedule));
}
} // namespace grpc_core } // namespace grpc_core

@ -30,6 +30,12 @@ Duration ScheduleReads(
mock_endpoint_controller, mock_endpoint_controller,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine); grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
void ScheduleWrites(
const fuzzer_input::NetworkInput& network_input,
std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
endpoint,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
Duration ScheduleConnection( Duration ScheduleConnection(
const fuzzer_input::NetworkInput& network_input, const fuzzer_input::NetworkInput& network_input,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine, grpc_event_engine::experimental::FuzzingEventEngine* event_engine,

@ -0,0 +1,30 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc_security.h>
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/server_fuzzer.h"
// Fuzzer entry point: runs the server fuzzer with fake transport security
// credentials on the listening port.
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
  grpc_core::RunServerFuzzer(msg, [](grpc_server* server, int port_num,
                                     const grpc_core::ChannelArgs&) {
    auto* creds = grpc_fake_transport_security_server_credentials_create();
    grpc_server_add_http2_port(
        server, absl::StrCat("0.0.0.0:", port_num).c_str(), creds);
    // Drop the local handle after registering the port.
    grpc_server_credentials_release(creds);
  });
}

@ -67,12 +67,10 @@ void CancelAfterClientDone(
} }
CORE_END2END_TEST(CoreEnd2endTest, CancelAfterClientDone) { CORE_END2END_TEST(CoreEnd2endTest, CancelAfterClientDone) {
SKIP_IF_V3();
CancelAfterClientDone(*this, std::make_unique<CancelCancellationMode>()); CancelAfterClientDone(*this, std::make_unique<CancelCancellationMode>());
} }
CORE_END2END_TEST(CoreDeadlineTest, DeadlineAfterClientDone) { CORE_END2END_TEST(CoreDeadlineTest, DeadlineAfterClientDone) {
SKIP_IF_V3();
CancelAfterClientDone(*this, std::make_unique<DeadlineCancellationMode>()); CancelAfterClientDone(*this, std::make_unique<DeadlineCancellationMode>());
} }

@ -18,6 +18,7 @@
#include <stdlib.h> #include <stdlib.h>
#include <algorithm> #include <algorithm>
#include <atomic>
#include <chrono> #include <chrono>
#include <limits> #include <limits>
#include <vector> #include <vector>
@ -32,6 +33,7 @@
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h" #include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/dump_args.h"
#include "src/core/lib/gprpp/time.h" #include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/port.h"
#include "src/core/telemetry/stats.h" #include "src/core/telemetry/stats.h"
@ -189,7 +191,15 @@ void FuzzingEventEngine::TickUntilIdle() {
while (true) { while (true) {
{ {
grpc_core::MutexLock lock(&*mu_); grpc_core::MutexLock lock(&*mu_);
if (tasks_by_id_.empty()) return; LOG_EVERY_N_SEC(INFO, 5)
<< "TickUntilIdle: "
<< GRPC_DUMP_ARGS(tasks_by_id_.size(), outstanding_reads_.load(),
outstanding_writes_.load());
if (tasks_by_id_.empty() &&
outstanding_writes_.load(std::memory_order_relaxed) == 0 &&
outstanding_reads_.load(std::memory_order_relaxed) == 0) {
return;
}
} }
Tick(); Tick();
} }
@ -299,6 +309,9 @@ absl::Status FuzzingEventEngine::FuzzingListener::Start() {
bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) { bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
CHECK(!closed[index]); CHECK(!closed[index]);
const int peer_index = 1 - index; const int peer_index = 1 - index;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: entry "
<< GRPC_DUMP_ARGS(data->Length());
if (data->Length() == 0) return true; if (data->Length() == 0) return true;
size_t write_len = std::numeric_limits<size_t>::max(); size_t write_len = std::numeric_limits<size_t>::max();
// Check the write_sizes queue for fuzzer imposed restrictions on this write // Check the write_sizes queue for fuzzer imposed restrictions on this write
@ -315,12 +328,16 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
// byte. // byte.
if (write_len == 0) write_len = 1; if (write_len == 0) write_len = 1;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO) GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: " << write_len << " bytes"; << "WRITE[" << this << ":" << index << "]: " << write_len << " bytes; "
<< GRPC_DUMP_ARGS(pending_read[peer_index].has_value());
// Expand the pending buffer. // Expand the pending buffer.
size_t prev_len = pending[index].size(); size_t prev_len = pending[index].size();
pending[index].resize(prev_len + write_len); pending[index].resize(prev_len + write_len);
// Move bytes from the to-write data into the pending buffer. // Move bytes from the to-write data into the pending buffer.
data->MoveFirstNBytesIntoBuffer(write_len, pending[index].data() + prev_len); data->MoveFirstNBytesIntoBuffer(write_len, pending[index].data() + prev_len);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: post-move "
<< GRPC_DUMP_ARGS(data->Length());
// If there was a pending read, then we can fulfill it. // If there was a pending read, then we can fulfill it.
if (pending_read[peer_index].has_value()) { if (pending_read[peer_index].has_value()) {
pending_read[peer_index]->buffer->Append( pending_read[peer_index]->buffer->Append(
@ -328,7 +345,11 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
pending[index].clear(); pending[index].clear();
g_fuzzing_event_engine->RunLocked( g_fuzzing_event_engine->RunLocked(
RunType::kWrite, RunType::kWrite,
[cb = std::move(pending_read[peer_index]->on_read)]() mutable { [cb = std::move(pending_read[peer_index]->on_read), this, peer_index,
buffer = pending_read[peer_index]->buffer]() mutable {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "FINISH_READ[" << this << ":" << peer_index
<< "]: " << GRPC_DUMP_ARGS(buffer->Length());
cb(absl::OkStatus()); cb(absl::OkStatus());
}); });
pending_read[peer_index].reset(); pending_read[peer_index].reset();
@ -339,6 +360,10 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
bool FuzzingEventEngine::FuzzingEndpoint::Write( bool FuzzingEventEngine::FuzzingEndpoint::Write(
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data, absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
const WriteArgs*) { const WriteArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_WRITE[" << middle_.get() << ":" << my_index()
<< "]: " << data->Length() << " bytes";
IoToken write_token(&g_fuzzing_event_engine->outstanding_writes_);
grpc_core::global_stats().IncrementSyscallWrite(); grpc_core::global_stats().IncrementSyscallWrite();
grpc_core::MutexLock lock(&*mu_); grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]); CHECK(!middle_->closed[my_index()]);
@ -346,24 +371,38 @@ bool FuzzingEventEngine::FuzzingEndpoint::Write(
// If the write succeeds immediately, then we return true. // If the write succeeds immediately, then we return true.
if (middle_->Write(data, my_index())) return true; if (middle_->Write(data, my_index())) return true;
middle_->writing[my_index()] = true; middle_->writing[my_index()] = true;
ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data); ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data,
std::move(write_token));
return false; return false;
} }
void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite( void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index, std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data) { absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
IoToken write_token) {
g_fuzzing_event_engine->RunLocked( g_fuzzing_event_engine->RunLocked(
RunType::kWrite, [middle = std::move(middle), index, data, RunType::kWrite,
on_writable = std::move(on_writable)]() mutable { [write_token = std::move(write_token), middle = std::move(middle), index,
data, on_writable = std::move(on_writable)]() mutable {
grpc_core::ReleasableMutexLock lock(&*mu_); grpc_core::ReleasableMutexLock lock(&*mu_);
CHECK(middle->writing[index]); CHECK(middle->writing[index]);
if (middle->closed[index]) { if (middle->closed[index]) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED[" << middle.get() << ":" << index << "]";
g_fuzzing_event_engine->RunLocked( g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter, RunType::kRunAfter,
[on_writable = std::move(on_writable)]() mutable { [on_writable = std::move(on_writable)]() mutable {
on_writable(absl::InternalError("Endpoint closed")); on_writable(absl::InternalError("Endpoint closed"));
}); });
if (middle->pending_read[1 - index].has_value()) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[cb = std::move(
middle->pending_read[1 - index]->on_read)]() mutable {
cb(absl::InternalError("Endpoint closed"));
});
middle->pending_read[1 - index].reset();
}
return; return;
} }
if (middle->Write(data, index)) { if (middle->Write(data, index)) {
@ -373,14 +412,23 @@ void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
return; return;
} }
ScheduleDelayedWrite(std::move(middle), index, std::move(on_writable), ScheduleDelayedWrite(std::move(middle), index, std::move(on_writable),
data); data, std::move(write_token));
}); });
} }
FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() { FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
grpc_core::MutexLock lock(&*mu_); grpc_core::MutexLock lock(&*mu_);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSE[" << middle_.get() << ":" << my_index() << "]: "
<< GRPC_DUMP_ARGS(
middle_->closed[my_index()], middle_->closed[peer_index()],
middle_->pending_read[my_index()].has_value(),
middle_->pending_read[peer_index()].has_value(),
middle_->writing[my_index()], middle_->writing[peer_index()]);
middle_->closed[my_index()] = true; middle_->closed[my_index()] = true;
if (middle_->pending_read[my_index()].has_value()) { if (middle_->pending_read[my_index()].has_value()) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED_READING[" << middle_.get() << ":" << my_index() << "]";
g_fuzzing_event_engine->RunLocked( g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter, RunType::kRunAfter,
[cb = std::move(middle_->pending_read[my_index()]->on_read)]() mutable { [cb = std::move(middle_->pending_read[my_index()]->on_read)]() mutable {
@ -388,7 +436,7 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
}); });
middle_->pending_read[my_index()].reset(); middle_->pending_read[my_index()].reset();
} }
if (!middle_->writing[peer_index()] && if (!middle_->writing[my_index()] &&
middle_->pending_read[peer_index()].has_value()) { middle_->pending_read[peer_index()].has_value()) {
g_fuzzing_event_engine->RunLocked( g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter, RunType::kRunAfter,
@ -403,20 +451,25 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
bool FuzzingEventEngine::FuzzingEndpoint::Read( bool FuzzingEventEngine::FuzzingEndpoint::Read(
absl::AnyInvocable<void(absl::Status)> on_read, SliceBuffer* buffer, absl::AnyInvocable<void(absl::Status)> on_read, SliceBuffer* buffer,
const ReadArgs*) { const ReadArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_READ[" << middle_.get() << ":" << my_index() << "]";
buffer->Clear(); buffer->Clear();
IoToken read_token(&g_fuzzing_event_engine->outstanding_reads_);
grpc_core::MutexLock lock(&*mu_); grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]); CHECK(!middle_->closed[my_index()]);
if (middle_->pending[peer_index()].empty()) { if (middle_->pending[peer_index()].empty()) {
// If the endpoint is closed, fail asynchronously. // If the endpoint is closed, fail asynchronously.
if (middle_->closed[peer_index()]) { if (middle_->closed[peer_index()]) {
g_fuzzing_event_engine->RunLocked( g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter, [on_read = std::move(on_read)]() mutable { RunType::kRunAfter,
[read_token, on_read = std::move(on_read)]() mutable {
on_read(absl::InternalError("Endpoint closed")); on_read(absl::InternalError("Endpoint closed"));
}); });
return false; return false;
} }
// If the endpoint has no pending data, then we need to wait for a write. // If the endpoint has no pending data, then we need to wait for a write.
middle_->pending_read[my_index()] = PendingRead{std::move(on_read), buffer}; middle_->pending_read[my_index()] =
PendingRead{std::move(read_token), std::move(on_read), buffer};
return false; return false;
} else { } else {
// If the endpoint has pending data, then we can fulfill the read // If the endpoint has pending data, then we can fulfill the read

@ -17,6 +17,7 @@
#include <stddef.h> #include <stddef.h>
#include <atomic>
#include <chrono> #include <chrono>
#include <cstdint> #include <cstdint>
#include <map> #include <map>
@ -124,6 +125,36 @@ class FuzzingEventEngine : public EventEngine {
} }
private: private:
// RAII token counting one outstanding I/O operation in a shared atomic
// counter. Construction from a counter increments it, destruction
// decrements it; copies add a reference, moves transfer one.
class IoToken {
 public:
  // A default-constructed token tracks nothing.
  IoToken() : refs_(nullptr) {}
  // Registers one outstanding operation against *refs.
  explicit IoToken(std::atomic<size_t>* refs) : refs_(refs) {
    refs_->fetch_add(1, std::memory_order_relaxed);
  }
  ~IoToken() {
    if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
  }
  IoToken(const IoToken& other) : refs_(other.refs_) {
    if (refs_ != nullptr) refs_->fetch_add(1, std::memory_order_relaxed);
  }
  IoToken& operator=(const IoToken& other) {
    // Copy-and-swap keeps the counts balanced and is self-assignment safe.
    IoToken copy(other);
    Swap(copy);
    return *this;
  }
  IoToken(IoToken&& other) noexcept
      : refs_(std::exchange(other.refs_, nullptr)) {}
  IoToken& operator=(IoToken&& other) noexcept {
    // Guard against self-move: without it we would decrement the counter
    // and then re-install the same pointer (std::exchange on ourselves),
    // so the destructor would decrement a second time and underflow.
    if (this != &other) {
      if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
      refs_ = std::exchange(other.refs_, nullptr);
    }
    return *this;
  }
  void Swap(IoToken& other) { std::swap(refs_, other.refs_); }

 private:
  // Counter of outstanding operations; nullptr when the token is empty.
  std::atomic<size_t>* refs_;
};
enum class RunType { enum class RunType {
kWrite, kWrite,
kRunAfter, kRunAfter,
@ -183,6 +214,8 @@ class FuzzingEventEngine : public EventEngine {
// One read that's outstanding. // One read that's outstanding.
struct PendingRead { struct PendingRead {
// The associated io token
IoToken io_token;
// Callback to invoke when the read completes. // Callback to invoke when the read completes.
absl::AnyInvocable<void(absl::Status)> on_read; absl::AnyInvocable<void(absl::Status)> on_read;
// The buffer to read into. // The buffer to read into.
@ -243,8 +276,8 @@ class FuzzingEventEngine : public EventEngine {
// endpoint shutdown, it's believed this is a legal implementation. // endpoint shutdown, it's believed this is a legal implementation.
static void ScheduleDelayedWrite( static void ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index, std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data) absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_); IoToken write_token) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const std::shared_ptr<EndpointMiddle> middle_; const std::shared_ptr<EndpointMiddle> middle_;
const int index_; const int index_;
}; };
@ -299,6 +332,8 @@ class FuzzingEventEngine : public EventEngine {
std::queue<std::queue<size_t>> write_sizes_for_future_connections_ std::queue<std::queue<size_t>> write_sizes_for_future_connections_
ABSL_GUARDED_BY(mu_); ABSL_GUARDED_BY(mu_);
grpc_pick_port_functions previous_pick_port_functions_; grpc_pick_port_functions previous_pick_port_functions_;
std::atomic<size_t> outstanding_writes_{0};
std::atomic<size_t> outstanding_reads_{0};
grpc_core::Mutex run_after_duration_callback_mu_; grpc_core::Mutex run_after_duration_callback_mu_;
absl::AnyInvocable<void(Duration)> run_after_duration_callback_ absl::AnyInvocable<void(Duration)> run_after_duration_callback_

@ -28,11 +28,12 @@
#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice.h" #include "src/core/lib/slice/slice.h"
#include "src/core/telemetry/stats.h"
#include "test/core/test_util/test_config.h" #include "test/core/test_util/test_config.h"
namespace grpc_core { namespace grpc_core {
namespace { namespace {
void AssertIndex(const HPackTable* tbl, uint32_t idx, const char* key, void AssertIndex(HPackTable* tbl, uint32_t idx, const char* key,
const char* value) { const char* value) {
const auto* md = tbl->Lookup(idx); const auto* md = tbl->Lookup(idx);
ASSERT_NE(md, nullptr); ASSERT_NE(md, nullptr);
@ -113,6 +114,8 @@ TEST(HpackParserTableTest, ManyAdditions) {
ExecCtx exec_ctx; ExecCtx exec_ctx;
auto stats_before = global_stats().Collect();
for (i = 0; i < 100000; i++) { for (i = 0; i < 100000; i++) {
std::string key = absl::StrCat("K.", i); std::string key = absl::StrCat("K.", i);
std::string value = absl::StrCat("VALUE.", i); std::string value = absl::StrCat("VALUE.", i);
@ -134,6 +137,56 @@ TEST(HpackParserTableTest, ManyAdditions) {
value.c_str()); value.c_str());
} }
} }
auto stats_after = global_stats().Collect();
EXPECT_EQ(stats_after->http2_hpack_hits - stats_before->http2_hpack_hits,
100000);
EXPECT_EQ(stats_after->http2_hpack_misses, stats_before->http2_hpack_misses);
}
TEST(HpackParserTableTest, ManyUnusedAdditions) {
auto tbl = std::make_unique<HPackTable>();
int i;
ExecCtx exec_ctx;
auto stats_before = global_stats().Collect();
const Timestamp start = Timestamp::Now();
for (i = 0; i < 100000; i++) {
std::string key = absl::StrCat("K.", i);
std::string value = absl::StrCat("VALUE.", i);
auto key_slice = Slice::FromCopiedString(key);
auto value_slice = Slice::FromCopiedString(value);
auto memento = HPackTable::Memento{
ParsedMetadata<grpc_metadata_batch>(
ParsedMetadata<grpc_metadata_batch>::FromSlicePair{},
std::move(key_slice), std::move(value_slice),
key.length() + value.length() + 32),
nullptr};
ASSERT_TRUE(tbl->Add(std::move(memento)));
}
tbl.reset();
auto stats_after = global_stats().Collect();
const Timestamp end = Timestamp::Now();
EXPECT_EQ(stats_after->http2_hpack_hits, stats_before->http2_hpack_hits);
EXPECT_EQ(stats_after->http2_hpack_misses - stats_before->http2_hpack_misses,
100000);
size_t num_buckets_changed = 0;
const auto& lifetime_before = stats_before->http2_hpack_entry_lifetime;
const auto& lifetime_after = stats_after->http2_hpack_entry_lifetime;
for (size_t i = 0; i < lifetime_before.bucket_count(); i++) {
if (lifetime_before.buckets()[i] != lifetime_after.buckets()[i]) {
EXPECT_LE(i, lifetime_before.BucketFor((end - start).millis()));
num_buckets_changed++;
}
}
EXPECT_GT(num_buckets_changed, 0);
} }
} // namespace grpc_core } // namespace grpc_core

@ -112,6 +112,19 @@ grpc_cc_test(
], ],
) )
grpc_cc_test(
name = "unique_ptr_with_bitset_test",
srcs = ["unique_ptr_with_bitset_test.cc"],
external_deps = ["gtest"],
language = "C++",
uses_event_engine = False,
uses_polling = False,
deps = [
"//:gpr_platform",
"//src/core:unique_ptr_with_bitset",
],
)
grpc_cc_test( grpc_cc_test(
name = "useful_test", name = "useful_test",
srcs = ["useful_test.cc"], srcs = ["useful_test.cc"],

@ -0,0 +1,60 @@
//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#include "src/core/util/unique_ptr_with_bitset.h"
#include <stdint.h>
#include <limits>
#include <memory>
#include "gtest/gtest.h"
#include <grpc/support/port_platform.h>
namespace grpc_core {
TEST(UniquePtrWithBitsetTest, Basic) {
UniquePtrWithBitset<int, 1> ptr;
EXPECT_EQ(ptr.get(), nullptr);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.reset(new int(42));
EXPECT_EQ(*ptr, 42);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.SetBit(0);
EXPECT_EQ(ptr.TestBit(0), true);
ptr.reset();
EXPECT_EQ(ptr.get(), nullptr);
EXPECT_EQ(ptr.TestBit(0), true);
ptr.ClearBit(0);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.reset(new int(43));
ptr.SetBit(0);
UniquePtrWithBitset<int, 1> ptr2;
ptr2 = std::move(ptr);
EXPECT_EQ(*ptr2, 43);
EXPECT_EQ(ptr2.TestBit(0), true);
}
} // namespace grpc_core
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@ -54,7 +54,6 @@ DOCKERIMAGE_CURRENT_VERSIONS = {
"tools/dockerfile/distribtest/python_python38_buster_aarch64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_python38_buster_aarch64@sha256:0a93bf2a0303aebe1280bafad69df228b9444af9144c767d8169ecc70fb383f6", "tools/dockerfile/distribtest/python_python38_buster_aarch64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_python38_buster_aarch64@sha256:0a93bf2a0303aebe1280bafad69df228b9444af9144c767d8169ecc70fb383f6",
"tools/dockerfile/distribtest/python_ubuntu2004_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2004_x64@sha256:288cf72bc98fc384b9352d1f6d258b3513925ffe5746dda7e2e343723dd5f733", "tools/dockerfile/distribtest/python_ubuntu2004_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2004_x64@sha256:288cf72bc98fc384b9352d1f6d258b3513925ffe5746dda7e2e343723dd5f733",
"tools/dockerfile/distribtest/python_ubuntu2204_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2204_x64@sha256:6054d639247a93af2b496f3c1ce48f63b2e07f5ba54e025f69bb232a747c644e", "tools/dockerfile/distribtest/python_ubuntu2204_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2204_x64@sha256:6054d639247a93af2b496f3c1ce48f63b2e07f5ba54e025f69bb232a747c644e",
"tools/dockerfile/distribtest/ruby_centos7_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_centos7_x64@sha256:4d529b984b78ca179086f7f9b416605e2d9a96ca0a28a71f4421bb5ffdc18f96",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_0.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_0@sha256:05c579d93764f12db1a60fa78a26e0f4d6179e54187a3a531c8ff955001731ec", "tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_0.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_0@sha256:05c579d93764f12db1a60fa78a26e0f4d6179e54187a3a531c8ff955001731ec",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_1.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_1@sha256:a48bb08275a588fbcea21b6b6056514b69454f6844bd7db9fd72c796892d02e1", "tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_1.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_1@sha256:a48bb08275a588fbcea21b6b6056514b69454f6844bd7db9fd72c796892d02e1",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_2.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_2@sha256:9604f8d07c3ea330cdc1ebe394f67828710bbfef52f0dc144e513e3627279b5a", "tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_2.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_2@sha256:9604f8d07c3ea330cdc1ebe394f67828710bbfef52f0dc144e513e3627279b5a",

@ -302,6 +302,10 @@ with open("src/core/telemetry/stats_data.h", "w") as H:
print(" public:", file=H) print(" public:", file=H)
print(" static int BucketFor(int value);", file=H) print(" static int BucketFor(int value);", file=H)
print(" const uint64_t* buckets() const { return buckets_; }", file=H) print(" const uint64_t* buckets() const { return buckets_; }", file=H)
print(
" size_t bucket_count() const { return %d; }" % shape.buckets,
file=H,
)
print( print(
" friend Histogram_%d_%d operator-(const Histogram_%d_%d& left," " friend Histogram_%d_%d operator-(const Histogram_%d_%d& left,"
" const Histogram_%d_%d& right);" " const Histogram_%d_%d& right);"

@ -1 +0,0 @@
us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_centos7_x64:b37e078e920ba1f75bd26bc67c2d3496432e36af@sha256:4d529b984b78ca179086f7f9b416605e2d9a96ca0a28a71f4421bb5ffdc18f96

@ -1,33 +0,0 @@
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM centos:7
RUN yum update -y && yum install -y curl tar which
# Install rvm
RUN gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
RUN curl -sSL https://get.rvm.io | bash -s stable
# Install Ruby 2.7
RUN /bin/bash -l -c "rvm install ruby-2.7"
RUN /bin/bash -l -c "rvm use --default ruby-2.7"
RUN /bin/bash -l -c "echo 'gem: --no-document' > ~/.gemrc"
RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.7' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-document"
RUN mkdir /var/local/jenkins
RUN /bin/bash -l -c "echo '. /etc/profile.d/rvm.sh' >> ~/.bashrc"

@ -2956,6 +2956,7 @@ src/core/util/time.cc \
src/core/util/time_precise.cc \ src/core/util/time_precise.cc \
src/core/util/time_precise.h \ src/core/util/time_precise.h \
src/core/util/tmpfile.h \ src/core/util/tmpfile.h \
src/core/util/unique_ptr_with_bitset.h \
src/core/util/upb_utils.h \ src/core/util/upb_utils.h \
src/core/util/useful.h \ src/core/util/useful.h \
src/core/util/windows/cpu.cc \ src/core/util/windows/cpu.cc \

@ -2736,6 +2736,7 @@ src/core/util/time.cc \
src/core/util/time_precise.cc \ src/core/util/time_precise.cc \
src/core/util/time_precise.h \ src/core/util/time_precise.h \
src/core/util/tmpfile.h \ src/core/util/tmpfile.h \
src/core/util/unique_ptr_with_bitset.h \
src/core/util/upb_utils.h \ src/core/util/upb_utils.h \
src/core/util/useful.h \ src/core/util/useful.h \
src/core/util/windows/cpu.cc \ src/core/util/windows/cpu.cc \

@ -2,21 +2,24 @@
This directory contains scripts that facilitate building and running gRPC interoperability tests for combinations of language/runtimes (known as matrix). This directory contains scripts that facilitate building and running gRPC interoperability tests for combinations of language/runtimes (known as matrix).
The setup builds gRPC docker images for each language/runtime and upload it to Google Container Registry (GCR). These images, encapsulating gRPC stack The setup builds gRPC docker images for each language/runtime and upload it to Artifact Registry (AR). These images, encapsulating gRPC stack
from specific releases/tag, are used to test version compatibility between gRPC release versions. from specific releases/tag, are used to test version compatibility between gRPC release versions.
## Step-by-step instructions for adding a GCR image for a new release for compatibility test ## Step-by-step instructions for adding a AR docker image for a new release for compatibility test
We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server. When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test. The steps are:
We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server.
When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test.
The steps are:
- Add (or update) an entry in `./client_matrix.py` file to reference the github tag for the release. - Add (or update) an entry in `./client_matrix.py` file to reference the github tag for the release.
- Build new client docker image(s). For example, for C and wrapper languages release `v1.9.9`, do - Build new client docker image(s). For example, for C and wrapper languages release `v1.9.9`, do
- `tools/interop_matrix/create_matrix_images.py --git_checkout --release=v1.9.9 --upload_images --language cxx python ruby php` - `tools/interop_matrix/create_matrix_images.py --git_checkout --release=v1.9.9 --upload_images --language cxx python ruby php`
- Verify that the new docker image was built successfully and uploaded to GCR. For example, - Verify that the new docker image was built successfully and uploaded to AR. For example,
- `gcloud container images list --repository gcr.io/grpc-testing` lists available images. - `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public` lists available images.
- `gcloud container images list-tags gcr.io/grpc-testing/grpc_interop_java` should show an image entry with tag `v1.9.9`. - `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java --include-tags` should show an image entry with tag `v1.9.9`.
- images can also be viewed in https://pantheon.corp.google.com/gcr/images/grpc-testing?project=grpc-testing - images can also be viewed in https://pantheon.corp.google.com/artifacts/docker/grpc-testing/us/testing-images-public
- Verify the just-created docker client image would pass backward compatibility test (it should). For example, - Verify the just-created docker client image would pass backward compatibility test (it should). For example,
- `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_java:v1.9.9` followed by - `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9` followed by
- `docker_image=gcr.io/grpc-testing/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master` - `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master`
- Commit the change and create a PR to upstream/master. - Commit the change and create a PR to upstream/master.
- Trigger an adhoc run of interop matrix tests: https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod:grpc%2Fcore%2Fexperimental%2Flinux%2Fgrpc_interop_matrix_adhoc - Trigger an adhoc run of interop matrix tests: https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod:grpc%2Fcore%2Fexperimental%2Flinux%2Fgrpc_interop_matrix_adhoc
- Once tests pass, request a PR review. - Once tests pass, request a PR review.
@ -24,30 +27,34 @@ We have continuous nightly test setup to test gRPC backward compatibility betwee
For more details on each step, refer to sections below. For more details on each step, refer to sections below.
## Instructions for adding new language/runtimes ## Instructions for adding new language/runtimes
- Create new `Dockerfile.template`, `build_interop.sh.template` for the language/runtime under `template/tools/dockerfile/`. - Create new `Dockerfile.template`, `build_interop.sh.template` for the language/runtime under `template/tools/dockerfile/`.
- Run `tools/buildgen/generate_projects.sh` to create corresponding files under `tools/dockerfile/`. - Run `tools/buildgen/generate_projects.sh` to create corresponding files under `tools/dockerfile/`.
- Add language/runtimes to `client_matrix.py` following existing language/runtimes examples. - Add language/runtimes to `client_matrix.py` following existing language/runtimes examples.
- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to GCR. - Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to AR.
## Instructions for creating new test cases ## Instructions for creating new test cases
- Create test cases by running `LANG=<lang> [RELEASE=<release>] ./create_testcases.sh`. For example, - Create test cases by running `LANG=<lang> [RELEASE=<release>] ./create_testcases.sh`. For example,
- `LANG=go ./create_testcases.sh` will generate `./testcases/go__master`, which is also a functional bash script. - `LANG=go ./create_testcases.sh` will generate `./testcases/go__master`, which is also a functional bash script.
- `LANG=go KEEP_IMAGE=1 ./create_testcases.sh` will generate `./testcases/go__master` and keep the local docker image so it can be invoked simply via `./testcases/go__master`. Note: remove local docker images manually afterwards with `docker rmi <image_id>`. - `LANG=go KEEP_IMAGE=1 ./create_testcases.sh` will generate `./testcases/go__master` and keep the local docker image so it can be invoked simply via `./testcases/go__master`. Note: remove local docker images manually afterwards with `docker rmi <image_id>`.
- Stage and commit the generated test case file `./testcases/<lang>__<release>`. - Stage and commit the generated test case file `./testcases/<lang>__<release>`.
## Instructions for running test cases against GCR images ## Instructions for running test cases against AR docker images
- Run `tools/interop_matrix/run_interop_matrix_tests.py`. Useful options: - Run `tools/interop_matrix/run_interop_matrix_tests.py`. Useful options:
- `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the GCR images with the tag have been created using `create_matrix_images.py` above. - `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the AR images with the tag have been created using `create_matrix_images.py` above.
- `--language` specifies a language. Defaults to `--language=all`. - `--language` specifies a language. Defaults to `--language=all`.
For example, To test all languages for all gRPC releases across all runtimes, do `tools/interop_matrix/run_interop_matrix_test.py --release=all`. For example, To test all languages for all gRPC releases across all runtimes, do `tools/interop_matrix/run_interop_matrix_test.py --release=all`.
- The output for all the test cases is recorded in a junit style xml file (defaults to 'report.xml'). - The output for all the test cases is recorded in a junit style xml file (defaults to 'report.xml').
## Instructions for running test cases against a GCR image manually ## Instructions for running test cases against an AR image manually
- Download docker image from GCR. For example: `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0`.
- Download a docker image from AR. For example: `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0`.
- Run test cases by specifying `docker_image` variable inline with the test case script created above. - Run test cases by specifying `docker_image` variable inline with the test case script created above.
For example: For example:
- `docker_image=gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in GCR. - `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in AR.
Note: Note:
- File path starting with `tools/` or `template/` are relative to the grpc repo root dir. File path starting with `./` are relative to current directory (`tools/interop_matrix`). - File path starting with `tools/` or `template/` are relative to the grpc repo root dir. File path starting with `./` are relative to current directory (`tools/interop_matrix`).
- Creating and referencing images in GCR require read and write permission to Google Container Registry path gcr.io/grpc-testing. - Creating and referencing images in AR require read and write permission to AR path us-docker.pkg.dev/grpc-testing.

@ -53,9 +53,9 @@ _BUILD_INFO = "/var/local/build_info"
argp = argparse.ArgumentParser(description="Run interop tests.") argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument( argp.add_argument(
"--gcr_path", "--docker_path",
default="gcr.io/grpc-testing", default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images in Google Container Registry", help="Path of docker images",
) )
argp.add_argument( argp.add_argument(
@ -175,7 +175,7 @@ def build_image_jobspec(runtime, env, gcr_tag, stack_base):
stack_base: the local gRPC repo path. stack_base: the local gRPC repo path.
""" """
basename = "grpc_interop_%s" % runtime basename = "grpc_interop_%s" % runtime
tag = "%s/%s:%s" % (args.gcr_path, basename, gcr_tag) tag = "%s/%s:%s" % (args.docker_path, basename, gcr_tag)
build_env = {"INTEROP_IMAGE": tag, "BASE_NAME": basename} build_env = {"INTEROP_IMAGE": tag, "BASE_NAME": basename}
build_env.update(env) build_env.update(env)
image_builder_path = _IMAGE_BUILDER image_builder_path = _IMAGE_BUILDER
@ -407,8 +407,8 @@ for lang in languages:
for image in docker_images: for image in docker_images:
if args.upload_images: if args.upload_images:
jobset.message("START", "Uploading %s" % image, do_newline=True) jobset.message("START", "Uploading %s" % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag> # docker image name must be in the format <docker_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(":") != -1 assert image.startswith(args.docker_path) and image.find(":") != -1
# Add a tag to exclude the image from the GCP Vulnerability Scanner. # Add a tag to exclude the image from the GCP Vulnerability Scanner.
(image_name, tag_name) = image.rsplit(":", 1) (image_name, tag_name) = image.rsplit(":", 1)
alternate_image = ( alternate_image = (

@ -56,9 +56,9 @@ _RELEASES = sorted(
argp = argparse.ArgumentParser(description="Run interop tests.") argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int) argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument( argp.add_argument(
"--gcr_path", "--docker_path",
default="gcr.io/grpc-testing", default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images in Google Container Registry", help="Path of docker images",
) )
argp.add_argument( argp.add_argument(
"--release", "--release",
@ -348,7 +348,9 @@ languages = args.language if args.language != ["all"] else _LANGUAGES
total_num_failures = 0 total_num_failures = 0
_xml_report_tree = report_utils.new_junit_xml_tree() _xml_report_tree = report_utils.new_junit_xml_tree()
for lang in languages: for lang in languages:
docker_images = _get_test_images_for_lang(lang, args.release, args.gcr_path) docker_images = _get_test_images_for_lang(
lang, args.release, args.docker_path
)
for runtime in sorted(docker_images.keys()): for runtime in sorted(docker_images.keys()):
total_num_failures += _run_tests_for_lang( total_num_failures += _run_tests_for_lang(
lang, runtime, docker_images[runtime], _xml_report_tree lang, runtime, docker_images[runtime], _xml_report_tree

@ -20,11 +20,8 @@ import %workspace%/tools/remote_build/include/rbe_base_config.bazelrc
# configure backend for remote execution # configure backend for remote execution
build --remote_executor=grpcs://remotebuildexecution.googleapis.com build --remote_executor=grpcs://remotebuildexecution.googleapis.com
build --spawn_strategy=remote # Very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
build --strategy=Javac=remote build --remote_timeout=7200
build --strategy=Closure=remote
build --genrule_strategy=remote
build --remote_timeout=7200 # very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
# In the remote execution environment, each test gets its own docker containers # In the remote execution environment, each test gets its own docker containers
# and port server won't be available. # and port server won't be available.

@ -502,7 +502,6 @@ def targets():
protobuf_version="3.25", protobuf_version="3.25",
presubmit=True, presubmit=True,
), ),
RubyDistribTest("linux", "x64", "centos7"),
RubyDistribTest("linux", "x64", "ubuntu2004"), RubyDistribTest("linux", "x64", "ubuntu2004"),
RubyDistribTest("linux", "x64", "ubuntu2204", presubmit=True), RubyDistribTest("linux", "x64", "ubuntu2204", presubmit=True),
# PHP7 # PHP7

@ -11419,6 +11419,30 @@
], ],
"uses_polling": false "uses_polling": false
}, },
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "unique_ptr_with_bitset_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": false
},
{ {
"args": [], "args": [],
"benchmark": false, "benchmark": false,

Loading…
Cancel
Save