Merge master

pull/37386/head
tanvi-jagtap 4 months ago
commit 042e53f096
99 changed files (lines changed in parentheses):
  1. BUILD (3)
  2. CMakeLists.txt (35)
  3. Package.swift (1)
  4. bazel/grpc_deps.bzl (12)
  5. build_autogenerated.yaml (15)
  6. examples/cpp/otel/codelab/greeter_callback_client_solution.cc (1)
  7. gRPC-C++.podspec (2)
  8. gRPC-Core.podspec (2)
  9. grpc.gemspec (1)
  10. package.xml (1)
  11. src/core/BUILD (11)
  12. src/core/ext/transport/chaotic_good/client_transport.cc (17)
  13. src/core/ext/transport/chaotic_good/frame.h (3)
  14. src/core/ext/transport/chaotic_good/server_transport.cc (56)
  15. src/core/ext/transport/chaotic_good/server_transport.h (5)
  16. src/core/ext/transport/chttp2/transport/hpack_parser.cc (6)
  17. src/core/ext/transport/chttp2/transport/hpack_parser_table.cc (42)
  18. src/core/ext/transport/chttp2/transport/hpack_parser_table.h (33)
  19. src/core/handshaker/security/secure_endpoint.cc (9)
  20. src/core/handshaker/security/security_handshaker.cc (119)
  21. src/core/lib/channel/promise_based_filter.cc (75)
  22. src/core/lib/event_engine/ares_resolver.cc (2)
  23. src/core/lib/experiments/experiments.yaml (8)
  24. src/core/lib/gprpp/work_serializer.cc (15)
  25. src/core/lib/iomgr/ev_epoll1_linux.cc (4)
  26. src/core/lib/iomgr/event_engine_shims/endpoint.cc (5)
  27. src/core/lib/iomgr/polling_entity.cc (8)
  28. src/core/lib/iomgr/tcp_client_posix.cc (14)
  29. src/core/lib/iomgr/tcp_posix.cc (29)
  30. src/core/lib/iomgr/tcp_server_posix.cc (11)
  31. src/core/lib/resource_quota/memory_quota.cc (5)
  32. src/core/lib/resource_quota/memory_quota.h (6)
  33. src/core/lib/security/authorization/grpc_server_authz_filter.cc (11)
  34. src/core/lib/security/credentials/plugin/plugin_credentials.cc (22)
  35. src/core/lib/slice/slice_refcount.h (10)
  36. src/core/lib/surface/call.cc (2)
  37. src/core/lib/transport/bdp_estimator.cc (5)
  38. src/core/lib/transport/bdp_estimator.h (10)
  39. src/core/lib/transport/call_spine.h (36)
  40. src/core/lib/transport/connectivity_state.cc (15)
  41. src/core/load_balancing/grpclb/grpclb.cc (25)
  42. src/core/load_balancing/health_check_client.cc (10)
  43. src/core/load_balancing/oob_backend_metric.cc (6)
  44. src/core/load_balancing/outlier_detection/outlier_detection.cc (57)
  45. src/core/load_balancing/pick_first/pick_first.cc (80)
  46. src/core/load_balancing/priority/priority.cc (31)
  47. src/core/load_balancing/ring_hash/ring_hash.cc (5)
  48. src/core/load_balancing/rls/rls.cc (99)
  49. src/core/load_balancing/round_robin/round_robin.cc (24)
  50. src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc (62)
  51. src/core/load_balancing/weighted_target/weighted_target.cc (25)
  52. src/core/load_balancing/xds/cds.cc (16)
  53. src/core/load_balancing/xds/xds_cluster_impl.cc (34)
  54. src/core/load_balancing/xds/xds_cluster_manager.cc (15)
  55. src/core/load_balancing/xds/xds_override_host.cc (98)
  56. src/core/load_balancing/xds/xds_wrr_locality.cc (12)
  57. src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc (29)
  58. src/core/telemetry/stats_data.cc (67)
  59. src/core/telemetry/stats_data.h (48)
  60. src/core/telemetry/stats_data.yaml (8)
  61. src/core/tsi/fake_transport_security.cc (3)
  62. src/core/util/unique_ptr_with_bitset.h (86)
  63. src/core/xds/xds_client/xds_client.cc (3)
  64. src/ruby/lib/grpc/generic/active_call.rb (13)
  65. src/ruby/spec/call_spec.rb (37)
  66. src/ruby/spec/channel_spec.rb (6)
  67. src/ruby/spec/client_server_spec.rb (639)
  68. src/ruby/spec/generic/active_call_spec.rb (150)
  69. src/ruby/spec/support/services.rb (3)
  70. test/core/end2end/fuzzers/BUILD (76)
  71. test/core/end2end/fuzzers/connector_fuzzer.cc (189)
  72. test/core/end2end/fuzzers/connector_fuzzer.h (34)
  73. test/core/end2end/fuzzers/connector_fuzzer_chttp2.cc (30)
  74. test/core/end2end/fuzzers/connector_fuzzer_chttp2_corpus/empty (1)
  75. test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec.cc (36)
  76. test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec_corpus/empty (1)
  77. test/core/end2end/fuzzers/fuzzer_input.proto (23)
  78. test/core/end2end/fuzzers/network_input.cc (50)
  79. test/core/end2end/fuzzers/network_input.h (6)
  80. test/core/end2end/fuzzers/server_fuzzer_chttp2_fake_creds.cc (30)
  81. test/core/end2end/fuzzers/server_fuzzer_chttp2_fake_creds_corpus/empty (1)
  82. test/core/end2end/tests/cancel_after_client_done.cc (2)
  83. test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.cc (75)
  84. test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.h (39)
  85. test/core/transport/chttp2/hpack_parser_table_test.cc (55)
  86. test/core/util/BUILD (13)
  87. test/core/util/unique_ptr_with_bitset_test.cc (60)
  88. tools/bazelify_tests/dockerimage_current_versions.bzl (1)
  89. tools/codegen/core/gen_stats_data.py (4)
  90. tools/dockerfile/distribtest/ruby_centos7_x64.current_version (1)
  91. tools/dockerfile/distribtest/ruby_centos7_x64/Dockerfile (33)
  92. tools/doxygen/Doxyfile.c++.internal (1)
  93. tools/doxygen/Doxyfile.core.internal (1)
  94. tools/interop_matrix/README.md (39)
  95. tools/interop_matrix/create_matrix_images.py (12)
  96. tools/interop_matrix/run_interop_matrix_tests.py (10)
  97. tools/remote_build/include/rbe_remote_execution.bazelrc (7)
  98. tools/run_tests/artifacts/distribtest_targets.py (1)
  99. tools/run_tests/generated/tests.json (24)

BUILD (3)

@@ -4035,6 +4035,7 @@ grpc_cc_library(
deps = [
"gpr",
"tsi_base",
"//src/core:dump_args",
"//src/core:slice",
"//src/core:useful",
],
@@ -4569,11 +4570,13 @@ grpc_cc_library(
"gpr_platform",
"grpc_trace",
"hpack_parse_result",
"stats",
"//src/core:hpack_constants",
"//src/core:metadata_batch",
"//src/core:no_destruct",
"//src/core:parsed_metadata",
"//src/core:slice",
"//src/core:unique_ptr_with_bitset",
],
)

CMakeLists.txt (generated, 35)

@@ -1527,6 +1527,7 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_cxx try_join_test)
add_dependencies(buildtests_cxx try_seq_metadata_test)
add_dependencies(buildtests_cxx try_seq_test)
add_dependencies(buildtests_cxx unique_ptr_with_bitset_test)
add_dependencies(buildtests_cxx unique_type_name_test)
add_dependencies(buildtests_cxx unknown_frame_bad_client_test)
add_dependencies(buildtests_cxx uri_parser_test)
@@ -32484,6 +32485,40 @@ target_link_libraries(try_seq_test
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(unique_ptr_with_bitset_test
test/core/util/unique_ptr_with_bitset_test.cc
)
target_compile_features(unique_ptr_with_bitset_test PUBLIC cxx_std_14)
target_include_directories(unique_ptr_with_bitset_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(unique_ptr_with_bitset_test
${_gRPC_ALLTARGETS_LIBRARIES}
gtest
absl::check
absl::bits
)
endif()
if(gRPC_BUILD_TESTS)

Package.swift (generated, 1)

@@ -1950,6 +1950,7 @@ let package = Package(
"src/core/util/time_precise.cc",
"src/core/util/time_precise.h",
"src/core/util/tmpfile.h",
"src/core/util/unique_ptr_with_bitset.h",
"src/core/util/upb_utils.h",
"src/core/util/useful.h",
"src/core/util/windows/cpu.cc",

bazel/grpc_deps.bzl (12)

@@ -23,10 +23,10 @@ def grpc_deps():
if "platforms" not in native.existing_rules():
http_archive(
name = "platforms",
sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74",
sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
urls = [
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
],
)
@@ -168,10 +168,10 @@ def grpc_deps():
http_archive(
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
],
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
sha256 = "bc283cdfcd526a52c3201279cda4bc298652efa898b10b4db0837dc51652756f",
)
if "bazel_compdb" not in native.existing_rules():

build_autogenerated.yaml (15)

@@ -1219,6 +1219,7 @@ libs:
- src/core/util/latent_see.h
- src/core/util/ring_buffer.h
- src/core/util/spinlock.h
- src/core/util/unique_ptr_with_bitset.h
- src/core/util/upb_utils.h
- src/core/xds/grpc/certificate_provider_store.h
- src/core/xds/grpc/file_watcher_certificate_provider_factory.h
@@ -2705,6 +2706,7 @@ libs:
- src/core/util/latent_see.h
- src/core/util/ring_buffer.h
- src/core/util/spinlock.h
- src/core/util/unique_ptr_with_bitset.h
- src/core/util/upb_utils.h
- third_party/upb/upb/generated_code_support.h
src:
@@ -20491,6 +20493,19 @@ targets:
- absl/status:statusor
- gpr
uses_polling: false
- name: unique_ptr_with_bitset_test
gtest: true
build: test
language: c++
headers:
- src/core/util/unique_ptr_with_bitset.h
src:
- test/core/util/unique_ptr_with_bitset_test.cc
deps:
- gtest
- absl/log:check
- absl/numeric:bits
uses_polling: false
- name: unique_type_name_test
gtest: true
build: test

examples/cpp/otel/codelab/greeter_callback_client_solution.cc (1)

@@ -128,7 +128,6 @@ void RunClient(const std::string& target_str) {
int main(int argc, char** argv) {
absl::ParseCommandLine(argc, argv);
// CODELAB HINT : Add code to register OpenTelemetry plugin here.
// Register a global gRPC OpenTelemetry plugin configured with a prometheus
// exporter.
opentelemetry::exporter::metrics::PrometheusExporterOptions opts;

gRPC-C++.podspec (generated, 2)

@@ -1326,6 +1326,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',
@@ -2609,6 +2610,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',

gRPC-Core.podspec (generated, 2)

@@ -2066,6 +2066,7 @@ Pod::Spec.new do |s|
'src/core/util/time_precise.cc',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/util/windows/cpu.cc',
@@ -3389,6 +3390,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',

grpc.gemspec (generated, 1)

@@ -1952,6 +1952,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/util/time_precise.cc )
s.files += %w( src/core/util/time_precise.h )
s.files += %w( src/core/util/tmpfile.h )
s.files += %w( src/core/util/unique_ptr_with_bitset.h )
s.files += %w( src/core/util/upb_utils.h )
s.files += %w( src/core/util/useful.h )
s.files += %w( src/core/util/windows/cpu.cc )

package.xml (generated, 1)

@@ -1934,6 +1934,7 @@
<file baseinstalldir="/" name="src/core/util/time_precise.cc" role="src" />
<file baseinstalldir="/" name="src/core/util/time_precise.h" role="src" />
<file baseinstalldir="/" name="src/core/util/tmpfile.h" role="src" />
<file baseinstalldir="/" name="src/core/util/unique_ptr_with_bitset.h" role="src" />
<file baseinstalldir="/" name="src/core/util/upb_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/util/useful.h" role="src" />
<file baseinstalldir="/" name="src/core/util/windows/cpu.cc" role="src" />

src/core/BUILD (11)

@@ -289,6 +289,17 @@ grpc_cc_library(
deps = ["//:gpr_platform"],
)
grpc_cc_library(
name = "unique_ptr_with_bitset",
hdrs = ["util/unique_ptr_with_bitset.h"],
external_deps = [
"absl/log:check",
"absl/numeric:bits",
],
language = "c++",
deps = ["//:gpr_platform"],
)
grpc_cc_library(
name = "examine_stack",
srcs = [

src/core/ext/transport/chaotic_good/client_transport.cc (17)

@@ -254,7 +254,11 @@ uint32_t ChaoticGoodClientTransport::MakeStream(CallHandler call_handler) {
const uint32_t stream_id = next_stream_id_++;
stream_map_.emplace(stream_id, call_handler);
lock.Release();
call_handler.OnDone([this, stream_id]() {
call_handler.OnDone([this, stream_id](bool cancelled) {
if (cancelled) {
outgoing_frames_.MakeSender().UnbufferedImmediateSend(
CancelFrame{stream_id});
}
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
});
@@ -317,18 +321,17 @@ void ChaoticGoodClientTransport::StartCall(CallHandler call_handler) {
"outbound_loop", [self = RefAsSubclass<ChaoticGoodClientTransport>(),
call_handler]() mutable {
const uint32_t stream_id = self->MakeStream(call_handler);
return Map(self->CallOutboundLoop(stream_id, call_handler),
return Map(
self->CallOutboundLoop(stream_id, call_handler),
[stream_id, sender = self->outgoing_frames_.MakeSender()](
absl::Status result) mutable {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Call " << stream_id
<< " finished with " << result.ToString();
<< "CHAOTIC_GOOD: Call " << stream_id << " finished with "
<< result.ToString();
if (!result.ok()) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Send cancel";
CancelFrame frame;
frame.stream_id = stream_id;
if (!sender.UnbufferedImmediateSend(std::move(frame))) {
if (!sender.UnbufferedImmediateSend(CancelFrame{stream_id})) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Send cancel failed";
}

src/core/ext/transport/chaotic_good/frame.h (3)

@@ -156,6 +156,9 @@ struct ServerFragmentFrame final : public FrameInterface {
};
struct CancelFrame final : public FrameInterface {
CancelFrame() = default;
explicit CancelFrame(uint32_t stream_id) : stream_id(stream_id) {}
absl::Status Deserialize(HPackParser* parser, const FrameHeader& header,
absl::BitGenRef bitsrc, Arena* arena,
BufferPair buffers, FrameLimits limits) override;
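The new explicit constructor above is what lets the client transport spell a cancellation as the single expression CancelFrame{stream_id}. A self-contained sketch (simplified type, not the real frame.h definition) of why explicit still permits that spelling while blocking silent conversions from integers:

#include <cstdint>

struct CancelFrame {
  CancelFrame() = default;
  explicit CancelFrame(uint32_t id) : stream_id(id) {}
  uint32_t stream_id = 0;
};

void Send(CancelFrame) {}

int main() {
  Send(CancelFrame{42});  // OK: direct-list-initialization picks the ctor.
  // Send(42);            // Error: explicit forbids implicit conversion.
}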

src/core/ext/transport/chaotic_good/server_transport.cc (56)

@@ -72,8 +72,7 @@ auto ChaoticGoodServerTransport::TransportWriteLoop(
}
auto ChaoticGoodServerTransport::PushFragmentIntoCall(
CallInitiator call_initiator, ClientFragmentFrame frame,
uint32_t stream_id) {
CallInitiator call_initiator, ClientFragmentFrame frame) {
DCHECK(frame.headers == nullptr);
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: PushFragmentIntoCall: frame=" << frame.ToString();
@@ -84,17 +83,15 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
std::move(frame.message->message));
},
[]() -> StatusFlag { return Success{}; }),
[this, call_initiator, end_of_stream = frame.end_of_stream,
stream_id](StatusFlag status) mutable -> StatusFlag {
[call_initiator, end_of_stream = frame.end_of_stream](
StatusFlag status) mutable -> StatusFlag {
if (!status.ok() && GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: Failed PushFragmentIntoCall";
}
if (end_of_stream || !status.ok()) {
call_initiator.FinishSends();
// We have received end_of_stream. It is now safe to remove
// the call from the stream map.
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
// Note that we cannot remove from the stream map yet, as we
// may yet receive a cancellation.
}
return Success{};
});
@@ -102,17 +99,16 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
absl::optional<CallInitiator> call_initiator, absl::Status error,
ClientFragmentFrame frame, uint32_t stream_id) {
ClientFragmentFrame frame) {
return If(
call_initiator.has_value() && error.ok(),
[this, &call_initiator, &frame, &stream_id]() {
[this, &call_initiator, &frame]() {
return Map(
call_initiator->SpawnWaitable(
"push-fragment",
[call_initiator, frame = std::move(frame), stream_id,
this]() mutable {
return call_initiator->CancelIfFails(PushFragmentIntoCall(
*call_initiator, std::move(frame), stream_id));
[call_initiator, frame = std::move(frame), this]() mutable {
return call_initiator->CancelIfFails(
PushFragmentIntoCall(*call_initiator, std::move(frame)));
}),
[](StatusFlag status) { return StatusCast<absl::Status>(status); });
},
@@ -255,8 +251,7 @@ auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToNewCall(
}
}
return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status),
std::move(fragment_frame),
frame_header.stream_id);
std::move(fragment_frame));
}
auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToExistingCall(
@@ -271,8 +266,7 @@ auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToExistingCall(
frame_header, std::move(buffers), arena, fragment_frame,
FrameLimits{1024 * 1024 * 1024, aligned_bytes_ - 1});
return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status),
std::move(fragment_frame),
frame_header.stream_id);
std::move(fragment_frame));
}
auto ChaoticGoodServerTransport::ReadOneFrame(ChaoticGoodTransport& transport) {
@@ -305,6 +299,10 @@ auto ChaoticGoodServerTransport::ReadOneFrame(ChaoticGoodTransport& transport) {
[this, &frame_header]() {
absl::optional<CallInitiator> call_initiator =
ExtractStream(frame_header.stream_id);
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "Cancel stream " << frame_header.stream_id
<< (call_initiator.has_value() ? " (active)"
: " (not found)");
return If(
call_initiator.has_value(),
[&call_initiator]() {
@@ -410,6 +408,8 @@ void ChaoticGoodServerTransport::AbortWithError() {
absl::optional<CallInitiator> ChaoticGoodServerTransport::LookupStream(
uint32_t stream_id) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " LookupStream " << stream_id;
MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id);
if (it == stream_map_.end()) return absl::nullopt;
@@ -418,6 +418,8 @@ absl::optional<CallInitiator> ChaoticGoodServerTransport::LookupStream(
absl::optional<CallInitiator> ChaoticGoodServerTransport::ExtractStream(
uint32_t stream_id) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " ExtractStream " << stream_id;
MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id);
if (it == stream_map_.end()) return absl::nullopt;
@@ -428,6 +430,8 @@ absl::optional<CallInitiator> ChaoticGoodServerTransport::ExtractStream(
absl::Status ChaoticGoodServerTransport::NewStream(
uint32_t stream_id, CallInitiator call_initiator) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " NewStream " << stream_id;
MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id);
if (it != stream_map_.end()) {
@@ -437,9 +441,19 @@ absl::Status ChaoticGoodServerTransport::NewStream(
return absl::InternalError("Stream id is not increasing");
}
stream_map_.emplace(stream_id, call_initiator);
call_initiator.OnDone([this, stream_id]() {
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
call_initiator.OnDone(
[self = RefAsSubclass<ChaoticGoodServerTransport>(), stream_id](bool) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << self.get() << " OnDone " << stream_id;
absl::optional<CallInitiator> call_initiator =
self->ExtractStream(stream_id);
if (call_initiator.has_value()) {
auto c = std::move(*call_initiator);
c.SpawnInfallible("cancel", [c]() mutable {
c.Cancel();
return Empty{};
});
}
});
return absl::OkStatus();
}

src/core/ext/transport/chaotic_good/server_transport.h (5)

@@ -131,10 +131,9 @@ class ChaoticGoodServerTransport final : public ServerTransport {
FrameHeader frame_header, BufferPair buffers,
ChaoticGoodTransport& transport);
auto MaybePushFragmentIntoCall(absl::optional<CallInitiator> call_initiator,
absl::Status error, ClientFragmentFrame frame,
uint32_t stream_id);
absl::Status error, ClientFragmentFrame frame);
auto PushFragmentIntoCall(CallInitiator call_initiator,
ClientFragmentFrame frame, uint32_t stream_id);
ClientFragmentFrame frame);
RefCountedPtr<UnstartedCallDestination> call_destination_;
const RefCountedPtr<CallArenaAllocator> call_arena_allocator_;

src/core/ext/transport/chttp2/transport/hpack_parser.cc (6)

@@ -713,7 +713,7 @@ class HPackParser::Parser {
LOG(INFO) << "HTTP:" << log_info_.stream_id << ":" << type << ":"
<< (log_info_.is_client ? "CLI" : "SVR") << ": "
<< memento.md.DebugString()
<< (memento.parse_status == nullptr
<< (memento.parse_status.get() == nullptr
? ""
: absl::StrCat(
" (parse error: ",
@@ -724,7 +724,7 @@ class HPackParser::Parser {
void EmitHeader(const HPackTable::Memento& md) {
// Pass up to the transport
state_.frame_length += md.md.transport_size();
if (md.parse_status != nullptr) {
if (md.parse_status.get() != nullptr) {
// Reject any requests with invalid metadata.
input_->SetErrorAndContinueParsing(*md.parse_status);
}
@@ -974,7 +974,7 @@ class HPackParser::Parser {
} else {
const auto* memento = absl::get<const HPackTable::Memento*>(state_.key);
key_string = memento->md.key();
if (state_.field_error.ok() && memento->parse_status != nullptr) {
if (state_.field_error.ok() && memento->parse_status.get() != nullptr) {
input_->SetErrorAndContinueParsing(*memento->parse_status);
}
}

src/core/ext/transport/chttp2/transport/hpack_parser_table.cc (42)

@@ -37,6 +37,7 @@
#include "src/core/ext/transport/chttp2/transport/hpack_parse_result.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/telemetry/stats.h"
namespace grpc_core {
@@ -47,6 +48,10 @@ void HPackTable::MementoRingBuffer::Put(Memento m) {
return entries_.push_back(std::move(m));
}
size_t index = (first_entry_ + num_entries_) % max_entries_;
if (timestamp_index_ == kNoTimestamp) {
timestamp_index_ = index;
timestamp_ = Timestamp::Now();
}
entries_[index] = std::move(m);
++num_entries_;
}
@@ -54,12 +59,31 @@ void HPackTable::MementoRingBuffer::Put(Memento m) {
auto HPackTable::MementoRingBuffer::PopOne() -> Memento {
CHECK_GT(num_entries_, 0u);
size_t index = first_entry_ % max_entries_;
if (index == timestamp_index_) {
global_stats().IncrementHttp2HpackEntryLifetime(
(Timestamp::Now() - timestamp_).millis());
timestamp_index_ = kNoTimestamp;
}
++first_entry_;
--num_entries_;
return std::move(entries_[index]);
auto& entry = entries_[index];
if (!entry.parse_status.TestBit(Memento::kUsedBit)) {
global_stats().IncrementHttp2HpackMisses();
}
return std::move(entry);
}
auto HPackTable::MementoRingBuffer::Lookup(uint32_t index) const
auto HPackTable::MementoRingBuffer::Lookup(uint32_t index) -> const Memento* {
if (index >= num_entries_) return nullptr;
uint32_t offset = (num_entries_ - 1u - index + first_entry_) % max_entries_;
auto& entry = entries_[offset];
const bool was_used = entry.parse_status.TestBit(Memento::kUsedBit);
entry.parse_status.SetBit(Memento::kUsedBit);
if (!was_used) global_stats().IncrementHttp2HpackHits();
return &entry;
}
auto HPackTable::MementoRingBuffer::Peek(uint32_t index) const
-> const Memento* {
if (index >= num_entries_) return nullptr;
uint32_t offset = (num_entries_ - 1u - index + first_entry_) % max_entries_;
@@ -79,14 +103,22 @@ void HPackTable::MementoRingBuffer::Rebuild(uint32_t max_entries) {
entries_.swap(entries);
}
void HPackTable::MementoRingBuffer::ForEach(
absl::FunctionRef<void(uint32_t, const Memento&)> f) const {
template <typename F>
void HPackTable::MementoRingBuffer::ForEach(F f) const {
uint32_t index = 0;
while (auto* m = Lookup(index++)) {
while (auto* m = Peek(index++)) {
f(index, *m);
}
}
HPackTable::MementoRingBuffer::~MementoRingBuffer() {
ForEach([](uint32_t, const Memento& m) {
if (!m.parse_status.TestBit(Memento::kUsedBit)) {
global_stats().IncrementHttp2HpackMisses();
}
});
}
// Evict one element from the table
void HPackTable::EvictOne() {
auto first_entry = entries_.PopOne();
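The hunks above add hit/miss telemetry to the HPACK dynamic table: Lookup() sets Memento::kUsedBit and counts a hit the first time an entry is referenced (IncrementHttp2HpackHits), while PopOne() and the destructor count a miss for entries evicted without ever being looked up (IncrementHttp2HpackMisses); one entry at a time also carries a timestamp to measure entry lifetime. A standalone sketch of the used-bit accounting only (illustrative types, not gRPC code; the timestamp logic is omitted):

#include <cstdint>
#include <vector>

struct Entry {
  int value = 0;
  bool used = false;  // mirrors Memento::kUsedBit
};

class RingBuffer {
 public:
  explicit RingBuffer(uint32_t max) : max_(max), entries_(max) {}
  // Assumes the caller evicts (PopOne) before exceeding capacity.
  void Put(Entry e) { entries_[(first_ + num_++) % max_] = e; }
  // Index 0 is the newest entry, matching HPACK dynamic table order.
  Entry* Lookup(uint32_t index) {
    if (index >= num_) return nullptr;
    Entry& e = entries_[(num_ - 1u - index + first_) % max_];
    if (!e.used) {
      e.used = true;
      ++hits_;  // cf. IncrementHttp2HpackHits()
    }
    return &e;
  }
  // Evicts the oldest entry; never-used entries count as misses.
  Entry PopOne() {
    Entry e = entries_[first_ % max_];
    if (!e.used) ++misses_;  // cf. IncrementHttp2HpackMisses()
    ++first_;
    --num_;
    return e;
  }
  uint64_t hits_ = 0, misses_ = 0;

 private:
  uint32_t max_, first_ = 0, num_ = 0;
  std::vector<Entry> entries_;
};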

src/core/ext/transport/chttp2/transport/hpack_parser_table.h (33)

@@ -21,6 +21,8 @@
#include <stdint.h>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <vector>
@@ -34,6 +36,7 @@
#include "src/core/lib/gprpp/no_destruct.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/parsed_metadata.h"
#include "src/core/util/unique_ptr_with_bitset.h"
namespace grpc_core {
@@ -54,11 +57,14 @@ class HPackTable {
struct Memento {
ParsedMetadata<grpc_metadata_batch> md;
std::unique_ptr<HpackParseResult> parse_status;
// Alongside parse_status we store one bit indicating whether this memento
// has been looked up (and therefore consumed) or not.
UniquePtrWithBitset<HpackParseResult, 1> parse_status;
static const int kUsedBit = 0;
};
// Lookup, but don't ref.
const Memento* Lookup(uint32_t index) const {
const Memento* Lookup(uint32_t index) {
// Static table comes first, just return an entry from it.
// NB: This imposes the constraint that the first
// GRPC_CHTTP2_LAST_STATIC_ENTRY entries in the core static metadata table
@@ -97,6 +103,14 @@ class HPackTable {
class MementoRingBuffer {
public:
MementoRingBuffer() {}
~MementoRingBuffer();
MementoRingBuffer(const MementoRingBuffer&) = delete;
MementoRingBuffer& operator=(const MementoRingBuffer&) = delete;
MementoRingBuffer(MementoRingBuffer&&) = default;
MementoRingBuffer& operator=(MementoRingBuffer&&) = default;
// Rebuild this buffer with a new max_entries_ size.
void Rebuild(uint32_t max_entries);
@@ -109,10 +123,11 @@ class HPackTable {
Memento PopOne();
// Lookup the entry at index, or return nullptr if none exists.
const Memento* Lookup(uint32_t index) const;
const Memento* Lookup(uint32_t index);
const Memento* Peek(uint32_t index) const;
void ForEach(absl::FunctionRef<void(uint32_t dynamic_index, const Memento&)>
f) const;
template <typename F>
void ForEach(F f) const;
uint32_t max_entries() const { return max_entries_; }
uint32_t num_entries() const { return num_entries_; }
@@ -126,11 +141,17 @@ class HPackTable {
// Maximum number of entries we could possibly fit in the table, given
// defined overheads.
uint32_t max_entries_ = hpack_constants::kInitialTableEntries;
// Which index holds a timestamp (or kNoTimestamp if none do).
static constexpr uint32_t kNoTimestamp =
std::numeric_limits<uint32_t>::max();
uint32_t timestamp_index_ = kNoTimestamp;
// The timestamp associated with timestamp_entry_.
Timestamp timestamp_;
std::vector<Memento> entries_;
};
const Memento* LookupDynamic(uint32_t index) const {
const Memento* LookupDynamic(uint32_t index) {
// Not static - find the value in the list of valid entries
const uint32_t tbl_index = index - (hpack_constants::kLastStaticEntry + 1);
return entries_.Lookup(tbl_index);
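The new UniquePtrWithBitset<T, N> used for parse_status packs N flag bits alongside an owning pointer; its header (src/core/util/unique_ptr_with_bitset.h, 86 lines) is not reproduced on this page. As an illustrative sketch only, not necessarily how that header is implemented, one common way to do this is low-bit tagging, which relies on the pointee's alignment leaving the low pointer bits zero. It is also consistent with the hpack_parser.cc hunks above switching to parse_status.get() == nullptr comparisons, since the wrapper exposes get() rather than direct nullptr comparison:

#include <cassert>
#include <cstdint>

template <typename T>
class PtrWithBit {
 public:
  PtrWithBit() = default;
  explicit PtrWithBit(T* p) : bits_(reinterpret_cast<uintptr_t>(p)) {
    assert((bits_ & 1u) == 0 && "requires alignof(T) >= 2");
  }
  ~PtrWithBit() { delete get(); }
  PtrWithBit(const PtrWithBit&) = delete;
  PtrWithBit& operator=(const PtrWithBit&) = delete;

  // Mask off the flag bit to recover the real pointer.
  T* get() const { return reinterpret_cast<T*>(bits_ & ~uintptr_t{1}); }
  void SetBit() { bits_ |= uintptr_t{1}; }    // cf. SetBit(kUsedBit)
  bool TestBit() const { return (bits_ & uintptr_t{1}) != 0; }

 private:
  uintptr_t bits_ = 0;
};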

src/core/handshaker/security/secure_endpoint.cc (9)

@@ -108,7 +108,6 @@ struct secure_endpoint : public grpc_endpoint {
}
~secure_endpoint() {
memory_owner.Reset();
tsi_frame_protector_destroy(protector);
tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
grpc_slice_buffer_destroy(&source_buffer);
@@ -380,9 +379,12 @@ static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
static void on_write(void* user_data, grpc_error_handle error) {
secure_endpoint* ep = static_cast<secure_endpoint*>(user_data);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, std::exchange(ep->write_cb, nullptr),
std::move(error));
grpc_closure* cb = ep->write_cb;
ep->write_cb = nullptr;
SECURE_ENDPOINT_UNREF(ep, "write");
grpc_core::EnsureRunInExecCtx([cb, error = std::move(error)]() {
grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
});
}
static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
@@ -505,6 +507,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
static void endpoint_destroy(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
ep->wrapped_ep.reset();
ep->memory_owner.Reset();
SECURE_ENDPOINT_UNREF(ep, "destroy");
}

src/core/handshaker/security/security_handshaker.cc (119)

@@ -88,27 +88,27 @@ class SecurityHandshaker : public Handshaker {
private:
grpc_error_handle DoHandshakerNextLocked(const unsigned char* bytes_received,
size_t bytes_received_size);
size_t bytes_received_size)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
grpc_error_handle OnHandshakeNextDoneLocked(
tsi_result result, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
void HandshakeFailedLocked(absl::Status error);
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void HandshakeFailedLocked(absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void Finish(absl::Status status);
void OnHandshakeDataReceivedFromPeerFn(absl::Status error);
void OnHandshakeDataSentToPeerFn(absl::Status error);
static void OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error);
static void OnHandshakeDataSentToPeerFnScheduler(void* arg,
grpc_error_handle error);
void OnHandshakeDataReceivedFromPeerFnScheduler(grpc_error_handle error);
void OnHandshakeDataSentToPeerFnScheduler(grpc_error_handle error);
static void OnHandshakeNextDoneGrpcWrapper(
tsi_result result, void* user_data, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
static void OnPeerCheckedFn(void* arg, grpc_error_handle error);
void OnPeerCheckedInner(grpc_error_handle error);
void OnPeerCheckedFn(grpc_error_handle error);
size_t MoveReadBufferIntoHandshakeBuffer();
grpc_error_handle CheckPeerLocked();
grpc_error_handle CheckPeerLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// State set at creation time.
tsi_handshaker* handshaker_;
@@ -125,13 +125,11 @@ class SecurityHandshaker : public Handshaker {
size_t handshake_buffer_size_;
unsigned char* handshake_buffer_;
SliceBuffer outgoing_;
grpc_closure on_handshake_data_sent_to_peer_;
grpc_closure on_handshake_data_received_from_peer_;
grpc_closure on_peer_checked_;
RefCountedPtr<grpc_auth_context> auth_context_;
tsi_handshaker_result* handshaker_result_ = nullptr;
size_t max_frame_size_ = 0;
std::string tsi_handshake_error_;
grpc_closure* on_peer_checked_ ABSL_GUARDED_BY(mu_) = nullptr;
};
SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
@@ -143,10 +141,7 @@ SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
handshake_buffer_(
static_cast<uint8_t*>(gpr_malloc(handshake_buffer_size_))),
max_frame_size_(
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {
GRPC_CLOSURE_INIT(&on_peer_checked_, &SecurityHandshaker::OnPeerCheckedFn,
this, grpc_schedule_on_exec_ctx);
}
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {}
SecurityHandshaker::~SecurityHandshaker() {
tsi_handshaker_destroy(handshaker_);
@@ -220,8 +215,9 @@ MakeChannelzSecurityFromAuthContext(grpc_auth_context* auth_context) {
} // namespace
void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
void SecurityHandshaker::OnPeerCheckedFn(grpc_error_handle error) {
MutexLock lock(&mu_);
on_peer_checked_ = nullptr;
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(error);
return;
@@ -317,11 +313,6 @@ void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
Finish(absl::OkStatus());
}
void SecurityHandshaker::OnPeerCheckedFn(void* arg, grpc_error_handle error) {
RefCountedPtr<SecurityHandshaker>(static_cast<SecurityHandshaker*>(arg))
->OnPeerCheckedInner(error);
}
grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
tsi_peer peer;
tsi_result result =
@@ -330,8 +321,12 @@ grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
return GRPC_ERROR_CREATE(absl::StrCat("Peer extraction failed (",
tsi_result_to_string(result), ")"));
}
on_peer_checked_ = NewClosure(
[self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
self->OnPeerCheckedFn(std::move(status));
});
connector_->check_peer(peer, args_->endpoint.get(), args_->args,
&auth_context_, &on_peer_checked_);
&auth_context_, on_peer_checked_);
grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
auth_context_.get(), GRPC_TRANSPORT_SECURITY_LEVEL_PROPERTY_NAME);
const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it);
@@ -356,10 +351,10 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
CHECK_EQ(bytes_to_send_size, 0u);
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
return error;
}
@@ -387,19 +382,19 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
reinterpret_cast<const char*>(bytes_to_send), bytes_to_send_size));
grpc_endpoint_write(
args_->endpoint.get(), outgoing_.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_sent_to_peer_,
&SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler, this,
grpc_schedule_on_exec_ctx),
NewClosure(
[self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
self->OnHandshakeDataSentToPeerFnScheduler(std::move(status));
}),
nullptr, /*max_frame_size=*/INT_MAX);
} else if (handshaker_result == nullptr) {
// There is nothing to send, but need to read from peer.
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
} else {
// Handshake has finished, check peer and so on.
@@ -418,8 +413,6 @@ void SecurityHandshaker::OnHandshakeNextDoneGrpcWrapper(
result, bytes_to_send, bytes_to_send_size, handshaker_result);
if (!error.ok()) {
h->HandshakeFailedLocked(std::move(error));
} else {
h.release(); // Avoid unref
}
}
@@ -429,13 +422,15 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
const unsigned char* bytes_to_send = nullptr;
size_t bytes_to_send_size = 0;
tsi_handshaker_result* hs_result = nullptr;
auto self = RefAsSubclass<SecurityHandshaker>();
tsi_result result = tsi_handshaker_next(
handshaker_, bytes_received, bytes_received_size, &bytes_to_send,
&bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper, this,
&tsi_handshake_error_);
&bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper,
self.get(), &tsi_handshake_error_);
if (result == TSI_ASYNC) {
// Handshaker operating asynchronously. Nothing else to do here;
// callback will be invoked in a TSI thread.
// Handshaker operating asynchronously. Callback will be invoked in a TSI
// thread. We no longer own the ref held in self.
self.release();
return absl::OkStatus();
}
// Handshaker returned synchronously. Invoke callback directly in
@@ -449,18 +444,18 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
grpc_error_handle error) {
args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataReceivedFromPeerFn(std::move(error));
self->OnHandshakeDataReceivedFromPeerFn(std::move(error));
// Avoid destruction outside of an ExecCtx (since this is non-cancelable).
self.reset();
});
}
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
@@ -473,8 +468,6 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
error = DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) {
HandshakeFailedLocked(std::move(error));
} else {
handshaker.release(); // Avoid unref
}
}
@@ -483,18 +476,18 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
grpc_error_handle error) {
args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataSentToPeerFn(std::move(error));
self->OnHandshakeDataSentToPeerFn(std::move(error));
// Avoid destruction outside of an ExecCtx (since this is non-cancelable).
self.reset();
});
}
void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
@@ -505,10 +498,10 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
if (handshaker_result_ == nullptr) {
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
} else {
error = CheckPeerLocked();
@@ -517,7 +510,6 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
return;
}
}
handshaker.release(); // Avoid unref
}
//
@@ -528,7 +520,7 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
MutexLock lock(&mu_);
if (!is_shutdown_) {
is_shutdown_ = true;
connector_->cancel_check_peer(&on_peer_checked_, std::move(error));
connector_->cancel_check_peer(on_peer_checked_, std::move(error));
tsi_handshaker_shutdown(handshaker_);
args_->endpoint.reset();
}
@@ -537,7 +529,6 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
void SecurityHandshaker::DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) {
auto ref = Ref();
MutexLock lock(&mu_);
args_ = args;
on_handshake_done_ = std::move(on_handshake_done);
@@ -546,8 +537,6 @@ void SecurityHandshaker::DoHandshake(
DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) {
HandshakeFailedLocked(error);
} else {
ref.release(); // Avoid unref
}
}
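The thrust of this refactor: the pre-allocated grpc_closure members and the manual Ref()/release() bookkeeping (the "Avoid unref" branches) are replaced by heap-allocated closures that capture a strong RefCountedPtr to the handshaker, so each pending callback owns a reference for exactly as long as it exists. A rough standalone sketch of the pattern, with std::shared_ptr standing in for gRPC's RefCountedPtr (illustrative only, not the gRPC API):

#include <functional>
#include <memory>

class Handshaker : public std::enable_shared_from_this<Handshaker> {
 public:
  // Analogous to NewClosure([self = RefAsSubclass<SecurityHandshaker>()]...):
  // the lambda keeps *this alive until the callback object is destroyed.
  std::function<void(int)> MakeOnPeerChecked() {
    return [self = shared_from_this()](int status) {
      self->OnPeerChecked(status);
      // `self` is released with the callback; no manual release() needed.
    };
  }

 private:
  void OnPeerChecked(int /*status*/) { /* handle result */ }
};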

src/core/lib/channel/promise_based_filter.cc (75)

@@ -207,10 +207,8 @@ void BaseCallData::CapturedBatch::ResumeWith(Flusher* releaser) {
uintptr_t& refcnt = *RefCountField(batch);
if (refcnt == 0) {
// refcnt==0 ==> cancelled
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << releaser->call()->DebugTag()
<< "RESUME BATCH REQUEST CANCELLED";
}
GRPC_TRACE_LOG(channel, INFO)
<< releaser->call()->DebugTag() << "RESUME BATCH REQUEST CANCELLED";
return;
}
if (--refcnt == 0) {
@@ -266,10 +264,9 @@ BaseCallData::Flusher::~Flusher() {
auto* batch = static_cast<grpc_transport_stream_op_batch*>(p);
BaseCallData* call =
static_cast<BaseCallData*>(batch->handler_private.extra_arg);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << "FLUSHER:forward batch via closure: "
GRPC_TRACE_LOG(channel, INFO)
<< "FLUSHER:forward batch via closure: "
<< grpc_transport_stream_op_batch_string(batch, false);
}
grpc_call_next_op(call->elem(), batch);
GRPC_CALL_STACK_UNREF(call->call_stack(), "flusher_batch");
};
@@ -278,10 +275,9 @@ BaseCallData::Flusher::~Flusher() {
if (call_->call() != nullptr && call_->call()->traced()) {
batch->is_traced = true;
}
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << "FLUSHER:queue batch to forward in closure: "
GRPC_TRACE_LOG(channel, INFO)
<< "FLUSHER:queue batch to forward in closure: "
<< grpc_transport_stream_op_batch_string(release_[i], false);
}
batch->handler_private.extra_arg = call_;
GRPC_CLOSURE_INIT(&batch->handler_private.closure, call_next_op, batch,
nullptr);
@@ -290,10 +286,9 @@ BaseCallData::Flusher::~Flusher() {
"flusher_batch");
}
call_closures_.RunClosuresWithoutYielding(call_->call_combiner());
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << "FLUSHER:forward batch: "
GRPC_TRACE_LOG(channel, INFO)
<< "FLUSHER:forward batch: "
<< grpc_transport_stream_op_batch_string(release_[0], false);
}
if (call_->call() != nullptr && call_->call()->traced()) {
release_[0]->is_traced = true;
}
@@ -331,10 +326,8 @@ const char* BaseCallData::SendMessage::StateString(State state) {
}
void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " SendMessage.StartOp st=" << StateString(state_);
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.StartOp st=" << StateString(state_);
switch (state_) {
case State::kInitial:
state_ = State::kGotBatchNoPipe;
@@ -359,10 +352,8 @@ void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
template <typename T>
void BaseCallData::SendMessage::GotPipe(T* pipe_end) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " SendMessage.GotPipe st=" << StateString(state_);
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.GotPipe st=" << StateString(state_);
CHECK_NE(pipe_end, nullptr);
switch (state_) {
case State::kInitial:
@@ -615,10 +606,9 @@ const char* BaseCallData::ReceiveMessage::StateString(State state) {
}
void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.StartOp st=" << StateString(state_);
}
switch (state_) {
case State::kInitial:
state_ = State::kForwardedBatchNoPipe;
@@ -656,10 +646,9 @@ void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
template <typename T>
void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.GotPipe st=" << StateString(state_);
}
switch (state_) {
case State::kInitial:
state_ = State::kIdle;
@@ -901,10 +890,9 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
case State::kPulledFromPipe: {
CHECK(push_.has_value());
if ((*push_)().ready()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.WakeInsideCombiner push complete";
}
if (state_ == State::kCompletedWhilePulledFromPipe) {
interceptor()->Push()->Close();
state_ = State::kCancelled;
@@ -1016,10 +1004,8 @@ class ClientCallData::PollContext {
void Run() {
DCHECK(HasContext<Arena>());
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << self_->LogTag() << " ClientCallData.PollContext.Run "
GRPC_TRACE_LOG(channel, INFO)
<< self_->LogTag() << " ClientCallData.PollContext.Run "
<< self_->DebugString();
}
CHECK(have_scoped_activity_);
repoll_ = false;
if (self_->send_message() != nullptr) {
@@ -1664,10 +1651,8 @@ void ClientCallData::HookRecvTrailingMetadata(CapturedBatch batch) {
// - return a wrapper around PollTrailingMetadata as the promise.
ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
CallArgs call_args) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " ClientCallData.MakeNextPromise "
<< DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ClientCallData.MakeNextPromise " << DebugString();
CHECK_NE(poll_ctx_, nullptr);
CHECK(send_initial_state_ == SendInitialState::kQueued);
send_initial_metadata_batch_->payload->send_initial_metadata
@@ -1727,10 +1712,8 @@ ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
// All polls: await receiving the trailing metadata, then return it to the
// application.
Poll<ServerMetadataHandle> ClientCallData::PollTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " ClientCallData.PollTrailingMetadata "
<< DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ClientCallData.PollTrailingMetadata " << DebugString();
CHECK_NE(poll_ctx_, nullptr);
if (send_initial_state_ == SendInitialState::kQueued) {
// First poll: pass the send_initial_metadata op down the stack.
@@ -2275,10 +2258,9 @@ ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
// All polls: await sending the trailing metadata, then forward it down the
// stack.
Poll<ServerMetadataHandle> ServerCallData::PollTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag()
GRPC_TRACE_LOG(channel, INFO)
<< LogTag()
<< " PollTrailingMetadata: " << StateString(send_trailing_state_);
}
switch (send_trailing_state_) {
case SendTrailingState::kInitial:
case SendTrailingState::kQueuedBehindSendMessage:
@@ -2306,10 +2288,9 @@ void ServerCallData::RecvTrailingMetadataReadyCallback(
}
void ServerCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": RecvTrailingMetadataReady error=" << error
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": RecvTrailingMetadataReady error=" << error
<< " md=" << recv_trailing_metadata_->DebugString();
}
Flusher flusher(this);
PollContext poll_ctx(this, &flusher);
Completed(error, recv_trailing_metadata_->get(GrpcTarPit()).has_value(),
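Most of the changes in this file, and in the work_serializer, iomgr, resource_quota, authorization, credentials, and slice files below, are the same mechanical cleanup: a multi-line if (GRPC_TRACE_FLAG_ENABLED(tag)) { LOG(INFO) << ...; } block becomes the single statement GRPC_TRACE_LOG(tag, INFO) << ...;. One plausible shape for such a macro (a sketch only; the real GRPC_TRACE_LOG is defined elsewhere in the tree, not in this commit's file list) builds on Abseil's LOG_IF, whose streamed operands are not evaluated when the condition is false:

// Sketch of a conditional-stream macro in the style of GRPC_TRACE_LOG.
// LOG_IF expands to a no-op stream when the condition is false, so the
// operands of << are never evaluated on the fast path.
#include "absl/log/log.h"

#define TRACE_LOG(tracer, level) \
  LOG_IF(level, GRPC_TRACE_FLAG_ENABLED(tracer))

// Before: if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
//           LOG(INFO) << "TCP:" << tcp << " got_read: " << status;
//         }
// After:  TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " got_read: " << status;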

src/core/lib/event_engine/ares_resolver.cc (2)

@@ -97,6 +97,8 @@ absl::Status AresStatusToAbslStatus(int status, absl::string_view error_msg) {
return absl::UnimplementedError(error_msg);
case ARES_ENOTFOUND:
return absl::NotFoundError(error_msg);
case ARES_ECONNREFUSED:
return absl::UnavailableError(error_msg);
default:
return absl::UnknownError(error_msg);
}

src/core/lib/experiments/experiments.yaml (8)

@@ -55,14 +55,14 @@
- name: canary_client_privacy
description:
If set, canary client privacy
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false
- name: client_privacy
description:
If set, client privacy
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false
@@ -88,7 +88,7 @@
uses_polling: true
- name: free_large_allocator
description: If set, return all free bytes from a "big" allocator
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: [resource_quota_test]
- name: max_pings_wo_data_throttle
@@ -138,7 +138,7 @@
- name: server_privacy
description:
If set, server privacy
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false

src/core/lib/gprpp/work_serializer.cc (15)

@@ -136,10 +136,9 @@ class WorkSerializer::LegacyWorkSerializer final : public WorkSerializerImpl {
void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::Run() " << this << " Scheduling callback ["
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::Run() " << this << " Scheduling callback ["
<< location.file() << ":" << location.line() << "]";
}
// Increment queue size for the new callback and owner count to attempt to
// take ownership of the WorkSerializer.
const uint64_t prev_ref_pair =
@@ -405,10 +404,9 @@ void WorkSerializer::DispatchingWorkSerializer::Orphan() {
// Implementation of WorkSerializerImpl::Run
void WorkSerializer::DispatchingWorkSerializer::Run(
std::function<void()> callback, const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer[" << this << "] Scheduling callback ["
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer[" << this << "] Scheduling callback ["
<< location.file() << ":" << location.line() << "]";
}
global_stats().IncrementWorkSerializerItemsEnqueued();
MutexLock lock(&mu_);
if (!running_) {
@@ -438,10 +436,9 @@ void WorkSerializer::DispatchingWorkSerializer::Run() {
// Grab the last element of processing_ - which is the next item in our
// queue since processing_ is stored in reverse order.
auto& cb = processing_.back();
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer[" << this << "] Executing callback ["
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer[" << this << "] Executing callback ["
<< cb.location.file() << ":" << cb.location.line() << "]";
}
// Run the work item.
const auto start = std::chrono::steady_clock::now();
SetCurrentThread();

src/core/lib/iomgr/ev_epoll1_linux.cc (4)

@@ -1125,10 +1125,8 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
goto done;
} else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. non-root poller " << next_worker
GRPC_TRACE_LOG(polling, INFO) << " .. non-root poller " << next_worker
<< " (root=" << root_worker << ")";
}
SET_KICK_STATE(next_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;

src/core/lib/iomgr/event_engine_shims/endpoint.cc (5)

@@ -173,10 +173,9 @@ class EventEngineEndpointWrapper {
void FinishPendingWrite(absl::Status status) {
auto* write_buffer = reinterpret_cast<SliceBuffer*>(&eeep_->write_buffer);
write_buffer->~SliceBuffer();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP: " << this << " WRITE (peer=" << PeerAddress()
GRPC_TRACE_LOG(tcp, INFO)
<< "TCP: " << this << " WRITE (peer=" << PeerAddress()
<< ") error=" << status;
}
grpc_closure* cb = pending_write_cb_;
pending_write_cb_ = nullptr;
if (grpc_core::ExecCtx::Get() == nullptr) {

src/core/lib/iomgr/polling_entity.cc (8)

@@ -30,8 +30,12 @@
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set* pollset_set) {
grpc_polling_entity pollent;
if (pollset_set == nullptr) {
pollent.tag = GRPC_POLLS_NONE;
} else {
pollent.pollent.pollset_set = pollset_set;
pollent.tag = GRPC_POLLS_POLLSET_SET;
}
return pollent;
}
@@ -73,6 +77,8 @@ void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else {
grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));
@@ -93,6 +99,8 @@ void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else {
grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));

src/core/lib/iomgr/tcp_client_posix.cc (14)

@@ -141,10 +141,9 @@ done:
static void tc_on_alarm(void* acp, grpc_error_handle error) {
int done;
async_connect* ac = static_cast<async_connect*>(acp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "CLIENT_CONNECT: " << ac->addr_str
GRPC_TRACE_LOG(tcp, INFO)
<< "CLIENT_CONNECT: " << ac->addr_str
<< ": on_alarm: error=" << grpc_core::StatusToString(error);
}
gpr_mu_lock(&ac->mu);
if (ac->fd != nullptr) {
grpc_fd_shutdown(ac->fd, GRPC_ERROR_CREATE("connect() timed out"));
@@ -180,10 +179,9 @@ static void on_writable(void* acp, grpc_error_handle error) {
std::string addr_str = ac->addr_str;
grpc_fd* fd;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "CLIENT_CONNECT: " << ac->addr_str
GRPC_TRACE_LOG(tcp, INFO)
<< "CLIENT_CONNECT: " << ac->addr_str
<< ": on_writable: error=" << grpc_core::StatusToString(error);
}
gpr_mu_lock(&ac->mu);
CHECK(ac->fd);
@@ -381,10 +379,8 @@ int64_t grpc_tcp_client_create_from_prepared_fd(
grpc_schedule_on_exec_ctx);
ac->options = options;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "CLIENT_CONNECT: " << ac->addr_str
GRPC_TRACE_LOG(tcp, INFO) << "CLIENT_CONNECT: " << ac->addr_str
<< ": asynchronously connecting fd " << fdobj;
}
int shard_number = connection_id % (*g_connection_shards).size();
struct ConnectionShard* shard = &(*g_connection_shards)[shard_number];

src/core/lib/iomgr/tcp_posix.cc (29)

@@ -669,10 +669,8 @@ static void drop_uncovered(grpc_tcp* /*tcp*/) {
old_count = g_uncovered_notifications_pending--;
g_backup_poller_mu->Unlock();
CHECK_GT(old_count, 1);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " uncover cnt " << old_count << "->"
<< old_count - 1;
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " uncover cnt "
<< old_count << "->" << old_count - 1;
}
// gRPC API considers a Write operation to be done the moment it clears ‘flow
@@ -705,10 +703,8 @@ static void cover_self(grpc_tcp* tcp) {
p = g_backup_poller;
g_backup_poller_mu->Unlock();
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " add " << tcp << " cnt "
<< old_count - 1 << "->" << old_count;
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " add " << tcp
<< " cnt " << old_count - 1 << "->" << old_count;
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
}
@@ -731,10 +727,8 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << arg
<< " got_write: " << grpc_core::StatusToString(error);
}
GRPC_TRACE_LOG(tcp, INFO)
<< "TCP:" << arg << " got_write: " << grpc_core::StatusToString(error);
drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
}
@@ -1129,10 +1123,8 @@ static void maybe_make_read_slices(grpc_tcp* tcp)
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp
<< " got_read: " << grpc_core::StatusToString(error);
}
GRPC_TRACE_LOG(tcp, INFO)
<< "TCP:" << tcp << " got_read: " << grpc_core::StatusToString(error);
tcp->read_mu.Lock();
grpc_error_handle tcp_read_error;
if (GPR_LIKELY(error.ok()) && tcp->memory_owner.is_valid()) {
@@ -1472,10 +1464,9 @@ static bool process_errors(grpc_tcp* tcp) {
} else {
// Got a control message that is not a timestamp or zerocopy. Don't know
// how to handle this.
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "unknown control message cmsg_level:" << cmsg->cmsg_level
GRPC_TRACE_LOG(tcp, INFO)
<< "unknown control message cmsg_level:" << cmsg->cmsg_level
<< " cmsg_type:" << cmsg->cmsg_type;
}
return processed_err;
}
}

src/core/lib/iomgr/tcp_server_posix.cc (11)

@@ -177,11 +177,10 @@ static grpc_error_handle CreateEventEngineListener(
<< addr_uri.status().ToString();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "SERVER_CONNECT: incoming external connection: "
GRPC_TRACE_LOG(tcp, INFO) << "SERVER_CONNECT: incoming external "
"connection: "
<< addr_uri->c_str();
}
}
read_notifier_pollset =
(*(s->pollsets))[static_cast<size_t>(
gpr_atm_no_barrier_fetch_add(
@@ -916,10 +915,8 @@ class ExternalConnectionHandler : public grpc_core::TcpServerFdHandler {
LOG(ERROR) << "Invalid address: " << addr_uri.status();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "SERVER_CONNECT: incoming external connection: "
<< *addr_uri;
}
GRPC_TRACE_LOG(tcp, INFO)
<< "SERVER_CONNECT: incoming external connection: " << *addr_uri;
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);
read_notifier_pollset =

src/core/lib/resource_quota/memory_quota.cc (5)

@@ -766,10 +766,9 @@ double PressureTracker::AddSampleAndGetControlValue(double sample) {
} else {
report = controller_.Update(current_estimate - kSetPoint);
}
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "RQ: pressure:" << current_estimate << " report:" << report
GRPC_TRACE_LOG(resource_quota, INFO)
<< "RQ: pressure:" << current_estimate << " report:" << report
<< " controller:" << controller_.DebugString();
}
report_.store(report, std::memory_order_relaxed);
});
return report_.load(std::memory_order_relaxed);

src/core/lib/resource_quota/memory_quota.h (6)

@@ -425,10 +425,8 @@ class GrpcMemoryAllocatorImpl final : public EventEngineMemoryAllocatorImpl {
void ReturnFree() {
size_t ret = free_bytes_.exchange(0, std::memory_order_acq_rel);
if (ret == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Allocator " << this << " returning " << ret
<< " bytes to quota";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "Allocator " << this << " returning " << ret << " bytes to quota";
taken_bytes_.fetch_sub(ret, std::memory_order_relaxed);
memory_quota_->Return(ret);
memory_quota_->MaybeMoveAllocator(this, /*old_free_bytes=*/ret,

src/core/lib/security/authorization/grpc_server_authz_filter.cc
@@ -77,10 +77,9 @@ bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
     AuthorizationEngine::Decision decision =
         engines.deny_engine->Evaluate(args);
     if (decision.type == AuthorizationEngine::Decision::Type::kDeny) {
-      if (GRPC_TRACE_FLAG_ENABLED(grpc_authz_api)) {
-        LOG(INFO) << "chand=" << this << ": request denied by policy "
-                  << decision.matching_policy_name;
-      }
+      GRPC_TRACE_LOG(grpc_authz_api, INFO)
+          << "chand=" << this << ": request denied by policy "
+          << decision.matching_policy_name;
       return false;
     }
   }
@@ -95,10 +94,8 @@ bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
       return true;
     }
   }
-  if (GRPC_TRACE_FLAG_ENABLED(grpc_authz_api)) {
-    LOG(INFO) << "chand=" << this
-              << ": request denied, no matching policy found.";
-  }
+  GRPC_TRACE_LOG(grpc_authz_api, INFO)
+      << "chand=" << this << ": request denied, no matching policy found.";
   return false;
 }

src/core/lib/security/credentials/plugin/plugin_credentials.cc
@@ -121,10 +121,9 @@ void grpc_plugin_credentials::PendingRequest::RequestMetadataReady(
       GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP);
   grpc_core::RefCountedPtr<grpc_plugin_credentials::PendingRequest> r(
       static_cast<grpc_plugin_credentials::PendingRequest*>(request));
-  if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) {
-    LOG(INFO) << "plugin_credentials[" << r->creds() << "]: request " << r.get()
-              << ": plugin returned asynchronously";
-  }
+  GRPC_TRACE_LOG(plugin_credentials, INFO)
+      << "plugin_credentials[" << r->creds() << "]: request " << r.get()
+      << ": plugin returned asynchronously";
   for (size_t i = 0; i < num_md; ++i) {
     grpc_metadata p;
     p.key = grpc_core::CSliceRef(md[i].key);
@@ -150,10 +149,9 @@ grpc_plugin_credentials::GetRequestMetadata(
       RefAsSubclass<grpc_plugin_credentials>(), std::move(initial_metadata),
       args);
   // Invoke the plugin. The callback holds a ref to us.
-  if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) {
-    LOG(INFO) << "plugin_credentials[" << this << "]: request " << request.get()
-              << ": invoking plugin";
-  }
+  GRPC_TRACE_LOG(plugin_credentials, INFO)
+      << "plugin_credentials[" << this << "]: request " << request.get()
+      << ": invoking plugin";
   grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
   size_t num_creds_md = 0;
   grpc_status_code status = GRPC_STATUS_OK;
@@ -168,17 +166,15 @@ grpc_plugin_credentials::GetRequestMetadata(
                                 child_request.get(), creds_md, &num_creds_md,
                                 &status, &error_details)) {
     child_request.release();
-    if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) {
-      LOG(INFO) << "plugin_credentials[" << this << "]: request "
-                << request.get() << ": plugin will return asynchronously";
-    }
+    GRPC_TRACE_LOG(plugin_credentials, INFO)
+        << "plugin_credentials[" << this << "]: request " << request.get()
+        << ": plugin will return asynchronously";
     return [request] { return request->PollAsyncResult(); };
   }
   // Synchronous return.
-  if (GRPC_TRACE_FLAG_ENABLED(plugin_credentials)) {
-    LOG(INFO) << "plugin_credentials[" << this << "]: request " << request.get()
-              << ": plugin returned synchronously";
-  }
+  GRPC_TRACE_LOG(plugin_credentials, INFO)
+      << "plugin_credentials[" << this << "]: request " << request.get()
+      << ": plugin returned synchronously";
   auto result = request->ProcessPluginResult(creds_md, num_creds_md, status,
                                              error_details);
   // Clean up.

src/core/lib/slice/slice_refcount.h
@@ -46,17 +46,15 @@ struct grpc_slice_refcount {
   void Ref(grpc_core::DebugLocation location) {
     auto prev_refs = ref_.fetch_add(1, std::memory_order_relaxed);
-    if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) {
-      LOG(INFO).AtLocation(location.file(), location.line())
-          << "REF " << this << " " << prev_refs << "->" << prev_refs + 1;
-    }
+    GRPC_TRACE_LOG(slice_refcount, INFO)
+        .AtLocation(location.file(), location.line())
+        << "REF " << this << " " << prev_refs << "->" << prev_refs + 1;
   }
   void Unref(grpc_core::DebugLocation location) {
     auto prev_refs = ref_.fetch_sub(1, std::memory_order_acq_rel);
-    if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) {
-      LOG(INFO).AtLocation(location.file(), location.line())
-          << "UNREF " << this << " " << prev_refs << "->" << prev_refs - 1;
-    }
+    GRPC_TRACE_LOG(slice_refcount, INFO)
+        .AtLocation(location.file(), location.line())
+        << "UNREF " << this << " " << prev_refs << "->" << prev_refs - 1;
     if (prev_refs == 1) {
       destroyer_fn_(this);
     }
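
Editor's note: this hunk chains .AtLocation(location.file(), location.line()) directly onto GRPC_TRACE_LOG, which only compiles if the macro expands to an Abseil log-message object rather than a bare statement. The same chaining with plain Abseil logging, as an assumed illustration:

    // Assumed illustration: an Abseil log message can be attributed to a
    // caller-supplied source location, which is what the Ref()/Unref()
    // tracing above relies on.
    #include "absl/log/log.h"

    void TracedRefEvent(const char* file, int line, void* p, int prev) {
      LOG(INFO).AtLocation(file, line)
          << "REF " << p << " " << prev << "->" << prev + 1;
    }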

src/core/lib/surface/call.cc
@@ -337,7 +337,7 @@ void Call::HandleCompressionAlgorithmDisabled(
 void Call::UpdateDeadline(Timestamp deadline) {
   ReleasableMutexLock lock(&deadline_mu_);
   if (GRPC_TRACE_FLAG_ENABLED(call)) {
-    VLOG(2) << "[call " << this
+    LOG(INFO) << "[call " << this
             << "] UpdateDeadline from=" << deadline_.ToString()
             << " to=" << deadline.ToString();
   }

src/core/lib/transport/bdp_estimator.cc
@@ -71,11 +71,10 @@ Timestamp BdpEstimator::CompletePing() {
   }
   if (start_inter_ping_delay != inter_ping_delay_) {
     stable_estimate_count_ = 0;
-    if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) {
-      LOG(INFO) << "bdp[" << name_ << "]:update_inter_time to "
-                << inter_ping_delay_.millis() << "ms";
-    }
+    GRPC_TRACE_LOG(bdp_estimator, INFO)
+        << "bdp[" << name_ << "]:update_inter_time to "
+        << inter_ping_delay_.millis() << "ms";
   }
   ping_state_ = PingState::UNSCHEDULED;
   accumulator_ = 0;
   return Timestamp::Now() + inter_ping_delay_;

src/core/lib/transport/bdp_estimator.h
@@ -49,10 +49,9 @@ class BdpEstimator {
   // grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a
   // transport (but not necessarily started)
   void SchedulePing() {
-    if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) {
-      LOG(INFO) << "bdp[" << name_ << "]:sched acc=" << accumulator_
-                << " est=" << estimate_;
-    }
+    GRPC_TRACE_LOG(bdp_estimator, INFO)
+        << "bdp[" << name_ << "]:sched acc=" << accumulator_
+        << " est=" << estimate_;
     CHECK(ping_state_ == PingState::UNSCHEDULED);
     ping_state_ = PingState::SCHEDULED;
     accumulator_ = 0;
@@ -62,10 +61,9 @@ class BdpEstimator {
   // once
   // the ping is on the wire
   void StartPing() {
-    if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) {
-      LOG(INFO) << "bdp[" << name_ << "]:start acc=" << accumulator_
-                << " est=" << estimate_;
-    }
+    GRPC_TRACE_LOG(bdp_estimator, INFO)
+        << "bdp[" << name_ << "]:start acc=" << accumulator_
+        << " est=" << estimate_;
     CHECK(ping_state_ == PingState::SCHEDULED);
     ping_state_ = PingState::STARTED;
     ping_start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);
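
Editor's note: the CHECKs in these two files trace out the BDP ping lifecycle: SchedulePing() moves UNSCHEDULED to SCHEDULED, StartPing() moves SCHEDULED to STARTED, and CompletePing() (previous hunk) returns to UNSCHEDULED. A reduced sketch of that state machine, with everything except the transitions assumed away:

    #include <cassert>

    enum class PingState { UNSCHEDULED, SCHEDULED, STARTED };

    struct BdpPingLifecycle {
      PingState state = PingState::UNSCHEDULED;
      void SchedulePing() {  // transport queued a ping
        assert(state == PingState::UNSCHEDULED);
        state = PingState::SCHEDULED;
      }
      void StartPing() {  // ping actually hit the wire
        assert(state == PingState::SCHEDULED);
        state = PingState::STARTED;
      }
      void CompletePing() {  // ack received; ready for the next cycle
        assert(state == PingState::STARTED);
        state = PingState::UNSCHEDULED;
      }
    };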

src/core/lib/transport/call_spine.h
@@ -51,23 +51,24 @@ class CallSpine final : public Party {
         std::move(client_initial_metadata), std::move(arena)));
   }
-  ~CallSpine() override {}
+  ~CallSpine() override { CallOnDone(true); }
   CallFilters& call_filters() { return call_filters_; }
   // Add a callback to be called when server trailing metadata is received.
-  void OnDone(absl::AnyInvocable<void()> fn) {
+  void OnDone(absl::AnyInvocable<void(bool)> fn) {
     if (on_done_ == nullptr) {
       on_done_ = std::move(fn);
       return;
     }
-    on_done_ = [first = std::move(fn), next = std::move(on_done_)]() mutable {
-      first();
-      next();
+    on_done_ = [first = std::move(fn),
+                next = std::move(on_done_)](bool cancelled) mutable {
+      first(cancelled);
+      next(cancelled);
     };
   }
-  void CallOnDone() {
-    if (on_done_ != nullptr) std::exchange(on_done_, nullptr)();
+  void CallOnDone(bool cancelled) {
+    if (on_done_ != nullptr) std::exchange(on_done_, nullptr)(cancelled);
   }
   auto PullServerInitialMetadata() {
@@ -75,7 +76,12 @@ class CallSpine final : public Party {
   }
   auto PullServerTrailingMetadata() {
-    return call_filters().PullServerTrailingMetadata();
+    return Map(
+        call_filters().PullServerTrailingMetadata(),
+        [this](ServerMetadataHandle result) {
+          CallOnDone(result->get(GrpcCallWasCancelled()).value_or(false));
+          return result;
+        });
   }
   auto PushClientToServerMessage(MessageHandle message) {
@@ -190,7 +196,7 @@ class CallSpine final : public Party {
   // Call filters/pipes part of the spine
   CallFilters call_filters_;
-  absl::AnyInvocable<void()> on_done_{nullptr};
+  absl::AnyInvocable<void(bool)> on_done_{nullptr};
 };
 class CallInitiator {
@@ -227,7 +233,9 @@ class CallInitiator {
     spine_->PushServerTrailingMetadata(std::move(status));
   }
-  void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); }
+  void OnDone(absl::AnyInvocable<void(bool)> fn) {
+    spine_->OnDone(std::move(fn));
+  }
   template <typename PromiseFactory>
   void SpawnGuarded(absl::string_view name, PromiseFactory promise_factory) {
@@ -274,7 +282,9 @@ class CallHandler {
     spine_->PushServerTrailingMetadata(std::move(status));
   }
-  void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); }
+  void OnDone(absl::AnyInvocable<void(bool)> fn) {
+    spine_->OnDone(std::move(fn));
+  }
   template <typename Promise>
   auto CancelIfFails(Promise promise) {
@@ -327,7 +337,9 @@ class UnstartedCallHandler {
     spine_->PushServerTrailingMetadata(std::move(status));
   }
-  void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); }
+  void OnDone(absl::AnyInvocable<void(bool)> fn) {
+    spine_->OnDone(std::move(fn));
+  }
   template <typename Promise>
   auto CancelIfFails(Promise promise) {
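
Editor's note: the call_spine.h hunks are the one behavioral change in this section rather than a pure logging cleanup. OnDone callbacks now take a bool, fed from the GrpcCallWasCancelled() entry of the server trailing metadata (defaulting to false), and the CallSpine destructor fires any still-registered callbacks with cancelled == true. Note also the chaining in OnDone(): the most recently registered callback runs first. A hypothetical caller after this change:

    // Hypothetical usage: the completion callback can now tell a normal
    // finish apart from a cancellation.
    handler.OnDone([](bool cancelled) {
      if (cancelled) {
        // e.g. discard per-call bookkeeping instead of committing it
      }
    });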

src/core/lib/transport/connectivity_state.cc
@@ -117,10 +117,9 @@ ConnectivityStateTracker::~ConnectivityStateTracker() {
 void ConnectivityStateTracker::AddWatcher(
     grpc_connectivity_state initial_state,
     OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
-  if (GRPC_TRACE_FLAG_ENABLED(connectivity_state)) {
-    LOG(INFO) << "ConnectivityStateTracker " << name_ << "[" << this
-              << "]: add watcher " << watcher.get();
-  }
+  GRPC_TRACE_LOG(connectivity_state, INFO)
+      << "ConnectivityStateTracker " << name_ << "[" << this
+      << "]: add watcher " << watcher.get();
   grpc_connectivity_state current_state =
       state_.load(std::memory_order_relaxed);
   if (initial_state != current_state) {
@@ -141,10 +140,9 @@ void ConnectivityStateTracker::AddWatcher(
 void ConnectivityStateTracker::RemoveWatcher(
     ConnectivityStateWatcherInterface* watcher) {
-  if (GRPC_TRACE_FLAG_ENABLED(connectivity_state)) {
-    LOG(INFO) << "ConnectivityStateTracker " << name_ << "[" << this
-              << "]: remove watcher " << watcher;
-  }
+  GRPC_TRACE_LOG(connectivity_state, INFO)
+      << "ConnectivityStateTracker " << name_ << "[" << this
+      << "]: remove watcher " << watcher;
   watchers_.erase(watcher);
 }
@@ -178,10 +176,9 @@ void ConnectivityStateTracker::SetState(grpc_connectivity_state state,
 grpc_connectivity_state ConnectivityStateTracker::state() const {
   grpc_connectivity_state state = state_.load(std::memory_order_relaxed);
-  if (GRPC_TRACE_FLAG_ENABLED(connectivity_state)) {
-    LOG(INFO) << "ConnectivityStateTracker " << name_ << "[" << this
-              << "]: get current state: " << ConnectivityStateName(state);
-  }
+  GRPC_TRACE_LOG(connectivity_state, INFO)
+      << "ConnectivityStateTracker " << name_ << "[" << this
+      << "]: get current state: " << ConnectivityStateName(state);
   return state;
 }

src/core/load_balancing/grpclb/grpclb.cc
@@ -948,10 +948,9 @@ void GrpcLb::BalancerCallState::Orphan() {
 void GrpcLb::BalancerCallState::StartQuery() {
   CHECK_NE(lb_call_, nullptr);
-  if (GRPC_TRACE_FLAG_ENABLED(glb)) {
-    LOG(INFO) << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
-              << ": Starting LB call " << lb_call_;
-  }
+  GRPC_TRACE_LOG(glb, INFO)
+      << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
+      << ": Starting LB call " << lb_call_;
   // Create the ops.
   grpc_call_error call_error;
   grpc_op ops[3];
@@ -1527,10 +1526,9 @@ class GrpcLb::NullLbTokenEndpointIterator final
   void ForEach(absl::FunctionRef<void(const EndpointAddresses&)> callback)
       const override {
     parent_it_->ForEach([&](const EndpointAddresses& endpoint) {
-      if (GRPC_TRACE_FLAG_ENABLED(glb)) {
-        LOG(INFO) << "[grpclb " << this
-                  << "] fallback address: " << endpoint.ToString();
-      }
+      GRPC_TRACE_LOG(glb, INFO)
+          << "[grpclb " << this
+          << "] fallback address: " << endpoint.ToString();
      callback(EndpointAddresses(endpoint.addresses(),
                                 endpoint.args().SetObject(empty_token_)));
     });
@@ -1765,10 +1763,9 @@ OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
       std::make_unique<Helper>(RefAsSubclass<GrpcLb>(DEBUG_LOCATION, "Helper"));
   OrphanablePtr<LoadBalancingPolicy> lb_policy =
       MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), &glb_trace);
-  if (GRPC_TRACE_FLAG_ENABLED(glb)) {
-    LOG(INFO) << "[grpclb " << this << "] Created new child policy handler ("
-              << lb_policy.get() << ")";
-  }
+  GRPC_TRACE_LOG(glb, INFO)
+      << "[grpclb " << this << "] Created new child policy handler ("
+      << lb_policy.get() << ")";
   // Add the gRPC LB's interested_parties pollset_set to that of the newly
   // created child policy. This will make the child policy progress upon
   // activity on gRPC LB, which in turn is tied to the application's call.
@@ -1819,10 +1816,9 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
     child_policy_ = CreateChildPolicyLocked(update_args.args);
   }
   // Update the policy.
-  if (GRPC_TRACE_FLAG_ENABLED(glb)) {
-    LOG(INFO) << "[grpclb " << this << "] Updating child policy handler "
-              << child_policy_.get();
-  }
+  GRPC_TRACE_LOG(glb, INFO)
+      << "[grpclb " << this << "] Updating child policy handler "
+      << child_policy_.get();
   // TODO(roth): If we're in fallback mode and the child policy rejects the
   // update, we should propagate that failure back to the resolver somehow.
   (void)child_policy_->UpdateLocked(std::move(update_args));
@@ -1864,10 +1860,9 @@ void GrpcLb::OnSubchannelCacheTimerLocked() {
     subchannel_cache_timer_handle_.reset();
     auto it = cached_subchannels_.begin();
     if (it != cached_subchannels_.end()) {
-      if (GRPC_TRACE_FLAG_ENABLED(glb)) {
-        LOG(INFO) << "[grpclb " << this << "] removing " << it->second.size()
-                  << " subchannels from cache";
-      }
+      GRPC_TRACE_LOG(glb, INFO)
+          << "[grpclb " << this << "] removing " << it->second.size()
+          << " subchannels from cache";
       cached_subchannels_.erase(it);
     }
     if (!cached_subchannels_.empty()) {

src/core/load_balancing/health_check_client.cc
@@ -341,10 +341,9 @@ class HealthProducer::ConnectivityWatcher final
 //
 void HealthProducer::Start(RefCountedPtr<Subchannel> subchannel) {
-  if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
-    LOG(INFO) << "HealthProducer " << this << ": starting with subchannel "
-              << subchannel.get();
-  }
+  GRPC_TRACE_LOG(health_check_client, INFO)
+      << "HealthProducer " << this << ": starting with subchannel "
+      << subchannel.get();
   subchannel_ = std::move(subchannel);
   {
     MutexLock lock(&mu_);
@@ -498,10 +497,9 @@ MakeHealthCheckWatcher(
     health_check_service_name =
         args.GetOwnedString(GRPC_ARG_HEALTH_CHECK_SERVICE_NAME);
   }
-  if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
-    LOG(INFO) << "creating HealthWatcher -- health_check_service_name=\""
-              << health_check_service_name.value_or("N/A") << "\"";
-  }
+  GRPC_TRACE_LOG(health_check_client, INFO)
+      << "creating HealthWatcher -- health_check_service_name=\""
+      << health_check_service_name.value_or("N/A") << "\"";
   return std::make_unique<HealthWatcher>(std::move(work_serializer),
                                          std::move(health_check_service_name),
                                          std::move(watcher));

src/core/load_balancing/oob_backend_metric.cc
@@ -274,10 +274,8 @@ void OrcaProducer::MaybeStartStreamLocked() {
 void OrcaProducer::NotifyWatchers(
     const BackendMetricData& backend_metric_data) {
-  if (GRPC_TRACE_FLAG_ENABLED(orca_client)) {
-    LOG(INFO) << "OrcaProducer " << this
-              << ": reporting backend metrics to watchers";
-  }
+  GRPC_TRACE_LOG(orca_client, INFO)
+      << "OrcaProducer " << this << ": reporting backend metrics to watchers";
   MutexLock lock(&mu_);
   for (OrcaWatcher* watcher : watchers_) {
     watcher->watcher()->OnBackendMetricReport(backend_metric_data);

src/core/load_balancing/outlier_detection/outlier_detection.cc
@@ -580,11 +580,10 @@ OutlierDetectionLb::OutlierDetectionLb(Args args)
 }
 OutlierDetectionLb::~OutlierDetectionLb() {
-  if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-    LOG(INFO) << "[outlier_detection_lb " << this
-              << "] destroying outlier_detection LB policy";
-  }
+  GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+      << "[outlier_detection_lb " << this
+      << "] destroying outlier_detection LB policy";
 }
 void OutlierDetectionLb::ShutdownLocked() {
   if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
@@ -622,10 +621,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
   // Update outlier detection timer.
   if (!config_->CountingEnabled()) {
     // No need for timer. Cancel the current timer, if any.
-    if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-      LOG(INFO) << "[outlier_detection_lb " << this
-                << "] counting disabled, cancelling timer";
-    }
+    GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+        << "[outlier_detection_lb " << this
+        << "] counting disabled, cancelling timer";
     ejection_timer_.reset();
   } else if (ejection_timer_ == nullptr) {
     // No timer running. Start it now.
@@ -643,10 +641,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
     // with the same start time.
     // Note that if the new deadline is in the past, the timer will fire
     // immediately.
-    if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-      LOG(INFO) << "[outlier_detection_lb " << this
-                << "] interval changed, replacing timer";
-    }
+    GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+        << "[outlier_detection_lb " << this
+        << "] interval changed, replacing timer";
     ejection_timer_ = MakeOrphanable<EjectionTimer>(
         RefAsSubclass<OutlierDetectionLb>(), ejection_timer_->StartTime());
   }
@@ -663,10 +660,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
     // Find the entry in the endpoint map.
     auto it = endpoint_state_map_.find(key);
     if (it == endpoint_state_map_.end()) {
-      if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-        LOG(INFO) << "[outlier_detection_lb " << this
-                  << "] adding endpoint entry for " << key.ToString();
-      }
+      GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+          << "[outlier_detection_lb " << this
+          << "] adding endpoint entry for " << key.ToString();
       // The endpoint is not present in the map, so we'll need to add it.
       // Start by getting a pointer to the entry for each address in the
      // subchannel map, creating the entry if needed.
@@ -722,10 +718,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
   for (auto it = endpoint_state_map_.begin();
        it != endpoint_state_map_.end();) {
     if (current_endpoints.find(it->first) == current_endpoints.end()) {
-      if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-        LOG(INFO) << "[outlier_detection_lb " << this
-                  << "] removing endpoint map entry " << it->first.ToString();
-      }
+      GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+          << "[outlier_detection_lb " << this
+          << "] removing endpoint map entry " << it->first.ToString();
       it = endpoint_state_map_.erase(it);
     } else {
       ++it;
@@ -742,10 +737,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
   update_args.resolution_note = std::move(args.resolution_note);
   update_args.config = config_->child_policy();
   update_args.args = std::move(args.args);
-  if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-    LOG(INFO) << "[outlier_detection_lb " << this
-              << "] Updating child policy handler " << child_policy_.get();
-  }
+  GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+      << "[outlier_detection_lb " << this << "] Updating child policy handler "
+      << child_policy_.get();
   return child_policy_->UpdateLocked(std::move(update_args));
 }
@@ -774,10 +768,9 @@ OrphanablePtr<LoadBalancingPolicy> OutlierDetectionLb::CreateChildPolicyLocked(
   OrphanablePtr<LoadBalancingPolicy> lb_policy =
       MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
                                          &outlier_detection_lb_trace);
-  if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-    LOG(INFO) << "[outlier_detection_lb " << this
-              << "] Created new child policy handler " << lb_policy.get();
-  }
+  GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+      << "[outlier_detection_lb " << this
+      << "] Created new child policy handler " << lb_policy.get();
   // Add our interested_parties pollset_set to that of the newly created
   // child policy. This will make the child policy progress upon activity on
   // this policy, which in turn is tied to the application's call.
@@ -842,10 +835,9 @@ OutlierDetectionLb::EjectionTimer::EjectionTimer(
     RefCountedPtr<OutlierDetectionLb> parent, Timestamp start_time)
     : parent_(std::move(parent)), start_time_(start_time) {
   auto interval = parent_->config_->outlier_detection_config().interval;
-  if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-    LOG(INFO) << "[outlier_detection_lb " << parent_.get()
-              << "] ejection timer will run in " << interval.ToString();
-  }
+  GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+      << "[outlier_detection_lb " << parent_.get()
+      << "] ejection timer will run in " << interval.ToString();
   timer_handle_ = parent_->channel_control_helper()->GetEventEngine()->RunAfter(
       interval, [self = Ref(DEBUG_LOCATION, "EjectionTimer")]() mutable {
         ApplicationCallbackExecCtx callback_exec_ctx;
@@ -868,10 +860,9 @@ void OutlierDetectionLb::EjectionTimer::Orphan() {
 void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
   if (!timer_handle_.has_value()) return;
   timer_handle_.reset();
-  if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-    LOG(INFO) << "[outlier_detection_lb " << parent_.get()
-              << "] ejection timer running";
-  }
+  GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+      << "[outlier_detection_lb " << parent_.get()
+      << "] ejection timer running";
   std::map<EndpointState*, double> success_rate_ejection_candidates;
   std::map<EndpointState*, double> failure_percentage_ejection_candidates;
   size_t ejected_host_count = 0;
@@ -967,10 +958,9 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
            (current_percent < config.max_ejection_percent))) {
         // Eject and record the timestamp for use when ejecting addresses in
         // this iteration.
-        if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-          LOG(INFO) << "[outlier_detection_lb " << parent_.get()
-                    << "] ejecting candidate";
-        }
+        GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+            << "[outlier_detection_lb " << parent_.get()
+            << "] ejecting candidate";
         candidate.first->Eject(time_now);
         ++ejected_host_count;
       }
@@ -1014,10 +1004,9 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
            (current_percent < config.max_ejection_percent))) {
         // Eject and record the timestamp for use when ejecting addresses in
        // this iteration.
-        if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
-          LOG(INFO) << "[outlier_detection_lb " << parent_.get()
-                    << "] ejecting candidate";
-        }
+        GRPC_TRACE_LOG(outlier_detection_lb, INFO)
+            << "[outlier_detection_lb " << parent_.get()
+            << "] ejecting candidate";
         candidate.first->Eject(time_now);
         ++ejected_host_count;
       }
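
Editor's note: the EjectionTimer constructor above shows the recurring EventEngine timer idiom in this codebase: capture a strong ref in the closure so the object outlives the delay, and rebuild the execution contexts before touching policy state. A reduced sketch with the surrounding types assumed:

    // Sketch of the timer arming visible above; only the shape matters here.
    // The second context line is assumed (the rendered hunk cuts off there).
    timer_handle_ = engine->RunAfter(interval, [self = Ref()]() mutable {
      ApplicationCallbackExecCtx callback_exec_ctx;  // callback-level context
      ExecCtx exec_ctx;                              // core-level context
      self->OnTimer();  // re-enter the policy with contexts in place
    });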

src/core/load_balancing/pick_first/pick_first.cc
@@ -631,10 +631,10 @@ void PickFirst::GoIdle() {
 void PickFirst::HealthWatcher::OnConnectivityStateChange(
     grpc_connectivity_state new_state, absl::Status status) {
   if (policy_->health_watcher_ != this) return;
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: "
-              << ConnectivityStateName(new_state) << " (" << status << ")";
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get()
+      << "] health watch state update: " << ConnectivityStateName(new_state)
+      << " (" << status << ")";
   switch (new_state) {
     case GRPC_CHANNEL_READY:
       policy_->channel_control_helper()->UpdateState(
@@ -672,10 +672,9 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
     : subchannel_data_(subchannel_data),
       pick_first_(subchannel_data_->subchannel_list_->policy_),
       subchannel_(std::move(subchannel)) {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this
-              << " (subchannel " << subchannel_.get() << "): starting watch";
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << pick_first_.get() << "] subchannel state " << this
+      << " (subchannel " << subchannel_.get() << "): starting watch";
   auto watcher = std::make_unique<Watcher>(Ref(DEBUG_LOCATION, "Watcher"));
   watcher_ = watcher.get();
   subchannel_->WatchConnectivityState(std::move(watcher));
@@ -696,10 +695,9 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
 }
 void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "Pick First " << pick_first_.get() << " selected subchannel "
-              << subchannel_.get();
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "Pick First " << pick_first_.get() << " selected subchannel "
+      << subchannel_.get();
   CHECK_NE(subchannel_data_, nullptr);
   pick_first_->UnsetSelectedSubchannel();  // Cancel health watch, if any.
   pick_first_->selected_ = std::move(subchannel_data_->subchannel_state_);
@@ -1006,10 +1004,9 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
       args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING)
                 .Remove(
                     GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list "
-              << this << " - channel args: " << args_.ToString();
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Creating subchannel list " << this
+      << " - channel args: " << args_.ToString();
   if (addresses == nullptr) return;
   // Create a subchannel for each address.
   addresses->ForEach([&](const EndpointAddresses& address) {
@@ -1037,17 +1034,13 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
 }
 PickFirst::SubchannelList::~SubchannelList() {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list "
-              << this;
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
 }
 void PickFirst::SubchannelList::Orphan() {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list "
-              << this;
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
   CHECK(!shutting_down_);
   shutting_down_ = true;
   // Cancel Happy Eyeballs timer, if any.
@@ -1089,10 +1082,9 @@ void PickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
   // We didn't find another subchannel not in state TRANSIENT_FAILURE,
   // so report TRANSIENT_FAILURE and switch to a mode in which we try to
   // connect to all addresses in parallel.
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this
-              << " failed to connect to all subchannels";
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "Pick First " << policy_.get() << " subchannel list " << this
+      << " failed to connect to all subchannels";
   // Re-resolve and report TRANSIENT_FAILURE.
   policy_->channel_control_helper()->RequestReresolution();
   absl::Status status = absl::UnavailableError(
@@ -1564,10 +1556,10 @@ void OldPickFirst::UnsetSelectedSubchannel() {
 void OldPickFirst::HealthWatcher::OnConnectivityStateChange(
     grpc_connectivity_state new_state, absl::Status status) {
   if (policy_->health_watcher_ != this) return;
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: "
-              << ConnectivityStateName(new_state) << " (" << status << ")";
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get()
+      << "] health watch state update: " << ConnectivityStateName(new_state)
+      << " (" << status << ")";
   switch (new_state) {
     case GRPC_CHANNEL_READY:
       policy_->channel_control_helper()->UpdateState(
@@ -1912,10 +1904,8 @@ void OldPickFirst::SubchannelList::SubchannelData::
     p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
   }
   // Cases 1 and 2.
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "Pick First " << p << " selected subchannel "
-              << subchannel_.get();
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "Pick First " << p << " selected subchannel " << subchannel_.get();
   p->selected_ = this;
   // If health checking is enabled, start the health watch, but don't
   // report a new picker -- we want to stay in CONNECTING while we wait
@@ -1957,10 +1947,9 @@ OldPickFirst::SubchannelList::SubchannelList(
       args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING)
                 .Remove(
                     GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list "
-              << this << " - channel args: " << args_.ToString();
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Creating subchannel list " << this
+      << " - channel args: " << args_.ToString();
   if (addresses == nullptr) return;
   // Create a subchannel for each address.
   addresses->ForEach([&](const EndpointAddresses& address) {
@@ -1987,17 +1976,13 @@ OldPickFirst::SubchannelList::SubchannelList(
 }
 OldPickFirst::SubchannelList::~SubchannelList() {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list "
-              << this;
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
 }
 void OldPickFirst::SubchannelList::Orphan() {
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list "
-              << this;
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
   CHECK(!shutting_down_);
   shutting_down_ = true;
   for (auto& sd : subchannels_) {
@@ -2041,10 +2026,9 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
   // We didn't find another subchannel not in state TRANSIENT_FAILURE,
   // so report TRANSIENT_FAILURE and switch to a mode in which we try to
   // connect to all addresses in parallel.
-  if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
-    LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this
-              << " failed to connect to all subchannels";
-  }
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "Pick First " << policy_.get() << " subchannel list " << this
+      << " failed to connect to all subchannels";
   // In case 2, swap to the new subchannel list. This means reporting
   // TRANSIENT_FAILURE and dropping the existing (working) connection,
   // but we can't ignore what the control plane has told us.

src/core/load_balancing/priority/priority.cc
@@ -402,10 +402,9 @@ void PriorityLb::ChoosePriorityLocked() {
        ++priority) {
     // If the child for the priority does not exist yet, create it.
     const std::string& child_name = config_->priorities()[priority];
-    if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
-      LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority
-                << ", child " << child_name;
-    }
+    GRPC_TRACE_LOG(priority_lb, INFO)
+        << "[priority_lb " << this << "] trying priority " << priority
+        << ", child " << child_name;
     auto& child = children_[child_name];
     // Create child if needed.
     if (child == nullptr) {
@@ -461,10 +460,9 @@ void PriorityLb::ChoosePriorityLocked() {
        ++priority) {
     // If the child for the priority does not exist yet, create it.
     const std::string& child_name = config_->priorities()[priority];
-    if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
-      LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority
-                << ", child " << child_name;
-    }
+    GRPC_TRACE_LOG(priority_lb, INFO)
+        << "[priority_lb " << this << "] trying priority " << priority
+        << ", child " << child_name;
     auto& child = children_[child_name];
     CHECK(child != nullptr);
     if (child->connectivity_state() == GRPC_CHANNEL_CONNECTING) {
@@ -626,19 +624,17 @@ void PriorityLb::ChildPriority::FailoverTimer::OnTimerLocked() {
 PriorityLb::ChildPriority::ChildPriority(
     RefCountedPtr<PriorityLb> priority_policy, std::string name)
     : priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
-  if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
-    LOG(INFO) << "[priority_lb " << priority_policy_.get()
-              << "] creating child " << name_ << " (" << this << ")";
-  }
+  GRPC_TRACE_LOG(priority_lb, INFO)
+      << "[priority_lb " << priority_policy_.get() << "] creating child "
+      << name_ << " (" << this << ")";
   // Start the failover timer.
   failover_timer_ = MakeOrphanable<FailoverTimer>(Ref());
 }
 void PriorityLb::ChildPriority::Orphan() {
-  if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
-    LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
-              << name_ << " (" << this << "): orphaned";
-  }
+  GRPC_TRACE_LOG(priority_lb, INFO)
+      << "[priority_lb " << priority_policy_.get() << "] child " << name_
+      << " (" << this << "): orphaned";
   failover_timer_.reset();
   deactivation_timer_.reset();
   // Remove the child policy's interested_parties pollset_set from the
@@ -665,10 +661,9 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
     RefCountedPtr<LoadBalancingPolicy::Config> config,
     bool ignore_reresolution_requests) {
   if (priority_policy_->shutting_down_) return absl::OkStatus();
-  if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
-    LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
-              << name_ << " (" << this << "): start update";
-  }
+  GRPC_TRACE_LOG(priority_lb, INFO)
+      << "[priority_lb " << priority_policy_.get() << "] child " << name_
+      << " (" << this << "): start update";
   ignore_reresolution_requests_ = ignore_reresolution_requests;
   // Create policy if needed.
   if (child_policy_ == nullptr) {

src/core/load_balancing/ring_hash/ring_hash.cc
@@ -655,10 +655,9 @@ absl::Status RingHash::UpdateLocked(UpdateArgs args) {
       }
     });
   } else {
-    if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
-      LOG(INFO) << "[RH " << this << "] received update with addresses error: "
-                << args.addresses.status();
-    }
+    GRPC_TRACE_LOG(ring_hash_lb, INFO)
+        << "[RH " << this << "] received update with addresses error: "
+        << args.addresses.status();
     // If we already have an endpoint list, then keep using the existing
     // list, but still report back that the update was not accepted.
     if (!endpoints_.empty()) return args.addresses.status();

src/core/load_balancing/rls/rls.cc
@@ -1039,10 +1039,9 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
       BuildKeyMap(config_->key_builder_map(), args.path,
                   lb_policy_->channel_control_helper()->GetAuthority(),
                   args.initial_metadata)};
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
-              << ": request keys: " << key.ToString();
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_.get() << "] picker=" << this
+      << ": request keys: " << key.ToString();
   Timestamp now = Timestamp::Now();
   MutexLock lock(&lb_policy_->mu_);
   if (lb_policy_->is_shutdown_) {
@@ -1077,10 +1076,9 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
   if (entry != nullptr) {
     // If the entry has non-expired data, use it.
     if (entry->data_expiration_time() >= now) {
-      if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-        LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
-                  << ": using cache entry " << entry;
-      }
+      GRPC_TRACE_LOG(rls_lb, INFO)
+          << "[rlslb " << lb_policy_.get() << "] picker=" << this
+          << ": using cache entry " << entry;
       return entry->Pick(args);
     }
     // If the entry is in backoff, then use the default target if set,
@@ -1093,29 +1091,26 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
     }
   }
   // RLS call pending. Queue the pick.
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
-              << ": RLS request pending; queuing pick";
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_.get() << "] picker=" << this
+      << ": RLS request pending; queuing pick";
   return PickResult::Queue();
 }
 LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail(
     const char* reason, PickArgs args, absl::Status status) {
   if (default_child_policy_ != nullptr) {
-    if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-      LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
-                << reason << "; using default target";
-    }
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
+        << reason << "; using default target";
     auto pick_result = default_child_policy_->Pick(args);
     lb_policy_->MaybeExportPickCount(kMetricDefaultTargetPicks,
                                      config_->default_target(), pick_result);
     return pick_result;
   }
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
-              << reason << "; failing pick";
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": " << reason
+      << "; failing pick";
   auto& stats_plugins =
       lb_policy_->channel_control_helper()->GetStatsPluginGroup();
   stats_plugins.AddCounter(kMetricFailedPicks, 1,
@@ -1204,10 +1199,9 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
                                      lb_policy_->cache_.lru_list_.end(), key)) {}
 void RlsLb::Cache::Entry::Orphan() {
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
-              << " " << lru_iterator_->ToString() << ": cache entry evicted";
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
+      << lru_iterator_->ToString() << ": cache entry evicted";
   is_shutdown_ = true;
   lb_policy_->cache_.lru_list_.erase(lru_iterator_);
   lru_iterator_ = lb_policy_->cache_.lru_list_.end();  // Just in case.
@@ -1403,26 +1397,22 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
         lb_policy_->RefAsSubclass<RlsLb>(DEBUG_LOCATION, "CacheEntry"), key);
     map_.emplace(key, OrphanablePtr<Entry>(entry));
     size_ += entry_size;
-    if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-      LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
-                << ": cache entry added, entry=" << entry;
-    }
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
+        << ": cache entry added, entry=" << entry;
     return entry;
   }
   // Entry found, so use it.
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
-              << ": found cache entry " << it->second.get();
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
+      << ": found cache entry " << it->second.get();
   it->second->MarkUsed();
   return it->second.get();
 }
 void RlsLb::Cache::Resize(size_t bytes) {
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes
-              << " bytes";
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << " bytes";
   size_limit_ = bytes;
   MaybeShrinkSize(size_limit_);
 }
@@ -1507,10 +1497,9 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
     auto map_it = map_.find(*lru_it);
     CHECK(map_it != map_.end());
     if (!map_it->second->CanEvict()) break;
-    if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-      LOG(INFO) << "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
-                << map_it->second.get() << " " << lru_it->ToString();
-    }
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
+        << map_it->second.get() << " " << lru_it->ToString();
     size_ -= map_it->second->Size();
     map_.erase(map_it);
   }
@@ -1648,10 +1637,9 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
 }
 void RlsLb::RlsChannel::Orphan() {
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
-              << ", channel=" << channel_.get() << ": shutdown";
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
+      << ", channel=" << channel_.get() << ": shutdown";
   is_shutdown_ = true;
   if (channel_ != nullptr) {
     // Remove channelz linkage.
@@ -1715,10 +1703,9 @@ RlsLb::RlsRequest::RlsRequest(
       backoff_state_(std::move(backoff_state)),
       reason_(reason),
       stale_header_data_(std::move(stale_header_data)) {
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
-              << ": RLS request created for key " << key_.ToString();
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
+      << ": RLS request created for key " << key_.ToString();
   GRPC_CLOSURE_INIT(&call_complete_cb_, OnRlsCallComplete, this, nullptr);
   ExecCtx::Run(
       DEBUG_LOCATION,
@@ -1731,10 +1718,9 @@ RlsLb::RlsRequest::~RlsRequest() { CHECK_EQ(call_, nullptr); }
 void RlsLb::RlsRequest::Orphan() {
   if (call_ != nullptr) {
-    if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-      LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
-                << " " << key_.ToString() << ": cancelling RLS call";
-    }
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << lb_policy_.get() << "] rls_request=" << this << " "
+        << key_.ToString() << ": cancelling RLS call";
     grpc_call_cancel_internal(call_);
   }
   Unref(DEBUG_LOCATION, "Orphan");
@@ -2031,10 +2017,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
                                  config_->default_target());
       created_default_child = true;
     } else {
-      if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-        LOG(INFO) << "[rlslb " << this
-                  << "] using existing child for default target";
-      }
+      GRPC_TRACE_LOG(rls_lb, INFO)
+          << "[rlslb " << this << "] using existing child for default target";
       default_child_policy_ =
          it->second->Ref(DEBUG_LOCATION, "DefaultChildPolicy");
     }
@@ -2063,10 +2047,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
       p.second->StartUpdate();
     }
   } else if (created_default_child) {
-    if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-      LOG(INFO) << "[rlslb " << this
-                << "] starting default child policy update";
-    }
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << this << "] starting default child policy update";
     default_child_policy_->StartUpdate();
   }
 }
@@ -2084,10 +2066,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
       }
     }
   } else if (created_default_child) {
-    if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-      LOG(INFO) << "[rlslb " << this
-                << "] finishing default child policy update";
-    }
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << this << "] finishing default child policy update";
    absl::Status status = default_child_policy_->MaybeFinishUpdate();
    if (!status.ok()) {
      errors.emplace_back(absl::StrCat("target ", config_->default_target(),
@@ -2186,10 +2166,9 @@ void RlsLb::UpdatePickerLocked() {
     if (is_shutdown_) return;
     for (auto& p : child_policy_map_) {
       grpc_connectivity_state child_state = p.second->connectivity_state();
-      if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-        LOG(INFO) << "[rlslb " << this << "] target " << p.second->target()
-                  << " in state " << ConnectivityStateName(child_state);
-      }
+      GRPC_TRACE_LOG(rls_lb, INFO)
+          << "[rlslb " << this << "] target " << p.second->target()
+          << " in state " << ConnectivityStateName(child_state);
       if (child_state == GRPC_CHANNEL_READY) {
        state = GRPC_CHANNEL_READY;
        break;
@@ -2208,10 +2187,8 @@ void RlsLb::UpdatePickerLocked() {
       }
     }
   }
-  if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
-    LOG(INFO) << "[rlslb " << this << "] reporting state "
-              << ConnectivityStateName(state);
-  }
+  GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] reporting state "
+                               << ConnectivityStateName(state);
   absl::Status status;
   if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
     status = absl::UnavailableError("no children available");
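
Editor's note: several rls.cc hunks above (Entry::Orphan, FindOrInsert, Resize, MaybeShrinkSize) operate on a classic two-structure LRU cache: a key-to-entry map plus a usage-ordered list, with eviction walking the stale end while entries remain evictable. A reduced, assumption-laden sketch of that structure (the real entry type and byte accounting are simplified away):

    #include <list>
    #include <string>
    #include <unordered_map>

    // Toy LRU: the front of lru_list_ is the stalest key.
    struct LruCache {
      std::unordered_map<std::string, int> map_;
      std::list<std::string> lru_list_;

      void MarkUsed(std::list<std::string>::iterator it) {
        // Move the element to the back without invalidating iterators.
        lru_list_.splice(lru_list_.end(), lru_list_, it);
      }
      void MaybeShrinkSize(size_t limit) {
        while (map_.size() > limit && !lru_list_.empty()) {
          map_.erase(lru_list_.front());  // evict the stalest entry
          lru_list_.pop_front();
        }
      }
    };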

src/core/load_balancing/round_robin/round_robin.cc
@@ -246,10 +246,9 @@ absl::Status RoundRobin::UpdateLocked(UpdateArgs args) {
     }
     addresses = args.addresses->get();
   } else {
-    if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
-      LOG(INFO) << "[RR " << this << "] received update with address error: "
-                << args.addresses.status();
-    }
+    GRPC_TRACE_LOG(round_robin, INFO)
+        << "[RR " << this
+        << "] received update with address error: " << args.addresses.status();
     // If we already have a child list, then keep using the existing
     // list, but still report back that the update was not accepted.
     if (endpoint_list_ != nullptr) return args.addresses.status();
@@ -313,10 +312,9 @@ void RoundRobin::RoundRobinEndpointList::RoundRobinEndpoint::OnStateUpdate(
                  << status << ")";
   }
   if (new_state == GRPC_CHANNEL_IDLE) {
-    if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
-      LOG(INFO) << "[RR " << round_robin << "] child " << this
-                << " reported IDLE; requesting connection";
-    }
+    GRPC_TRACE_LOG(round_robin, INFO)
+        << "[RR " << round_robin << "] child " << this
+        << " reported IDLE; requesting connection";
     ExitIdleLocked();
   }
   // If state changed, update state counters.
@@ -396,10 +394,9 @@ void RoundRobin::RoundRobinEndpointList::
   // 2) ANY child is CONNECTING => policy is CONNECTING.
   // 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
   if (num_ready_ > 0) {
-    if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
-      LOG(INFO) << "[RR " << round_robin << "] reporting READY with child list "
-                << this;
-    }
+    GRPC_TRACE_LOG(round_robin, INFO)
+        << "[RR " << round_robin << "] reporting READY with child list "
+        << this;
     std::vector<RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>> pickers;
     for (const auto& endpoint : endpoints()) {
       auto state = endpoint->connectivity_state();
@@ -412,10 +409,9 @@ void RoundRobin::RoundRobinEndpointList::
         GRPC_CHANNEL_READY, absl::OkStatus(),
         MakeRefCounted<Picker>(round_robin, std::move(pickers)));
   } else if (num_connecting_ > 0) {
-    if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
-      LOG(INFO) << "[RR " << round_robin
-                << "] reporting CONNECTING with child list " << this;
-    }
+    GRPC_TRACE_LOG(round_robin, INFO)
+        << "[RR " << round_robin << "] reporting CONNECTING with child list "
+        << this;
     round_robin->channel_control_helper()->UpdateState(
         GRPC_CHANNEL_CONNECTING, absl::Status(),
         MakeRefCounted<QueuePicker>(nullptr));
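
Editor's note: the numbered comment block in this hunk spells out the aggregation rule round_robin uses (and weighted_round_robin repeats below). As a pure function it is simply:

    // The three rules from the comments, in order of precedence.
    grpc_connectivity_state AggregateState(int num_ready, int num_connecting) {
      if (num_ready > 0) return GRPC_CHANNEL_READY;            // rule 1
      if (num_connecting > 0) return GRPC_CHANNEL_CONNECTING;  // rule 2
      return GRPC_CHANNEL_TRANSIENT_FAILURE;                   // rule 3
    }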

src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc
@@ -567,18 +567,14 @@ WeightedRoundRobin::Picker::Picker(RefCountedPtr<WeightedRoundRobin> wrr,
 }
 WeightedRoundRobin::Picker::~Picker() {
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-    LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
-              << "] destroying picker";
-  }
+  GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+      << "[WRR " << wrr_.get() << " picker " << this << "] destroying picker";
 }
 void WeightedRoundRobin::Picker::Orphaned() {
   MutexLock lock(&timer_mu_);
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-    LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
-              << "] cancelling timer";
-  }
+  GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+      << "[WRR " << wrr_.get() << " picker " << this << "] cancelling timer";
   wrr_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
   timer_handle_.reset();
   wrr_.reset();
@@ -644,25 +640,22 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
   stats_plugins.AddCounter(kMetricEndpointWeightStale, num_stale,
                            {wrr_->channel_control_helper()->GetTarget()},
                            {wrr_->locality_name_});
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-    LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
-              << "] new weights: " << absl::StrJoin(weights, " ");
-  }
+  GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+      << "[WRR " << wrr_.get() << " picker " << this
+      << "] new weights: " << absl::StrJoin(weights, " ");
   auto scheduler_or = StaticStrideScheduler::Make(
       weights, [this]() { return wrr_->scheduler_state_.fetch_add(1); });
   std::shared_ptr<StaticStrideScheduler> scheduler;
   if (scheduler_or.has_value()) {
     scheduler =
         std::make_shared<StaticStrideScheduler>(std::move(*scheduler_or));
-    if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-      LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
-                << "] new scheduler: " << scheduler.get();
-    }
+    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+        << "[WRR " << wrr_.get() << " picker " << this
+        << "] new scheduler: " << scheduler.get();
   } else {
-    if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-      LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
-                << "] no scheduler, falling back to RR";
-    }
+    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+        << "[WRR " << wrr_.get() << " picker " << this
+        << "] no scheduler, falling back to RR";
     stats_plugins.AddCounter(kMetricRrFallback, 1,
                              {wrr_->channel_control_helper()->GetTarget()},
                              {wrr_->locality_name_});
@@ -690,10 +683,9 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
         {
           MutexLock lock(&self->timer_mu_);
           if (self->timer_handle_.has_value()) {
-            if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-              LOG(INFO) << "[WRR " << self->wrr_.get() << " picker "
-                        << self.get() << "] timer fired";
-            }
+            GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+                << "[WRR " << self->wrr_.get() << " picker " << self.get()
+                << "] timer fired";
             self->BuildSchedulerAndStartTimerLocked();
           }
         }
@@ -715,11 +707,10 @@ WeightedRoundRobin::WeightedRoundRobin(Args args)
       locality_name_(channel_args()
                          .GetString(GRPC_ARG_LB_WEIGHTED_TARGET_CHILD)
                          .value_or("")) {
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-    LOG(INFO) << "[WRR " << this << "] Created -- locality_name=\""
-              << std::string(locality_name_) << "\"";
-  }
+  GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+      << "[WRR " << this << "] Created -- locality_name=\""
+      << std::string(locality_name_) << "\"";
 }
 WeightedRoundRobin::~WeightedRoundRobin() {
   if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
@@ -778,10 +769,9 @@ absl::Status WeightedRoundRobin::UpdateLocked(UpdateArgs args) {
         std::make_shared<EndpointAddressesListIterator>(EndpointAddressesList(
             ordered_addresses.begin(), ordered_addresses.end()));
   } else {
-    if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-      LOG(INFO) << "[WRR " << this << "] received update with address error: "
-                << args.addresses.status().ToString();
-    }
+    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+        << "[WRR " << this << "] received update with address error: "
+        << args.addresses.status().ToString();
     // If we already have an endpoint list, then keep using the existing
     // list, but still report back that the update was not accepted.
     if (endpoint_list_ != nullptr) return args.addresses.status();
@@ -893,10 +883,9 @@ void WeightedRoundRobin::WrrEndpointList::WrrEndpoint::OnStateUpdate(
                  << status << ")";
   }
   if (new_state == GRPC_CHANNEL_IDLE) {
-    if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-      LOG(INFO) << "[WRR " << wrr << "] child " << this
-                << " reported IDLE; requesting connection";
-    }
+    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+        << "[WRR " << wrr << "] child " << this
+        << " reported IDLE; requesting connection";
     ExitIdleLocked();
   } else if (new_state == GRPC_CHANNEL_READY) {
     // If we transition back to READY state, restart the blackout period.
@@ -990,18 +979,15 @@ void WeightedRoundRobin::WrrEndpointList::
   // 2) ANY child is CONNECTING => policy is CONNECTING.
   // 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
   if (num_ready_ > 0) {
-    if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-      LOG(INFO) << "[WRR " << wrr << "] reporting READY with endpoint list "
-                << this;
-    }
+    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+        << "[WRR " << wrr << "] reporting READY with endpoint list " << this;
     wrr->channel_control_helper()->UpdateState(
         GRPC_CHANNEL_READY, absl::Status(),
         MakeRefCounted<Picker>(wrr->RefAsSubclass<WeightedRoundRobin>(), this));
   } else if (num_connecting_ > 0) {
-    if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
-      LOG(INFO) << "[WRR " << wrr
-                << "] reporting CONNECTING with endpoint list " << this;
-    }
+    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+        << "[WRR " << wrr << "] reporting CONNECTING with endpoint list "
+        << this;
     wrr->channel_control_helper()->UpdateState(
         GRPC_CHANNEL_CONNECTING, absl::Status(),
         MakeRefCounted<QueuePicker>(nullptr));

src/core/load_balancing/weighted_target/weighted_target.cc
@@ -290,11 +290,10 @@ WeightedTargetLb::WeightedTargetLb(Args args)
 }
 WeightedTargetLb::~WeightedTargetLb() {
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
-    LOG(INFO) << "[weighted_target_lb " << this
-              << "] destroying weighted_target LB policy";
-  }
+  GRPC_TRACE_LOG(weighted_target_lb, INFO)
+      << "[weighted_target_lb " << this
+      << "] destroying weighted_target LB policy";
 }
 void WeightedTargetLb::ShutdownLocked() {
   if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
@@ -382,10 +381,9 @@ void WeightedTargetLb::UpdateStateLocked() {
   // all children. This avoids unnecessary picker churn while an update
   // is being propagated to our children.
   if (update_in_progress_) return;
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
-    LOG(INFO) << "[weighted_target_lb " << this
-              << "] scanning children to determine connectivity state";
-  }
+  GRPC_TRACE_LOG(weighted_target_lb, INFO)
+      << "[weighted_target_lb " << this
+      << "] scanning children to determine connectivity state";
   // Construct lists of child pickers with associated weights, one for
   // children that are in state READY and another for children that are
   // in state TRANSIENT_FAILURE. Each child is represented by a portion of
@@ -450,10 +448,9 @@ void WeightedTargetLb::UpdateStateLocked() {
   } else {
     connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
   }
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
-    LOG(INFO) << "[weighted_target_lb " << this << "] connectivity changed to "
-              << ConnectivityStateName(connectivity_state);
-  }
+  GRPC_TRACE_LOG(weighted_target_lb, INFO)
+      << "[weighted_target_lb " << this << "] connectivity changed to "
+      << ConnectivityStateName(connectivity_state);
   RefCountedPtr<SubchannelPicker> picker;
   absl::Status status;
   switch (connectivity_state) {
@@ -525,11 +522,10 @@ WeightedTargetLb::WeightedChild::WeightedChild(
     : weighted_target_policy_(std::move(weighted_target_policy)),
       name_(name),
       picker_(MakeRefCounted<QueuePicker>(nullptr)) {
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
-    LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
-              << "] created WeightedChild " << this << " for " << name_;
-  }
+  GRPC_TRACE_LOG(weighted_target_lb, INFO)
+      << "[weighted_target_lb " << weighted_target_policy_.get()
+      << "] created WeightedChild " << this << " for " << name_;
 }
 WeightedTargetLb::WeightedChild::~WeightedChild() {
   if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
@@ -657,10 +653,9 @@ void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
 void WeightedTargetLb::WeightedChild::DeactivateLocked() {
   // If already deactivated, don't do that again.
   if (weight_ == 0) return;
-  if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
-    LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
-              << "] WeightedChild " << this << " " << name_ << ": deactivating";
-  }
+  GRPC_TRACE_LOG(weighted_target_lb, INFO)
+      << "[weighted_target_lb " << weighted_target_policy_.get()
+      << "] WeightedChild " << this << " " << name_ << ": deactivating";
   // Set the child weight to 0 so that future picker won't contain this child.
   weight_ = 0;
   // Start a timer to delete the child.

src/core/load_balancing/xds/cds.cc
@@ -662,10 +662,9 @@ Json CdsLb::CreateChildPolicyConfigForLeafCluster(
        {"outlier_detection_experimental",
         Json::FromObject(std::move(outlier_detection_config))},
   })});
-  if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
-    LOG(INFO) << "[cdslb " << this << "] generated config for child policy: "
-              << JsonDump(outlier_detection_policy, /*indent=*/1);
-  }
+  GRPC_TRACE_LOG(cds_lb, INFO)
+      << "[cdslb " << this << "] generated config for child policy: "
+      << JsonDump(outlier_detection_policy, /*indent=*/1);
   return outlier_detection_policy;
 }
@@ -695,10 +694,9 @@ Json CdsLb::CreateChildPolicyConfigForAggregateCluster(
            {"priorities", Json::FromArray(std::move(priority_priorities))},
        })},
   })});
-  if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
-    LOG(INFO) << "[cdslb " << this << "] generated config for child policy: "
-              << JsonDump(json, /*indent=*/1);
-  }
+  GRPC_TRACE_LOG(cds_lb, INFO)
+      << "[cdslb " << this << "] generated config for child policy: "
+      << JsonDump(json, /*indent=*/1);
   return json;
 }
@@ -714,10 +712,8 @@ void CdsLb::ResetState() {
 }
 void CdsLb::ReportTransientFailure(absl::Status status) {
-  if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
-    LOG(INFO) << "[cdslb " << this
-              << "] reporting TRANSIENT_FAILURE: " << status;
-  }
+  GRPC_TRACE_LOG(cds_lb, INFO)
+      << "[cdslb " << this << "] reporting TRANSIENT_FAILURE: " << status;
   ResetState();
   channel_control_helper()->UpdateState(
       GRPC_CHANNEL_TRANSIENT_FAILURE, status,

src/core/load_balancing/xds/xds_cluster_impl.cc
@@ -404,11 +404,10 @@ XdsClusterImplLb::Picker::Picker(XdsClusterImplLb* xds_cluster_impl_lb,
       drop_config_(xds_cluster_impl_lb->drop_config_),
       drop_stats_(xds_cluster_impl_lb->drop_stats_),
       picker_(std::move(picker)) {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
-    LOG(INFO) << "[xds_cluster_impl_lb " << xds_cluster_impl_lb
-              << "] constructed new picker " << this;
-  }
+  GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
+      << "[xds_cluster_impl_lb " << xds_cluster_impl_lb
+      << "] constructed new picker " << this;
 }
 LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
     LoadBalancingPolicy::PickArgs args) {
@@ -500,18 +499,16 @@ LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
 XdsClusterImplLb::XdsClusterImplLb(RefCountedPtr<GrpcXdsClient> xds_client,
                                    Args args)
     : LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
-    LOG(INFO) << "[xds_cluster_impl_lb " << this
-              << "] created -- using xds client " << xds_client_.get();
-  }
+  GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
+      << "[xds_cluster_impl_lb " << this << "] created -- using xds client "
+      << xds_client_.get();
 }
 XdsClusterImplLb::~XdsClusterImplLb() {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
-    LOG(INFO) << "[xds_cluster_impl_lb " << this
-              << "] destroying xds_cluster_impl LB policy";
-  }
+  GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
+      << "[xds_cluster_impl_lb " << this
+      << "] destroying xds_cluster_impl LB policy";
 }
 void XdsClusterImplLb::ShutdownLocked() {
   if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
@@ -537,10 +534,9 @@ void XdsClusterImplLb::ResetState() {
 }
 void XdsClusterImplLb::ReportTransientFailure(absl::Status status) {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
-    LOG(INFO) << "[xds_cluster_impl_lb " << this
-              << "] reporting TRANSIENT_FAILURE: " << status;
-  }
+  GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
+      << "[xds_cluster_impl_lb " << this
+      << "] reporting TRANSIENT_FAILURE: " << status;
   ResetState();
   channel_control_helper()->UpdateState(
       GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@@ -769,10 +765,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsClusterImplLb::CreateChildPolicyLocked(
   OrphanablePtr<LoadBalancingPolicy> lb_policy =
       MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
                                          &xds_cluster_impl_lb_trace);
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
-    LOG(INFO) << "[xds_cluster_impl_lb " << this
-              << "] Created new child policy handler " << lb_policy.get();
-  }
+  GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
+      << "[xds_cluster_impl_lb " << this
+      << "] Created new child policy handler " << lb_policy.get();
   // Add our interested_parties pollset_set to that of the newly created
   // child policy. This will make the child policy progress upon activity on
   // this policy, which in turn is tied to the application's call.
@@ -796,10 +791,9 @@ absl::Status XdsClusterImplLb::UpdateChildPolicyLocked(
   update_args.args =
       args.Set(GRPC_ARG_XDS_CLUSTER_NAME, config_->cluster_name());
   // Update the policy.
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
-    LOG(INFO) << "[xds_cluster_impl_lb " << this
-              << "] Updating child policy handler " << child_policy_.get();
-  }
+  GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
+      << "[xds_cluster_impl_lb " << this << "] Updating child policy handler "
+      << child_policy_.get();
   return child_policy_->UpdateLocked(std::move(update_args));
 }

src/core/load_balancing/xds/xds_cluster_manager.cc
@@ -249,11 +249,10 @@ XdsClusterManagerLb::XdsClusterManagerLb(Args args)
     : LoadBalancingPolicy(std::move(args)) {}
 XdsClusterManagerLb::~XdsClusterManagerLb() {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
-    LOG(INFO) << "[xds_cluster_manager_lb " << this
-              << "] destroying xds_cluster_manager LB policy";
-  }
+  GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
+      << "[xds_cluster_manager_lb " << this
+      << "] destroying xds_cluster_manager LB policy";
 }
 void XdsClusterManagerLb::ShutdownLocked() {
   if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
@@ -406,17 +405,15 @@ XdsClusterManagerLb::ClusterChild::ClusterChild(
     : xds_cluster_manager_policy_(std::move(xds_cluster_manager_policy)),
       name_(name),
       picker_(MakeRefCounted<QueuePicker>(nullptr)) {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
-    LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
-              << "] created ClusterChild " << this << " for " << name_;
-  }
+  GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
+      << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
+      << "] created ClusterChild " << this << " for " << name_;
 }
 XdsClusterManagerLb::ClusterChild::~ClusterChild() {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
-    LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
-              << "] ClusterChild " << this << ": destroying child";
-  }
+  GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
+      << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
+      << "] ClusterChild " << this << ": destroying child";
   xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild");
 }

@ -464,11 +464,10 @@ XdsOverrideHostLb::Picker::Picker(
: policy_(std::move(xds_override_host_lb)),
picker_(std::move(picker)),
override_host_health_status_set_(override_host_health_status_set) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << policy_.get()
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get()
<< "] constructed new picker " << this;
}
}
absl::optional<LoadBalancingPolicy::PickResult>
XdsOverrideHostLb::Picker::PickOverridenHost(
@ -605,10 +604,9 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
// Min time between timer runs is 5s so that we don't kill ourselves
// with lock contention and CPU usage due to sweeps over the map.
duration = std::max(duration, Duration::Seconds(5));
-  if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-    LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer "
-              << this << ": subchannel cleanup pass will run in " << duration;
-  }
+  GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+      << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
+      << ": subchannel cleanup pass will run in " << duration;
timer_handle_ = policy_->channel_control_helper()->GetEventEngine()->RunAfter(
duration, [self = RefAsSubclass<IdleTimer>()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
@ -622,10 +620,9 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
void XdsOverrideHostLb::IdleTimer::Orphan() {
if (timer_handle_.has_value()) {
-    if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-      LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer "
-                << this << ": cancelling";
-    }
+    GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+        << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
+        << ": cancelling";
policy_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset();
}
@ -635,10 +632,9 @@ void XdsOverrideHostLb::IdleTimer::Orphan() {
void XdsOverrideHostLb::IdleTimer::OnTimerLocked() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
-    if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-      LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer "
-                << this << ": timer fired";
-    }
+    GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+        << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
+        << ": timer fired";
policy_->CleanupSubchannels();
}
}
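Taken together, the hunks above cover the idle timer's whole lifecycle: construction clamps the sweep period to at least 5 seconds and schedules one run, Orphan() cancels a still-pending run, and OnTimerLocked() clears the handle before sweeping, so a late cancel is a no-op. A simplified standalone sketch of that state machine (plain C++, not gRPC's real EventEngine types):

    #include <optional>

    // Sketch: the optional handle doubles as the "still pending" flag.
    struct IdleTimerSketch {
      std::optional<int> handle;  // stands in for an EventEngine task handle
      void Schedule() { handle = 42; }  // RunAfter(duration, ...) in real code
      void Orphan() {                   // cancel only if still pending
        if (handle.has_value()) { /* Cancel(*handle) */ handle.reset(); }
      }
      void OnTimer() {                  // fire the sweep at most once
        if (handle.has_value()) { handle.reset(); /* CleanupSubchannels() */ }
      }
    };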
@ -655,11 +651,10 @@ XdsOverrideHostLb::XdsOverrideHostLb(Args args)
}
XdsOverrideHostLb::~XdsOverrideHostLb() {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-    LOG(INFO) << "[xds_override_host_lb " << this
-              << "] destroying xds_override_host LB policy";
-  }
+  GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+      << "[xds_override_host_lb " << this
+      << "] destroying xds_override_host LB policy";
}
void XdsOverrideHostLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
@ -695,10 +690,9 @@ void XdsOverrideHostLb::ResetState() {
}
void XdsOverrideHostLb::ReportTransientFailure(absl::Status status) {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-    LOG(INFO) << "[xds_override_host_lb " << this
-              << "] reporting TRANSIENT_FAILURE: " << status;
-  }
+  GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+      << "[xds_override_host_lb " << this
+      << "] reporting TRANSIENT_FAILURE: " << status;
ResetState();
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -790,11 +784,10 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
args.addresses =
std::make_shared<ChildEndpointIterator>(std::move(*args.addresses));
} else {
-    if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-      LOG(INFO) << "[xds_override_host_lb " << this
-                << "] address error: " << args.addresses.status();
-    }
+    GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+        << "[xds_override_host_lb " << this
+        << "] address error: " << args.addresses.status();
}
// Create child policy if needed.
if (child_policy_ == nullptr) {
child_policy_ = CreateChildPolicyLocked(args.args);
@ -805,10 +798,9 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note);
update_args.config = new_config->child_config();
update_args.args = args_;
-  if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-    LOG(INFO) << "[xds_override_host_lb " << this
-              << "] Updating child policy handler " << child_policy_.get();
-  }
+  GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+      << "[xds_override_host_lb " << this << "] Updating child policy handler "
+      << child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -837,10 +829,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsOverrideHostLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_override_host_lb_trace);
-  if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-    LOG(INFO) << "[xds_override_host_lb " << this
-              << "] Created new child policy handler " << lb_policy.get();
-  }
+  GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+      << "[xds_override_host_lb " << this
+      << "] Created new child policy handler " << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -877,10 +868,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
for (const auto& address : endpoint.addresses()) {
auto key = grpc_sockaddr_to_string(&address, /*normalize=*/false);
if (!key.ok()) {
-        if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-          LOG(INFO) << "[xds_override_host_lb " << this
-                    << "] no key for endpoint address; not adding to map";
-        }
+        GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+            << "[xds_override_host_lb " << this
+            << "] no key for endpoint address; not adding to map";
} else {
addresses.push_back(*std::move(key));
}
@ -907,10 +897,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
MutexLock lock(&mu_);
for (auto it = subchannel_map_.begin(); it != subchannel_map_.end();) {
if (addresses_for_map.find(it->first) == addresses_for_map.end()) {
-      if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-        LOG(INFO) << "[xds_override_host_lb " << this << "] removing map key "
-                  << it->first;
-      }
+      GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+          << "[xds_override_host_lb " << this << "] removing map key "
+          << it->first;
it->second->UnsetSubchannel(&subchannel_refs_to_drop);
it = subchannel_map_.erase(it);
} else {
@ -922,10 +911,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
auto& address_info = p.second;
auto it = subchannel_map_.find(address);
if (it == subchannel_map_.end()) {
-      if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-        LOG(INFO) << "[xds_override_host_lb " << this << "] adding map key "
-                  << address;
-      }
+      GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+          << "[xds_override_host_lb " << this << "] adding map key "
+          << address;
it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>())
.first;
}
@ -973,10 +961,9 @@ XdsOverrideHostLb::AdoptSubchannel(
}
void XdsOverrideHostLb::CreateSubchannelForAddress(absl::string_view address) {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-    LOG(INFO) << "[xds_override_host_lb " << this
-              << "] creating owned subchannel for " << address;
-  }
+  GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+      << "[xds_override_host_lb " << this << "] creating owned subchannel for "
+      << address;
auto addr = StringToSockaddr(address);
CHECK(addr.ok());
// Note: We don't currently have any cases where per_address_args need to
@ -1016,10 +1003,9 @@ void XdsOverrideHostLb::CleanupSubchannels() {
if (p.second->last_used_time() <= idle_threshold) {
auto subchannel = p.second->TakeOwnedSubchannel();
if (subchannel != nullptr) {
-          if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-            LOG(INFO) << "[xds_override_host_lb " << this
-                      << "] dropping subchannel for " << p.first;
-          }
+          GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+              << "[xds_override_host_lb " << this
+              << "] dropping subchannel for " << p.first;
subchannel_refs_to_drop.push_back(std::move(subchannel));
}
} else {
@ -1093,10 +1079,9 @@ void XdsOverrideHostLb::SubchannelWrapper::CancelConnectivityStateWatch(
}
void XdsOverrideHostLb::SubchannelWrapper::Orphaned() {
-  if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-    LOG(INFO) << "[xds_override_host_lb " << policy_.get()
-              << "] subchannel wrapper " << this << " orphaned";
-  }
+  GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+      << "[xds_override_host_lb " << policy_.get() << "] subchannel wrapper "
+      << this << " orphaned";
if (!IsWorkSerializerDispatchEnabled()) {
wrapped_subchannel()->CancelConnectivityStateWatch(watcher_);
if (subchannel_entry_ != nullptr) {
@ -1206,20 +1191,19 @@ void XdsOverrideHostLb::SubchannelEntry::OnSubchannelWrapperOrphan(
auto* subchannel = GetSubchannel();
if (subchannel != wrapper) return;
if (last_used_time_ < (Timestamp::Now() - connection_idle_timeout)) {
-    if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-      LOG(INFO) << "[xds_override_host_lb] removing unowned subchannel wrapper "
-                << subchannel;
-    }
+    GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+        << "[xds_override_host_lb] removing unowned subchannel "
+           "wrapper "
+        << subchannel;
subchannel_ = nullptr;
} else {
// The subchannel is being released by the child policy, but it
// is still within its idle timeout, so we make a new copy of
// the wrapper with the same underlying subchannel, and we hold
// our own ref to it.
-    if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
-      LOG(INFO) << "[xds_override_host_lb] subchannel wrapper " << subchannel
-                << ": cloning to gain ownership";
-    }
+    GRPC_TRACE_LOG(xds_override_host_lb, INFO)
+        << "[xds_override_host_lb] subchannel wrapper " << subchannel
+        << ": cloning to gain ownership";
subchannel_ = wrapper->Clone();
}
}

@ -239,10 +239,9 @@ absl::Status XdsWrrLocalityLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note);
update_args.args = std::move(args.args);
// Update the policy.
-  if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
-    LOG(INFO) << "[xds_wrr_locality_lb " << this << "] updating child policy "
-              << child_policy_.get();
-  }
+  GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
+      << "[xds_wrr_locality_lb " << this << "] updating child policy "
+      << child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -256,10 +255,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsWrrLocalityLb::CreateChildPolicyLocked(
auto lb_policy =
CoreConfiguration::Get().lb_policy_registry().CreateLoadBalancingPolicy(
"weighted_target_experimental", std::move(lb_policy_args));
-  if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
-    LOG(INFO) << "[xds_wrr_locality_lb " << this
-              << "] created new child policy " << lb_policy.get();
-  }
+  GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
+      << "[xds_wrr_locality_lb " << this << "] created new child policy "
+      << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this LB policy, which in turn is tied to the application's call.

@ -181,6 +181,22 @@ class GrpcAresQuery final {
const std::string name_;
};
static absl::Status AresStatusToAbslStatus(int status,
absl::string_view error_msg) {
switch (status) {
case ARES_ECANCELLED:
return absl::CancelledError(error_msg);
case ARES_ENOTIMP:
return absl::UnimplementedError(error_msg);
case ARES_ENOTFOUND:
return absl::NotFoundError(error_msg);
case ARES_ECONNREFUSED:
return absl::UnavailableError(error_msg);
default:
return absl::UnknownError(error_msg);
}
}
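The helper above gives each c-ares failure a meaningful absl::StatusCode instead of the generic error the callbacks below previously built with GRPC_ERROR_CREATE. Illustrative use (hypothetical call, not part of the change):

    // ARES_ENOTFOUND surfaces as NotFound; codes outside the switch fall
    // back to Unknown.
    absl::Status s = AresStatusToAbslStatus(ARES_ENOTFOUND, "no such host");
    CHECK(absl::IsNotFound(s));
    CHECK(absl::IsUnknown(AresStatusToAbslStatus(ARES_EBADNAME, "bad name")));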
static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
grpc_ares_ev_driver* ev_driver)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
@ -715,8 +731,8 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
hr->qtype, hr->host, hr->is_balancer, ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_hostbyname_done_locked: %s", r,
error_msg.c_str());
-    grpc_error_handle error = GRPC_ERROR_CREATE(error_msg);
-    r->error = grpc_error_add_child(error, r->error);
+    r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
+                                    r->error);
}
destroy_hostbyname_request_locked(hr);
}
@ -761,8 +777,8 @@ static void on_srv_query_done_locked(void* arg, int status, int /*timeouts*/,
ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_srv_query_done_locked: %s", r,
error_msg.c_str());
-    grpc_error_handle error = GRPC_ERROR_CREATE(error_msg);
-    r->error = grpc_error_add_child(error, r->error);
+    r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
+                                    r->error);
}
delete q;
}
@ -780,7 +796,6 @@ static void on_txt_done_locked(void* arg, int status, int /*timeouts*/,
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr;
-  grpc_error_handle error;
if (status != ARES_SUCCESS) goto fail;
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked name=%s ARES_SUCCESS", r,
q->name().c_str());
@ -824,8 +839,8 @@ fail:
q->name(), ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked %s", r,
error_msg.c_str());
-  error = GRPC_ERROR_CREATE(error_msg);
-  r->error = grpc_error_add_child(error, r->error);
+  r->error =
+      grpc_error_add_child(AresStatusToAbslStatus(status, error_msg), r->error);
}
grpc_error_handle set_request_dns_server(grpc_ares_request* r,

@ -106,6 +106,20 @@ Histogram_10000_20 operator-(const Histogram_10000_20& left,
}
return result;
}
void HistogramCollector_1800000_40::Collect(
Histogram_1800000_40* result) const {
for (int i = 0; i < 40; i++) {
result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed);
}
}
Histogram_1800000_40 operator-(const Histogram_1800000_40& left,
const Histogram_1800000_40& right) {
Histogram_1800000_40 result;
for (int i = 0; i < 40; i++) {
result.buckets_[i] = left.buckets_[i] - right.buckets_[i];
}
return result;
}
const absl::string_view
GlobalStats::counter_name[static_cast<int>(Counter::COUNT)] = {
"client_calls_created",
@ -123,6 +137,8 @@ const absl::string_view
"http2_writes_begun",
"http2_transport_stalls",
"http2_stream_stalls",
"http2_hpack_hits",
"http2_hpack_misses",
"cq_pluck_creates",
"cq_next_creates",
"cq_callback_creates",
@ -161,6 +177,8 @@ const absl::string_view GlobalStats::counter_doc[static_cast<int>(
"control window",
"Number of times sending was completely stalled by the stream flow control "
"window",
"Number of HPACK cache hits",
"Number of HPACK cache misses (entries added but never used)",
"Number of completion queues created for cq_pluck (indicates sync api "
"usage)",
"Number of completion queues created for cq_next (indicates cq async api "
@ -192,6 +210,7 @@ const absl::string_view
"tcp_read_offer_iov_size",
"http2_send_message_size",
"http2_metadata_size",
"http2_hpack_entry_lifetime",
"wrr_subchannel_list_size",
"wrr_subchannel_ready_size",
"work_serializer_run_time_ms",
@ -223,6 +242,7 @@ const absl::string_view GlobalStats::histogram_doc[static_cast<int>(
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
"Number of bytes consumed by metadata, according to HPACK accounting rules",
"Lifetime of HPACK entries in the cache (in milliseconds)",
"Number of subchannels in a subchannel list at picker creation time",
"Number of READY subchannels in a subchannel list at picker creation time",
"Number of milliseconds work serializers run for",
@ -278,6 +298,15 @@ const int kStatsTable10[21] = {0, 1, 2, 4, 7, 12, 19,
const uint8_t kStatsTable11[23] = {3, 3, 4, 5, 5, 6, 7, 8,
9, 9, 10, 11, 12, 12, 13, 14,
15, 15, 16, 17, 18, 18, 19};
const int kStatsTable12[41] = {
0, 1, 2, 3, 5, 8, 12, 18, 26,
37, 53, 76, 108, 153, 217, 308, 436, 617,
873, 1235, 1748, 2473, 3499, 4950, 7003, 9907, 14015,
19825, 28044, 39670, 56116, 79379, 112286, 158835, 224680, 317821,
449574, 635945, 899575, 1272492, 1800000};
const uint8_t kStatsTable13[37] = {
4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
} // namespace
int Histogram_100000_20::BucketFor(int value) {
if (value < 3) {
@ -405,6 +434,29 @@ int Histogram_10000_20::BucketFor(int value) {
}
}
}
int Histogram_1800000_40::BucketFor(int value) {
if (value < 4) {
if (value < 0) {
return 0;
} else {
return value;
}
} else {
if (value < 1048577) {
DblUint val;
val.dbl = value;
const int bucket =
kStatsTable13[((val.uint - 4616189618054758400ull) >> 51)];
return bucket - (value < kStatsTable12[bucket]);
} else {
if (value < 1272492) {
return 38;
} else {
return 39;
}
}
}
}
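The generated BucketFor above relies on a bit trick: store the value into an IEEE-754 double, subtract the bit pattern of 4.0 (0x4010000000000000, i.e. 4616189618054758400), and shift right by 51 so only the exponent plus the top mantissa bit survive. The resulting index advances twice per doubling of the value; kStatsTable13 snaps it to the real bucket, and the final `bucket - (value < kStatsTable12[bucket])` fixes the off-by-one at bucket boundaries. A standalone sketch of the same idea (helper name invented):

    #include <cstdint>
    #include <cstring>

    // Coarse log-scale index, two steps per power of two. memcpy is the
    // portable way to read a double's bit pattern (same as the DblUint union).
    static int CoarseLogIndex(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<int>((bits - 0x4010000000000000ull) >> 51);
    }
    // CoarseLogIndex(4.0) == 0, CoarseLogIndex(6.0) == 1,
    // CoarseLogIndex(8.0) == 2: ready to index a kStatsTable13-style table.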
GlobalStats::GlobalStats()
: client_calls_created{0},
server_calls_created{0},
@ -421,6 +473,8 @@ GlobalStats::GlobalStats()
http2_writes_begun{0},
http2_transport_stalls{0},
http2_stream_stalls{0},
http2_hpack_hits{0},
http2_hpack_misses{0},
cq_pluck_creates{0},
cq_next_creates{0},
cq_callback_creates{0},
@ -466,6 +520,9 @@ HistogramView GlobalStats::histogram(Histogram which) const {
case Histogram::kHttp2MetadataSize:
return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26,
http2_metadata_size.buckets()};
case Histogram::kHttp2HpackEntryLifetime:
return HistogramView{&Histogram_1800000_40::BucketFor, kStatsTable12, 40,
http2_hpack_entry_lifetime.buckets()};
case Histogram::kWrrSubchannelListSize:
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20,
wrr_subchannel_list_size.buckets()};
@ -560,6 +617,10 @@ std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
data.http2_transport_stalls.load(std::memory_order_relaxed);
result->http2_stream_stalls +=
data.http2_stream_stalls.load(std::memory_order_relaxed);
result->http2_hpack_hits +=
data.http2_hpack_hits.load(std::memory_order_relaxed);
result->http2_hpack_misses +=
data.http2_hpack_misses.load(std::memory_order_relaxed);
result->cq_pluck_creates +=
data.cq_pluck_creates.load(std::memory_order_relaxed);
result->cq_next_creates +=
@ -598,6 +659,8 @@ std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
data.tcp_read_offer_iov_size.Collect(&result->tcp_read_offer_iov_size);
data.http2_send_message_size.Collect(&result->http2_send_message_size);
data.http2_metadata_size.Collect(&result->http2_metadata_size);
data.http2_hpack_entry_lifetime.Collect(
&result->http2_hpack_entry_lifetime);
data.wrr_subchannel_list_size.Collect(&result->wrr_subchannel_list_size);
data.wrr_subchannel_ready_size.Collect(&result->wrr_subchannel_ready_size);
data.work_serializer_run_time_ms.Collect(
@ -664,6 +727,8 @@ std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
result->http2_transport_stalls =
http2_transport_stalls - other.http2_transport_stalls;
result->http2_stream_stalls = http2_stream_stalls - other.http2_stream_stalls;
result->http2_hpack_hits = http2_hpack_hits - other.http2_hpack_hits;
result->http2_hpack_misses = http2_hpack_misses - other.http2_hpack_misses;
result->cq_pluck_creates = cq_pluck_creates - other.cq_pluck_creates;
result->cq_next_creates = cq_next_creates - other.cq_next_creates;
result->cq_callback_creates = cq_callback_creates - other.cq_callback_creates;
@ -695,6 +760,8 @@ std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
result->http2_send_message_size =
http2_send_message_size - other.http2_send_message_size;
result->http2_metadata_size = http2_metadata_size - other.http2_metadata_size;
result->http2_hpack_entry_lifetime =
http2_hpack_entry_lifetime - other.http2_hpack_entry_lifetime;
result->wrr_subchannel_list_size =
wrr_subchannel_list_size - other.wrr_subchannel_list_size;
result->wrr_subchannel_ready_size =

@ -35,6 +35,7 @@ class Histogram_100000_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_100000_20 operator-(const Histogram_100000_20& left,
const Histogram_100000_20& right);
@ -58,6 +59,7 @@ class Histogram_65536_26 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 26; }
friend Histogram_65536_26 operator-(const Histogram_65536_26& left,
const Histogram_65536_26& right);
@ -81,6 +83,7 @@ class Histogram_100_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_100_20 operator-(const Histogram_100_20& left,
const Histogram_100_20& right);
@ -104,6 +107,7 @@ class Histogram_16777216_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_16777216_20 operator-(const Histogram_16777216_20& left,
const Histogram_16777216_20& right);
@ -127,6 +131,7 @@ class Histogram_80_10 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 10; }
friend Histogram_80_10 operator-(const Histogram_80_10& left,
const Histogram_80_10& right);
@ -150,6 +155,7 @@ class Histogram_10000_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_10000_20 operator-(const Histogram_10000_20& left,
const Histogram_10000_20& right);
@ -168,6 +174,30 @@ class HistogramCollector_10000_20 {
private:
std::atomic<uint64_t> buckets_[20]{};
};
class HistogramCollector_1800000_40;
class Histogram_1800000_40 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 40; }
friend Histogram_1800000_40 operator-(const Histogram_1800000_40& left,
const Histogram_1800000_40& right);
private:
friend class HistogramCollector_1800000_40;
uint64_t buckets_[40]{};
};
class HistogramCollector_1800000_40 {
public:
void Increment(int value) {
buckets_[Histogram_1800000_40::BucketFor(value)].fetch_add(
1, std::memory_order_relaxed);
}
void Collect(Histogram_1800000_40* result) const;
private:
std::atomic<uint64_t> buckets_[40]{};
};
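A small sketch of how the collector pair is used (hypothetical call sites; the real ones are generated into stats_data.cc):

    // Hot path: bump one atomic bucket, relaxed ordering, no locks.
    grpc_core::HistogramCollector_1800000_40 collector;
    collector.Increment(12);       // e.g. an HPACK entry lived 12 ms
    collector.Increment(250000);

    // Reporting path: fold the atomics into a plain, copyable snapshot.
    grpc_core::Histogram_1800000_40 snapshot;
    collector.Collect(&snapshot);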
struct GlobalStats {
enum class Counter {
kClientCallsCreated,
@ -185,6 +215,8 @@ struct GlobalStats {
kHttp2WritesBegun,
kHttp2TransportStalls,
kHttp2StreamStalls,
kHttp2HpackHits,
kHttp2HpackMisses,
kCqPluckCreates,
kCqNextCreates,
kCqCallbackCreates,
@ -213,6 +245,7 @@ struct GlobalStats {
kTcpReadOfferIovSize,
kHttp2SendMessageSize,
kHttp2MetadataSize,
kHttp2HpackEntryLifetime,
kWrrSubchannelListSize,
kWrrSubchannelReadySize,
kWorkSerializerRunTimeMs,
@ -259,6 +292,8 @@ struct GlobalStats {
uint64_t http2_writes_begun;
uint64_t http2_transport_stalls;
uint64_t http2_stream_stalls;
uint64_t http2_hpack_hits;
uint64_t http2_hpack_misses;
uint64_t cq_pluck_creates;
uint64_t cq_next_creates;
uint64_t cq_callback_creates;
@ -287,6 +322,7 @@ struct GlobalStats {
Histogram_80_10 tcp_read_offer_iov_size;
Histogram_16777216_20 http2_send_message_size;
Histogram_65536_26 http2_metadata_size;
Histogram_1800000_40 http2_hpack_entry_lifetime;
Histogram_10000_20 wrr_subchannel_list_size;
Histogram_10000_20 wrr_subchannel_ready_size;
Histogram_100000_20 work_serializer_run_time_ms;
@ -367,6 +403,12 @@ class GlobalStatsCollector {
data_.this_cpu().http2_stream_stalls.fetch_add(1,
std::memory_order_relaxed);
}
void IncrementHttp2HpackHits() {
data_.this_cpu().http2_hpack_hits.fetch_add(1, std::memory_order_relaxed);
}
void IncrementHttp2HpackMisses() {
data_.this_cpu().http2_hpack_misses.fetch_add(1, std::memory_order_relaxed);
}
void IncrementCqPluckCreates() {
data_.this_cpu().cq_pluck_creates.fetch_add(1, std::memory_order_relaxed);
}
@ -447,6 +489,9 @@ class GlobalStatsCollector {
void IncrementHttp2MetadataSize(int value) {
data_.this_cpu().http2_metadata_size.Increment(value);
}
void IncrementHttp2HpackEntryLifetime(int value) {
data_.this_cpu().http2_hpack_entry_lifetime.Increment(value);
}
void IncrementWrrSubchannelListSize(int value) {
data_.this_cpu().wrr_subchannel_list_size.Increment(value);
}
@ -526,6 +571,8 @@ class GlobalStatsCollector {
std::atomic<uint64_t> http2_writes_begun{0};
std::atomic<uint64_t> http2_transport_stalls{0};
std::atomic<uint64_t> http2_stream_stalls{0};
std::atomic<uint64_t> http2_hpack_hits{0};
std::atomic<uint64_t> http2_hpack_misses{0};
std::atomic<uint64_t> cq_pluck_creates{0};
std::atomic<uint64_t> cq_next_creates{0};
std::atomic<uint64_t> cq_callback_creates{0};
@ -551,6 +598,7 @@ class GlobalStatsCollector {
HistogramCollector_80_10 tcp_read_offer_iov_size;
HistogramCollector_16777216_20 http2_send_message_size;
HistogramCollector_65536_26 http2_metadata_size;
HistogramCollector_1800000_40 http2_hpack_entry_lifetime;
HistogramCollector_10000_20 wrr_subchannel_list_size;
HistogramCollector_10000_20 wrr_subchannel_ready_size;
HistogramCollector_100000_20 work_serializer_run_time_ms;

@ -80,6 +80,14 @@
max: 65536
buckets: 26
doc: Number of bytes consumed by metadata, according to HPACK accounting rules
- counter: http2_hpack_hits
doc: Number of HPACK cache hits
- counter: http2_hpack_misses
doc: Number of HPACK cache misses (entries added but never used)
- histogram: http2_hpack_entry_lifetime
doc: Lifetime of HPACK entries in the cache (in milliseconds)
max: 1800000
buckets: 40
# completion queues
- counter: cq_pluck_creates
doc: Number of completion queues created for cq_pluck (indicates sync api usage)

@ -28,6 +28,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/dump_args.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/transport_security_grpc.h"
@ -210,6 +211,8 @@ static tsi_result tsi_fake_frame_decode(const unsigned char* incoming_bytes,
frame->offset += to_read_size;
available_size -= to_read_size;
frame->size = load32_little_endian(frame->data);
if (frame->size < 4) return TSI_DATA_CORRUPTED;
if (frame->size > 16 * 1024 * 1024) return TSI_DATA_CORRUPTED;
tsi_fake_frame_ensure_size(frame);
}
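The two added checks validate the fake frame's 4-byte little-endian length prefix: the length counts the header itself, so anything under 4 cannot be a frame, and the 16 MiB cap stops a corrupted length from driving a huge allocation in tsi_fake_frame_ensure_size. A sketch of the prefix decode being guarded (the load helper is re-declared here for illustration; the real one lives in this file):

    #include <cstdint>

    // Assumed shape of load32_little_endian.
    static uint32_t Load32LE(const unsigned char* b) {
      return static_cast<uint32_t>(b[0]) | (static_cast<uint32_t>(b[1]) << 8) |
             (static_cast<uint32_t>(b[2]) << 16) |
             (static_cast<uint32_t>(b[3]) << 24);
    }
    // frame_size = Load32LE(header); reject frame_size < 4 (cannot even
    // cover the header) and frame_size > 16 * 1024 * 1024 (implausible).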

@ -0,0 +1,86 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
#define GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
#include <memory>
#include <utility>
#include "absl/log/check.h"
#include "absl/numeric/bits.h"
namespace grpc_core {
// Like std::unique_ptr, but also includes a small bitset stored in the lower
// bits of the underlying T*.
template <typename T, size_t kBits>
class UniquePtrWithBitset {
public:
UniquePtrWithBitset() : p_(0) {}
// NOLINTNEXTLINE(google-explicit-constructor)
UniquePtrWithBitset(std::nullptr_t) : p_(0) {}
explicit UniquePtrWithBitset(T* p) : p_(reinterpret_cast<uintptr_t>(p)) {}
// NOLINTNEXTLINE(google-explicit-constructor)
UniquePtrWithBitset(std::unique_ptr<T>&& p)
: UniquePtrWithBitset(p.release()) {}
~UniquePtrWithBitset() {
DCHECK_LE(kBits, static_cast<size_t>(absl::countr_zero(alignof(T))));
delete get();
}
UniquePtrWithBitset(const UniquePtrWithBitset&) = delete;
UniquePtrWithBitset& operator=(const UniquePtrWithBitset&) = delete;
UniquePtrWithBitset(UniquePtrWithBitset&& other) noexcept
: p_(std::exchange(other.p_, 0)) {}
UniquePtrWithBitset& operator=(UniquePtrWithBitset&& other) noexcept {
p_ = std::exchange(other.p_, 0);
return *this;
}
T* get() const { return reinterpret_cast<T*>(p_ & ~kBitMask); }
T* operator->() const { return get(); }
T& operator*() const { return *get(); }
explicit operator bool() const { return get() != nullptr; }
void reset(T* p = nullptr) {
uintptr_t bits = p_ & kBitMask;
delete get();
p_ = reinterpret_cast<uintptr_t>(p) | bits;
}
void SetBit(size_t bit) {
DCHECK_LT(bit, kBits);
p_ |= 1 << bit;
}
void ClearBit(size_t bit) {
DCHECK_LT(bit, kBits);
p_ &= ~(1 << bit);
}
bool TestBit(size_t bit) const {
DCHECK_LT(bit, kBits);
return p_ & (1 << bit);
}
friend bool operator==(const UniquePtrWithBitset& a,
const UniquePtrWithBitset& b) {
return a.p_ == b.p_;
}
private:
static constexpr uintptr_t kBitMask = (1 << kBits) - 1;
uintptr_t p_;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
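A short usage sketch for the new helper (Node and the flag meaning are invented for illustration): because the pointee's alignment guarantees the low kBits of the pointer are zero, the flags ride along at no size cost.

    // Assumes #include "src/core/util/unique_ptr_with_bitset.h".
    #include <memory>

    struct alignas(8) Node { int value = 0; };  // 3 zero low bits available

    void Sketch() {
      grpc_core::UniquePtrWithBitset<Node, 2> p(std::make_unique<Node>());
      p.SetBit(0);                     // tag the pointer in place
      if (p.TestBit(0)) p->value = 1;  // get() masks the tag bits back out
      p.reset();                       // deletes the Node, keeps the bits
    }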

@ -1564,7 +1564,8 @@ XdsClient::XdsClient(
}
CHECK(bootstrap_ != nullptr);
if (bootstrap_->node() != nullptr) {
LOG(INFO) << "[xds_client " << this
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this
<< "] xDS node ID: " << bootstrap_->node()->id();
}
}

@ -44,8 +44,8 @@ module GRPC
include Core::CallOps
extend Forwardable
attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert
-    def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=,
-                   :trailing_metadata, :status
+    def_delegators :@call, :cancel, :cancel_with_status, :metadata,
+                   :write_flag, :write_flag=, :trailing_metadata, :status
# client_invoke begins a client invocation.
#
@ -620,6 +620,8 @@ module GRPC
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
def start_call(metadata = {})
# TODO(apolcyn): we should cancel and clean up the call in case this
# send initial MD op fails.
merge_metadata_to_send(metadata) && send_initial_metadata
end
@ -665,9 +667,10 @@ module GRPC
# Operation limits access to an ActiveCall's methods for use as
# a Operation on the client.
-  Operation = view_class(:cancel, :cancelled?, :deadline, :execute,
-                         :metadata, :status, :start_call, :wait, :write_flag,
-                         :write_flag=, :trailing_metadata)
+  # TODO(apolcyn): expose peer getter
+  Operation = view_class(:cancel, :cancel_with_status, :cancelled?, :deadline,
+                         :execute, :metadata, :status, :start_call, :wait,
+                         :write_flag, :write_flag=, :trailing_metadata)
# InterceptableView further limits access to an ActiveCall's methods
# for use in interceptors on the client, exposing only the deadline

@ -90,69 +90,78 @@ describe GRPC::Core::Call do
describe '#status' do
it 'can save the status and read it back' do
-      call = make_test_call
+      make_test_call do |call|
        sts = Struct::Status.new(OK, 'OK')
        expect { call.status = sts }.not_to raise_error
        expect(call.status).to eq(sts)
+      end
    end
    it 'must be set to a status' do
-      call = make_test_call
+      make_test_call do |call|
        bad_sts = Object.new
        expect { call.status = bad_sts }.to raise_error(TypeError)
+      end
    end
    it 'can be set to nil' do
-      call = make_test_call
+      make_test_call do |call|
        expect { call.status = nil }.not_to raise_error
+      end
    end
end
describe '#metadata' do
it 'can save the metadata hash and read it back' do
-      call = make_test_call
+      make_test_call do |call|
        md = { 'k1' => 'v1', 'k2' => 'v2' }
        expect { call.metadata = md }.not_to raise_error
        expect(call.metadata).to be(md)
+      end
    end
    it 'must be set with a hash' do
-      call = make_test_call
+      make_test_call do |call|
        bad_md = Object.new
        expect { call.metadata = bad_md }.to raise_error(TypeError)
+      end
    end
    it 'can be set to nil' do
-      call = make_test_call
+      make_test_call do |call|
        expect { call.metadata = nil }.not_to raise_error
+      end
end
end
describe '#set_credentials!' do
it 'can set a valid CallCredentials object' do
-      call = make_test_call
+      make_test_call do |call|
        auth_proc = proc { { 'plugin_key' => 'plugin_value' } }
        creds = GRPC::Core::CallCredentials.new auth_proc
        expect { call.set_credentials! creds }.not_to raise_error
+      end
end
end
describe '#cancel' do
it 'completes ok' do
-      call = make_test_call
+      make_test_call do |call|
        expect { call.cancel }.not_to raise_error
+      end
    end
    it 'completes ok when the call is closed' do
-      call = make_test_call
+      make_test_call do |call|
        call.close
        expect { call.cancel }.not_to raise_error
+      end
end
end
describe '#cancel_with_status' do
it 'completes ok' do
-      call = make_test_call
+      make_test_call do |call|
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
@ -160,18 +169,22 @@ describe GRPC::Core::Call do
call.cancel_with_status(0, nil)
end.to raise_error(TypeError)
end
end
it 'completes ok when the call is closed' do
-      call = make_test_call
+      make_test_call do |call|
        call.close
        expect do
          call.cancel_with_status(0, 'test status')
        end.not_to raise_error
+      end
end
end
def make_test_call
-    @ch.create_call(nil, nil, 'phony_method', nil, deadline)
+    call = @ch.create_call(nil, nil, 'phony_method', nil, deadline)
+    yield call
+    call.close
end
def deadline

@ -118,7 +118,8 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5
blk = proc do
-      ch.create_call(nil, nil, 'phony_method', nil, deadline)
+      call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
+      call.close
end
expect(&blk).to_not raise_error
end
@ -132,8 +133,9 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5
blk = proc do
-      ch.create_call(nil, nil, 'phony_method', nil, deadline)
+      call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
+      STDERR.puts "#{Time.now}: created call"
+      call.close
end
expect(&blk).to raise_error(RuntimeError)
STDERR.puts "#{Time.now}: finished: raises an error if called on a closed channel"

@ -16,36 +16,8 @@ require 'spec_helper'
include GRPC::Core
shared_context 'setup: tags' do
let(:sent_message) { 'sent message' }
let(:reply_text) { 'the reply' }
def deadline
Time.now + 5
end
def server_allows_client_to_proceed(metadata = {})
recvd_rpc = @server.request_call
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
ops = { CallOps::SEND_INITIAL_METADATA => metadata }
server_batch = server_call.run_batch(ops)
expect(server_batch.send_metadata).to be true
server_call
end
def new_client_call
@ch.create_call(nil, nil, '/method', nil, deadline)
end
def ok_status
Struct::Status.new(StatusCodes::OK, 'OK')
end
end
shared_examples 'basic GRPC message delivery is OK' do
include GRPC::Core
include_context 'setup: tags'
context 'the test channel' do
it 'should have a target' do
@ -53,272 +25,45 @@ shared_examples 'basic GRPC message delivery is OK' do
end
end
context 'a client call' do
it 'should have a peer' do
expect(new_client_call.peer).to be_a(String)
end
end
it 'calls have peer info' do
call = new_client_call
expect(call.peer).to be_a(String)
it 'unary calls work' do
run_services_on_server(@server, services: [EchoService]) do
call = @stub.an_rpc(EchoMsg.new, return_op: true)
expect(call.execute).to be_a(EchoMsg)
end
it 'servers receive requests from clients and can respond' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'responses written by servers are received by the client' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq(reply_text)
expect(final_client_batch.status.code).to eq(0)
end
it 'compressed messages can be sent and received' do
call = new_client_call
server_call = nil
it 'unary calls work when enabling compression' do
run_services_on_server(@server, services: [EchoService]) do
long_request_str = '0' * 2000
long_response_str = '1' * 2000
md = { 'grpc-internal-encoding-request' => 'gzip' }
server_thread = Thread.new do
server_call = server_allows_client_to_proceed(md)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_MESSAGE => long_request_str,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(long_request_str)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => long_response_str,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq long_response_str
expect(final_client_batch.status.code).to eq(0)
end
it 'servers can ignore a client write and send a status' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
the_status = Struct::Status.new(StatusCodes::OK, 'OK')
server_thread.join
server_ops = {
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq nil
expect(server_batch.send_status).to be true
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'completes calls by sending status to client and server' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
call = @stub.an_rpc(EchoMsg.new(msg: long_request_str),
return_op: true,
metadata: md)
response = call.execute
expect(response).to be_a(EchoMsg)
expect(response.msg).to eq(long_request_str)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
# confirm the server can read the inbound message and respond
the_status = Struct::Status.new(StatusCodes::OK, 'OK', {})
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq sent_message
server_ops = {
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_status).to be true
expect(server_batch.send_message).to be true
# confirm the client can receive the server response and status.
client_ops = {
CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.send_close).to be true
expect(final_client_batch.message).to eq reply_text
expect(final_client_batch.status).to eq the_status
# confirm the server can receive the client close.
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
final_server_batch = server_call.run_batch(server_ops)
expect(final_server_batch.send_close).to be true
end
def client_cancel_test(cancel_proc, expected_code,
expected_details)
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::RECV_INITIAL_METADATA => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.metadata).to eq({})
call = @stub.an_rpc(EchoMsg.new, return_op: true)
run_services_on_server(@server, services: [EchoService]) do
# start the call, but don't send a message yet
call.start_call
# cancel the call
cancel_proc.call(call)
server_thread.join
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
client_ops = {
CallOps::RECV_STATUS_ON_CLIENT => {}
}
client_batch = call.run_batch(client_ops)
expect(client_batch.status.code).to be expected_code
expect(client_batch.status.details).to eq expected_details
# check the client's status
failed = false
begin
call.execute
rescue GRPC::BadStatus => e
failed = true
expect(e.code).to be expected_code
expect(e.details).to eq expected_details
end
expect(failed).to be(true)
end
end
it 'clients can cancel a call on the server' do
@ -344,8 +89,6 @@ shared_examples 'basic GRPC message delivery is OK' do
end
shared_examples 'GRPC metadata delivery works OK' do
include_context 'setup: tags'
describe 'from client => server' do
before(:example) do
n = 7 # arbitrary number of metadata
@ -364,53 +107,31 @@ shared_examples 'GRPC metadata delivery works OK' do
it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md|
-      call = new_client_call
-      client_ops = {
-        CallOps::SEND_INITIAL_METADATA => md
-      }
-      blk = proc do
-        call.run_batch(client_ops)
+      # NOTE: no need to run a server in this test b/c the failure
+      # happens while validating metadata to send.
+      failed = false
+      begin
+        @stub.an_rpc(EchoMsg.new, metadata: md)
+      rescue TypeError => e
+        failed = true
+        expect(e.message).to eq('grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
      end
-      expect(&blk).to raise_error
+      expect(failed).to be(true)
end
end
it 'sends all the metadata pairs when keys and values are valid' do
@valid_metadata.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
end
call = new_client_call
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@valid_metadata.each_with_index do |md, i|
expect(@stub.an_rpc(EchoMsg.new, metadata: md)).to be_a(EchoMsg)
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
# finish the call
final_server_batch = recvd_rpc.call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
expect(service.received_md.length).to eq(i + 1)
md.each do |k, v|
expect(service.received_md[i][k.to_s]).to eq(v)
end
end
end
end
end
@ -432,120 +153,61 @@ shared_examples 'GRPC metadata delivery works OK' do
end
it 'raises an exception if a metadata key is invalid' do
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@bad_keys.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil
}
call.run_batch(client_ops)
# server gets the invocation
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_ops = {
CallOps::SEND_INITIAL_METADATA => md
}
blk = proc do
recvd_rpc.call.run_batch(server_ops)
proceed = Queue.new
server_exception = nil
service.on_call_started = proc do |call|
call.send_initial_metadata(md)
rescue TypeError => e
server_exception = e
ensure
proceed.push(1)
end
client_exception = nil
client_call = @stub.an_rpc(EchoMsg.new, return_op: true)
thr = Thread.new do
client_call.execute
rescue GRPC::BadStatus => e
client_exception = e
end
proceed.pop
# TODO(apolcyn): we shouldn't need this cancel here. It's
# only currently needed b/c the server does not seem to properly
# terminate the RPC if it fails to send initial metadata. That
# should be fixed, in which case this cancellation can be removed.
client_call.cancel
thr.join
p client_exception
expect(client_exception.nil?).to be(false)
expect(server_exception.nil?).to be(false)
expect(server_exception.message).to eq(
'grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
end
expect(&blk).to raise_error
# cancel the call so the server can shut down immediately
call.cancel
end
end
it 'sends an empty hash if no metadata is added' do
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
run_services_on_server(@server, services: [EchoService]) do
call = @stub.an_rpc(EchoMsg.new, return_op: true)
expect(call.execute).to be_a(EchoMsg)
expect(call.metadata).to eq({})
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
# receive close and send status to finish the call
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
# receive status to finish the call
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'sends all the pairs when keys and values are valid' do
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@valid_metadata.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
service.on_call_started = proc do |call|
call.send_initial_metadata(md)
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
call = @stub.an_rpc(EchoMsg.new, return_op: true)
call.execute
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(final_client_batch.metadata).to eq(replace_symbols)
expect(final_client_batch.status.code).to eq(0)
expect(call.metadata).to eq(replace_symbols)
end
end
end
end
@ -554,16 +216,11 @@ end
describe 'the http client/server' do
before(:example) do
server_host = '0.0.0.0:0'
-    @server = new_core_server_for_testing(nil)
+    @server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, :this_port_is_insecure)
@server.start
@ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure)
end
after(:example) do
@ch.close
@server.shutdown_and_notify(deadline)
@server.close
@stub = EchoStub.new(
"0.0.0.0:#{server_port}", nil, channel_override: @ch)
end
it_behaves_like 'basic GRPC message delivery is OK' do
@ -574,8 +231,6 @@ describe 'the http client/server' do
end
describe 'the secure http client/server' do
include_context 'setup: tags'
def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata')
files = ['ca.pem', 'server1.key', 'server1.pem']
@ -587,17 +242,14 @@ describe 'the secure http client/server' do
server_host = '0.0.0.0:0'
server_creds = GRPC::Core::ServerCredentials.new(
nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
-    @server = new_core_server_for_testing(nil)
+    @server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, server_creds)
@server.start
args = { Channel::SSL_TARGET => 'foo.test.google.fr' }
@ch = Channel.new("0.0.0.0:#{server_port}", args,
@ch = Channel.new(
"0.0.0.0:#{server_port}", args,
GRPC::Core::ChannelCredentials.new(certs[0], nil, nil))
end
after(:example) do
@server.shutdown_and_notify(deadline)
@server.close
@stub = EchoStub.new(
"0.0.0.0:#{server_port}", nil, channel_override: @ch)
end
it_behaves_like 'basic GRPC message delivery is OK' do
@ -606,59 +258,25 @@ describe 'the secure http client/server' do
it_behaves_like 'GRPC metadata delivery works OK' do
end
def credentials_update_test(creds_update_md)
auth_proc = proc { creds_update_md }
it 'modifies metadata with CallCredentials' do
# create call creds
auth_proc = proc { { 'k1' => 'v1' } }
call_creds = GRPC::Core::CallCredentials.new(auth_proc)
initial_md_key = 'k2'
initial_md_val = 'v2'
initial_md = { initial_md_key => initial_md_val }
expected_md = creds_update_md.clone
fail 'bad test param' unless expected_md[initial_md_key].nil?
expected_md[initial_md_key] = initial_md_val
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
end
call = new_client_call
call.set_credentials! call_creds
client_batch = call.run_batch(
CallOps::SEND_INITIAL_METADATA => initial_md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[expected_md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
credentials_update_test_finish_call(call, recvd_rpc.call)
# create arbitrary custom metadata
custom_md = { 'k2' => 'v2' }
# perform an RPC
echo_service = EchoService.new
run_services_on_server(@server, services: [echo_service]) do
expect(@stub.an_rpc(EchoMsg.new,
credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
end
def credentials_update_test_finish_call(client_call, server_call)
final_server_batch = server_call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = client_call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
# call creds metadata should be merged with custom MD
expect(echo_service.received_md.length).to eq(1)
expected_md = { 'k1' => 'v1', 'k2' => 'v2' }
expected_md.each do |k, v|
expect(echo_service.received_md[0][k]).to eq(v)
end
it 'modifies metadata with CallCredentials' do
credentials_update_test('k1' => 'updated-v1')
end
it 'modifies large metadata with CallCredentials' do
@ -666,11 +284,34 @@ describe 'the secure http client/server' do
'00000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111',
)
md = {
k3: val_array,
k4: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey5: 'v1'
# create call creds
auth_proc = proc do
{
k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
}
credentials_update_test(md)
end
call_creds = GRPC::Core::CallCredentials.new(auth_proc)
# create arbitrary custom metadata
custom_md = { k1: 'v1' }
# perform an RPC
echo_service = EchoService.new
run_services_on_server(@server, services: [echo_service]) do
expect(@stub.an_rpc(EchoMsg.new,
credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
end
# call creds metadata should be merged with custom MD
expect(echo_service.received_md.length).to eq(1)
expected_md = {
k1: 'v1',
k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
}
expected_md.each do |k, v|
expect(echo_service.received_md[0][k.to_s]).to eq(v)
end
end
end

@ -55,17 +55,20 @@ describe GRPC::ActiveCall do
end
@ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil,
:this_channel_is_insecure)
+    @call = make_test_call
end
after(:each) do
@server.shutdown_and_notify(deadline)
@server.close
@server_thread.join
+    # Don't rely on GC to unref the call, since that can prevent
+    # the channel connectivity state polling thread from shutting down.
+    @call.close
end
describe 'restricted view methods' do
before(:each) do
-      @call = make_test_call
ActiveCall.client_invoke(@call)
@client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
@ -117,9 +120,8 @@ describe GRPC::ActiveCall do
describe '#remote_send' do
it 'allows a client to send a payload to the server', test: true do
-      call = make_test_call
-      ActiveCall.client_invoke(call)
-      client_call = ActiveCall.new(call, @pass_through,
+      ActiveCall.client_invoke(@call)
+      client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -137,15 +139,14 @@ describe GRPC::ActiveCall do
expect(server_call.remote_read).to eq(msg)
# finish the call
server_call.send_initial_metadata
-      call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
-      send_and_receive_close_and_status(call, recvd_call)
+      @call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
+      send_and_receive_close_and_status(@call, recvd_call)
end
it 'marshals the payload using the marshal func' do
-      call = make_test_call
-      ActiveCall.client_invoke(call)
+      ActiveCall.client_invoke(@call)
      marshal = proc { |x| 'marshalled:' + x }
-      client_call = ActiveCall.new(call, marshal, @pass_through, deadline)
+      client_call = ActiveCall.new(@call, marshal, @pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -161,23 +162,22 @@ describe GRPC::ActiveCall do
metadata_received: true)
expect(server_call.remote_read).to eq('marshalled:' + msg)
# finish the call
-      call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
-      send_and_receive_close_and_status(call, recvd_call)
+      @call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
+      send_and_receive_close_and_status(@call, recvd_call)
end
TEST_WRITE_FLAGS = [WriteFlags::BUFFER_HINT, WriteFlags::NO_COMPRESS]
TEST_WRITE_FLAGS.each do |f|
it "successfully makes calls with write_flag set to #{f}" do
-        call = make_test_call
-        ActiveCall.client_invoke(call)
+        ActiveCall.client_invoke(@call)
        marshal = proc { |x| 'marshalled:' + x }
-        client_call = ActiveCall.new(call, marshal,
+        client_call = ActiveCall.new(@call, marshal,
@pass_through, deadline)
msg = 'message is a string'
client_call.write_flag = f
client_call.remote_send(msg)
# flush the message in case writes are set to buffered
-        call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
+        @call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
# confirm that the message was marshalled
recvd_rpc = @received_rpcs_queue.pop
@ -199,9 +199,8 @@ describe GRPC::ActiveCall do
describe 'sending initial metadata', send_initial_metadata: true do
it 'sends metadata before sending a message if it hasnt been sent yet' do
-      call = make_test_call
      @client_call = ActiveCall.new(
-        call,
+        @call,
@pass_through,
@pass_through,
deadline,
@ -213,13 +212,13 @@ describe GRPC::ActiveCall do
message = 'phony message'
-      expect(call).to(
+      expect(@call).to(
        receive(:run_batch)
          .with(
            hash_including(
              CallOps::SEND_INITIAL_METADATA => metadata)).once)
-      expect(call).to(
+      expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_MESSAGE => message)).once)
@client_call.remote_send(message)
@ -228,14 +227,12 @@ describe GRPC::ActiveCall do
end
it 'doesnt send metadata if it thinks its already been sent' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline)
expect(@client_call.metadata_sent).to eql(true)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA)).never)
@ -243,9 +240,7 @@ describe GRPC::ActiveCall do
end
it 'sends metadata if it is explicitly sent and ok to do so' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline,
@ -257,7 +252,7 @@ describe GRPC::ActiveCall do
@client_call.merge_metadata_to_send(metadata)
expect(@client_call.metadata_to_send).to eq(metadata)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA =>
metadata)).once)
@ -265,9 +260,7 @@ describe GRPC::ActiveCall do
end
it 'explicit sending does nothing if metadata has already been sent' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline)
@ -284,7 +277,6 @@ describe GRPC::ActiveCall do
describe '#merge_metadata_to_send', merge_metadata_to_send: true do
it 'adds to existing metadata when there is existing metadata to send' do
call = make_test_call
starting_metadata = {
k1: 'key1_val',
k2: 'key2_val',
@ -292,7 +284,7 @@ describe GRPC::ActiveCall do
}
@client_call = ActiveCall.new(
call,
@call,
@pass_through, @pass_through,
deadline,
started: false,
@ -318,9 +310,8 @@ describe GRPC::ActiveCall do
end
it 'fails when initial metadata has already been sent' do
call = make_test_call
@client_call = ActiveCall.new(
call,
@call,
@pass_through,
@pass_through,
deadline,
@ -338,9 +329,8 @@ describe GRPC::ActiveCall do
describe '#client_invoke' do
it 'sends metadata to the server when present' do
call = make_test_call
metadata = { k1: 'v1', k2: 'v2' }
ActiveCall.client_invoke(call, metadata)
ActiveCall.client_invoke(@call, metadata)
recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call
expect(recvd_call).to_not be_nil
@ -349,15 +339,14 @@ describe GRPC::ActiveCall do
expect(recvd_rpc.metadata['k2']).to eq('v2')
# finish the call
recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => {})
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call)
@call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(@call, recvd_call)
end
end
describe '#send_status', send_status: true do
it 'works when no metadata or messages have been sent yet' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
recvd_rpc = @received_rpcs_queue.pop
server_call = ActiveCall.new(
@ -375,9 +364,8 @@ describe GRPC::ActiveCall do
describe '#remote_read', remote_read: true do
it 'reads the response sent by a server' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -385,13 +373,12 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('server_response')
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'saves no metadata when the server adds no metadata' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -401,13 +388,12 @@ describe GRPC::ActiveCall do
client_call.remote_read
expect(client_call.metadata).to eq({})
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'saves metadata added by the server' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -418,12 +404,11 @@ describe GRPC::ActiveCall do
expected = { 'k1' => 'v1', 'k2' => 'v2' }
expect(client_call.metadata).to eq(expected)
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'gets a status from the server when nothing else is sent from the server' do
client_call = make_test_call
ActiveCall.client_invoke(client_call)
ActiveCall.client_invoke(@call)
recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call
@ -438,22 +423,21 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'OK')
# Check that we can receive initial metadata and a status
client_call.run_batch(
@call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil)
batch_result = client_call.run_batch(
batch_result = @call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(OK)
end
it 'gets a nil msg before a status when an OK status is sent' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
server_call.send_status(OK, 'OK')
@ -463,10 +447,9 @@ describe GRPC::ActiveCall do
end
it 'unmarshals the response using the unmarshal func' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
unmarshal = proc { |x| 'unmarshalled:' + x }
client_call = ActiveCall.new(call, @pass_through,
client_call = ActiveCall.new(@call, @pass_through,
unmarshal, deadline)
# confirm the client receives the unmarshalled message
@ -476,14 +459,13 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('unmarshalled:server_response')
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
end
describe '#each_remote_read' do
it 'creates an Enumerator' do
call = make_test_call
client_call = ActiveCall.new(call, @pass_through,
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
expect(client_call.each_remote_read).to be_a(Enumerator)
# finish the call
@ -491,9 +473,8 @@ describe GRPC::ActiveCall do
end
it 'the returned enumerator can read n responses' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
reply = 'server_response'
@ -506,18 +487,17 @@ describe GRPC::ActiveCall do
expect(e.next).to eq(reply)
end
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'returns an enumerator that stops after an OK Status' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
reply = 'server_response'
client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
e = client_call.each_remote_read
n = 3 # arbitrary value > 1
@ -532,14 +512,13 @@ describe GRPC::ActiveCall do
describe '#closing the call from the client' do
it 'finishes ok if the server sends a status response' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
@ -549,9 +528,8 @@ describe GRPC::ActiveCall do
end
it 'finishes ok if the server sends an early status response' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -560,15 +538,14 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error
expect { client_call.receive_and_check_status }.to_not raise_error
end
it 'finishes ok if SEND_CLOSE and RECV_STATUS has been sent' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -577,7 +554,7 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
expect do
call.run_batch(
@call.run_batch(
CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
end.to_not raise_error
@ -631,6 +608,7 @@ describe GRPC::ActiveCall do
batch_result = @client_call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(@server_status)
@client_call.close
end
it 'sends the initial metadata implicitly if not already sent' do

@ -41,14 +41,17 @@ class EchoService
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
rpc :a_client_streaming_rpc_unimplemented, stream(EchoMsg), EchoMsg
attr_reader :received_md
attr_accessor :on_call_started
def initialize(**kw)
@trailing_metadata = kw
@received_md = []
@on_call_started = nil
end
def an_rpc(req, call)
GRPC.logger.info('echo service received a request')
on_call_started&.call(call)
call.output_metadata.update(@trailing_metadata)
@received_md << call.metadata unless call.metadata.nil?
req

@ -168,6 +168,25 @@ grpc_proto_fuzzer(
],
)
grpc_proto_fuzzer(
name = "server_fuzzer_chttp2_fake_creds",
srcs = ["server_fuzzer_chttp2_fake_creds.cc"],
corpus = "server_fuzzer_chttp2_fake_creds_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":server_fuzzer",
"//:grpc",
],
)
grpc_proto_fuzzer(
name = "server_fuzzer_chaotic_good",
srcs = ["server_fuzzer_chaotic_good.cc"],
@ -187,3 +206,60 @@ grpc_proto_fuzzer(
"//src/core:chaotic_good_server",
],
)
grpc_cc_library(
name = "connector_fuzzer",
srcs = ["connector_fuzzer.cc"],
hdrs = ["connector_fuzzer.h"],
external_deps = ["absl/log:check"],
deps = [
"fuzzer_input_proto",
"fuzzing_common",
"network_input",
"//:gpr",
"//:grpc",
"//src/core:channel_args",
"//test/core/event_engine/fuzzing_event_engine",
"//test/core/test_util:fuzz_config_vars",
"//test/core/test_util:grpc_test_util",
"//test/core/test_util:grpc_test_util_base",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2",
srcs = ["connector_fuzzer_chttp2.cc"],
corpus = "connector_fuzzer_chttp2_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2_fakesec",
srcs = ["connector_fuzzer_chttp2_fakesec.cc"],
corpus = "connector_fuzzer_chttp2_fakesec_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)

@ -0,0 +1,189 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
#include "src/core/lib/address_utils/parse_address.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/env.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/timer_manager.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
#include "test/core/end2end/fuzzers/network_input.h"
#include "test/core/test_util/fuzz_config_vars.h"
#include "test/core/test_util/test_config.h"
bool squelch = true;
bool leak_check = true;
using ::grpc_event_engine::experimental::ChannelArgsEndpointConfig;
using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::FuzzingEventEngine;
using ::grpc_event_engine::experimental::GetDefaultEventEngine;
using ::grpc_event_engine::experimental::MockEndpointController;
using ::grpc_event_engine::experimental::SetEventEngineFactory;
using ::grpc_event_engine::experimental::URIToResolvedAddress;
namespace grpc_core {
namespace {
class ConnectorFuzzer {
public:
ConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector)
: make_security_connector_(make_security_connector),
engine_([actions = msg.event_engine_actions()]() {
SetEventEngineFactory([actions]() -> std::unique_ptr<EventEngine> {
return std::make_unique<FuzzingEventEngine>(
FuzzingEventEngine::Options(), actions);
});
return std::dynamic_pointer_cast<FuzzingEventEngine>(
GetDefaultEventEngine());
}()),
mock_endpoint_controller_(MockEndpointController::Create(engine_)),
connector_(make_connector()) {
CHECK(engine_);
for (const auto& input : msg.network_input()) {
network_inputs_.push(input);
}
grpc_timer_manager_set_start_threaded(false);
grpc_init();
ExecCtx exec_ctx;
Executor::SetThreadingAll(false);
listener_ =
engine_
->CreateListener(
[this](std::unique_ptr<EventEngine::Endpoint> endpoint,
MemoryAllocator) {
if (network_inputs_.empty()) return;
ScheduleWrites(network_inputs_.front(), std::move(endpoint),
engine_.get());
network_inputs_.pop();
},
[](absl::Status) {}, ChannelArgsEndpointConfig(ChannelArgs{}),
std::make_unique<MemoryQuota>("foo"))
.value();
if (msg.has_shutdown_connector() &&
msg.shutdown_connector().delay_ms() > 0) {
auto shutdown_connector = msg.shutdown_connector();
const auto delay = Duration::Milliseconds(shutdown_connector.delay_ms());
engine_->RunAfterExactly(delay, [this, shutdown_connector = std::move(
shutdown_connector)]() {
if (connector_ == nullptr) return;
connector_->Shutdown(absl::Status(
static_cast<absl::StatusCode>(shutdown_connector.shutdown_status()),
shutdown_connector.shutdown_message()));
});
}
// Abbreviated interpreter for API actions, since we support only a few of
// them here.
uint64_t when_ms = 0;
for (const auto& action : msg.api_actions()) {
switch (action.type_case()) {
default:
break;
case api_fuzzer::Action::kSleepMs:
when_ms += action.sleep_ms();
break;
case api_fuzzer::Action::kResizeResourceQuota:
engine_->RunAfterExactly(
Duration::Milliseconds(when_ms),
[this, new_size = action.resize_resource_quota()]() {
resource_quota_->memory_quota()->SetSize(new_size);
});
when_ms += 1;
break;
}
}
}
~ConnectorFuzzer() {
listener_.reset();
connector_.reset();
mock_endpoint_controller_.reset();
engine_->TickUntilIdle();
grpc_shutdown_blocking();
engine_->UnsetGlobalHooks();
}
void Run() {
grpc_resolved_address addr;
CHECK(grpc_parse_uri(URI::Parse("ipv4:127.0.0.1:1234").value(), &addr));
CHECK_OK(
listener_->Bind(URIToResolvedAddress("ipv4:127.0.0.1:1234").value()));
CHECK_OK(listener_->Start());
OrphanablePtr<grpc_endpoint> endpoint(
mock_endpoint_controller_->TakeCEndpoint());
SubchannelConnector::Result result;
bool done = false;
auto channel_args = ChannelArgs{}.SetObject<EventEngine>(engine_).SetObject(
resource_quota_);
auto security_connector = make_security_connector_();
if (security_connector != nullptr) {
channel_args = channel_args.SetObject(std::move(security_connector));
}
connector_->Connect(
SubchannelConnector::Args{&addr, nullptr,
Timestamp::Now() + Duration::Seconds(20),
channel_args},
&result, NewClosure([&done, &result](grpc_error_handle status) {
done = true;
if (status.ok()) result.transport->Orphan();
}));
while (!done) {
engine_->Tick();
grpc_timer_manager_tick();
}
}
private:
RefCountedPtr<ResourceQuota> resource_quota_ =
MakeRefCounted<ResourceQuota>("fuzzer");
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector_;
std::shared_ptr<FuzzingEventEngine> engine_;
std::queue<fuzzer_input::NetworkInput> network_inputs_;
std::shared_ptr<MockEndpointController> mock_endpoint_controller_;
std::unique_ptr<EventEngine::Listener> listener_;
OrphanablePtr<SubchannelConnector> connector_;
};
} // namespace
void RunConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector) {
if (squelch && !GetEnv("GRPC_TRACE_FUZZER").has_value()) {
grpc_disable_all_absl_logs();
}
static const int once = []() {
ForceEnableExperiment("event_engine_client", true);
ForceEnableExperiment("event_engine_listener", true);
return 42;
}();
CHECK_EQ(once, 42); // avoid unused variable warning
ApplyFuzzConfigVars(msg.config_vars());
TestOnlyReloadExperimentsFromConfigVariables();
ConnectorFuzzer(msg, make_security_connector, make_connector).Run();
}
} // namespace grpc_core

@ -0,0 +1,34 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#define GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#include "absl/functional/function_ref.h"
#include "src/core/client_channel/connector.h"
#include "src/core/lib/security/security_connector/security_connector.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
namespace grpc_core {
void RunConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector);
}
#endif // GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H

@ -0,0 +1,30 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunConnectorFuzzer(
msg,
[]() {
return grpc_core::RefCountedPtr<grpc_channel_security_connector>();
},
[]() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -0,0 +1,36 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/security/security_connector/fake/fake_security_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunConnectorFuzzer(
msg,
[]() {
return grpc_fake_channel_security_connector_create(
grpc_core::RefCountedPtr<grpc_channel_credentials>(
grpc_fake_transport_security_credentials_create()),
nullptr, "foobar", grpc_core::ChannelArgs{});
},
[]() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -172,6 +172,20 @@ message ChaoticGoodFrame {
message ChaoticGoodSettings {}
message FakeTransportFrame {
enum MessageString {
CLIENT_INIT = 0;
SERVER_INIT = 1;
CLIENT_FINISHED = 2;
SERVER_FINISHED = 3;
}
oneof payload {
bytes raw_bytes = 1;
MessageString message_string = 2;
}
}
message InputSegment {
int32 delay_ms = 1;
oneof payload {
@ -187,6 +201,7 @@ message InputSegment {
H2ClientPrefix client_prefix = 11;
uint32 repeated_zeros = 12;
ChaoticGoodFrame chaotic_good = 13;
FakeTransportFrame fake_transport_frame = 14;
}
}
@ -204,10 +219,18 @@ message NetworkInput {
}
}
// Only for the connector fuzzer: when to drop the connector.
message ShutdownConnector {
int32 delay_ms = 1;
int32 shutdown_status = 2;
string shutdown_message = 3;
}
message Msg {
repeated NetworkInput network_input = 1;
repeated api_fuzzer.Action api_actions = 2;
fuzzing_event_engine.Actions event_engine_actions = 3;
grpc.testing.FuzzConfigVars config_vars = 4;
grpc.testing.FuzzingChannelArgs channel_args = 5;
ShutdownConnector shutdown_connector = 6;
}
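For reference, a minimal sketch of how a reproducer might populate the new shutdown_connector field from C++, using the standard generated protobuf setters. The function name, the 100 ms delay, and the status value are illustrative, not taken from any corpus:
// Sketch only: builds a fuzzer input that asks the connector fuzzer to drop
// the connector 100 ms in, with an ABORTED status. Field names follow the
// proto above; the surrounding harness is assumed.
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
fuzzer_input::Msg MakeShutdownRepro() {
  fuzzer_input::Msg msg;
  auto* sc = msg.mutable_shutdown_connector();
  sc->set_delay_ms(100);                        // when to drop the connector
  sc->set_shutdown_status(10);                  // absl::StatusCode::kAborted
  sc->set_shutdown_message("fuzzer-requested shutdown");
  return msg;
}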

@ -267,6 +267,13 @@ SliceBuffer ChaoticGoodFrame(const fuzzer_input::ChaoticGoodFrame& frame) {
return out;
}
void store32_little_endian(uint32_t value, unsigned char* buf) {
buf[3] = static_cast<unsigned char>((value >> 24) & 0xFF);
buf[2] = static_cast<unsigned char>((value >> 16) & 0xFF);
buf[1] = static_cast<unsigned char>((value >> 8) & 0xFF);
buf[0] = static_cast<unsigned char>((value) & 0xFF);
}
grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
switch (segment.payload_case()) {
case fuzzer_input::InputSegment::kRawBytes:
@ -333,6 +340,38 @@ grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
.JoinIntoSlice()
.TakeCSlice();
} break;
case fuzzer_input::InputSegment::kFakeTransportFrame: {
auto generate = [](absl::string_view payload) {
uint32_t length = payload.length();
std::vector<unsigned char> bytes;
bytes.resize(4);
store32_little_endian(length + 4, bytes.data());
for (auto c : payload) {
bytes.push_back(static_cast<unsigned char>(c));
}
return grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(bytes.data()), bytes.size());
};
switch (segment.fake_transport_frame().payload_case()) {
case fuzzer_input::FakeTransportFrame::kRawBytes:
return generate(segment.fake_transport_frame().raw_bytes());
case fuzzer_input::FakeTransportFrame::kMessageString:
switch (segment.fake_transport_frame().message_string()) {
default:
return generate("UNKNOWN");
case fuzzer_input::FakeTransportFrame::CLIENT_INIT:
return generate("CLIENT_INIT");
case fuzzer_input::FakeTransportFrame::SERVER_INIT:
return generate("SERVER_INIT");
case fuzzer_input::FakeTransportFrame::CLIENT_FINISHED:
return generate("CLIENT_FINISHED");
case fuzzer_input::FakeTransportFrame::SERVER_FINISHED:
return generate("SERVER_FINISHED");
}
case fuzzer_input::FakeTransportFrame::PAYLOAD_NOT_SET:
return generate("");
}
}
case fuzzer_input::InputSegment::PAYLOAD_NOT_SET:
break;
}
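The fake-transport framing above is a 4-byte little-endian length prefix (counting the prefix itself) followed by the payload. A hedged decoding counterpart, handy for sanity-checking corpus entries; load32_little_endian is hypothetical and not part of this diff:
// Sketch of the inverse of store32_little_endian above, plus a round-trip
// note on the layout produced by the kFakeTransportFrame generator.
#include <cstdint>
static uint32_t load32_little_endian(const unsigned char* buf) {
  return static_cast<uint32_t>(buf[0]) | (static_cast<uint32_t>(buf[1]) << 8) |
         (static_cast<uint32_t>(buf[2]) << 16) |
         (static_cast<uint32_t>(buf[3]) << 24);
}
// Example: a "CLIENT_INIT" frame carries 11 payload bytes, so the prefix
// stores 15 (payload + 4):
//   unsigned char buf[4];
//   store32_little_endian(15, buf);
//   assert(load32_little_endian(buf) == 15);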
@ -545,4 +584,15 @@ Duration ScheduleConnection(
return delay;
}
void ScheduleWrites(
const fuzzer_input::NetworkInput& network_input,
std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
endpoint,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine) {
auto schedule = MakeSchedule(network_input);
auto ep = std::shared_ptr<EventEngine::Endpoint>(std::move(endpoint));
ReadForever(ep);
ScheduleWritesForReads(ep, event_engine, std::move(schedule));
}
} // namespace grpc_core

@ -30,6 +30,12 @@ Duration ScheduleReads(
mock_endpoint_controller,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
void ScheduleWrites(
const fuzzer_input::NetworkInput& network_input,
std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
endpoint,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
Duration ScheduleConnection(
const fuzzer_input::NetworkInput& network_input,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine,

@ -0,0 +1,30 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc_security.h>
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/server_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunServerFuzzer(msg, [](grpc_server* server, int port_num,
const grpc_core::ChannelArgs&) {
auto* creds = grpc_fake_transport_security_server_credentials_create();
grpc_server_add_http2_port(
server, absl::StrCat("0.0.0.0:", port_num).c_str(), creds);
grpc_server_credentials_release(creds);
});
}

@ -67,12 +67,10 @@ void CancelAfterClientDone(
}
CORE_END2END_TEST(CoreEnd2endTest, CancelAfterClientDone) {
SKIP_IF_V3();
CancelAfterClientDone(*this, std::make_unique<CancelCancellationMode>());
}
CORE_END2END_TEST(CoreDeadlineTest, DeadlineAfterClientDone) {
SKIP_IF_V3();
CancelAfterClientDone(*this, std::make_unique<DeadlineCancellationMode>());
}

@ -18,6 +18,7 @@
#include <stdlib.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <limits>
#include <vector>
@ -32,6 +33,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/dump_args.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/telemetry/stats.h"
@ -189,7 +191,15 @@ void FuzzingEventEngine::TickUntilIdle() {
while (true) {
{
grpc_core::MutexLock lock(&*mu_);
if (tasks_by_id_.empty()) return;
LOG_EVERY_N_SEC(INFO, 5)
<< "TickUntilIdle: "
<< GRPC_DUMP_ARGS(tasks_by_id_.size(), outstanding_reads_.load(),
outstanding_writes_.load());
if (tasks_by_id_.empty() &&
outstanding_writes_.load(std::memory_order_relaxed) == 0 &&
outstanding_reads_.load(std::memory_order_relaxed) == 0) {
return;
}
}
Tick();
}
@ -299,6 +309,9 @@ absl::Status FuzzingEventEngine::FuzzingListener::Start() {
bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
CHECK(!closed[index]);
const int peer_index = 1 - index;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: entry "
<< GRPC_DUMP_ARGS(data->Length());
if (data->Length() == 0) return true;
size_t write_len = std::numeric_limits<size_t>::max();
// Check the write_sizes queue for fuzzer imposed restrictions on this write
@ -315,12 +328,16 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
// byte.
if (write_len == 0) write_len = 1;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: " << write_len << " bytes";
<< "WRITE[" << this << ":" << index << "]: " << write_len << " bytes; "
<< GRPC_DUMP_ARGS(pending_read[peer_index].has_value());
// Expand the pending buffer.
size_t prev_len = pending[index].size();
pending[index].resize(prev_len + write_len);
// Move bytes from the to-write data into the pending buffer.
data->MoveFirstNBytesIntoBuffer(write_len, pending[index].data() + prev_len);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: post-move "
<< GRPC_DUMP_ARGS(data->Length());
// If there was a pending read, then we can fulfill it.
if (pending_read[peer_index].has_value()) {
pending_read[peer_index]->buffer->Append(
@ -328,7 +345,11 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
pending[index].clear();
g_fuzzing_event_engine->RunLocked(
RunType::kWrite,
[cb = std::move(pending_read[peer_index]->on_read)]() mutable {
[cb = std::move(pending_read[peer_index]->on_read), this, peer_index,
buffer = pending_read[peer_index]->buffer]() mutable {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "FINISH_READ[" << this << ":" << peer_index
<< "]: " << GRPC_DUMP_ARGS(buffer->Length());
cb(absl::OkStatus());
});
pending_read[peer_index].reset();
@ -339,6 +360,10 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
bool FuzzingEventEngine::FuzzingEndpoint::Write(
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
const WriteArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_WRITE[" << middle_.get() << ":" << my_index()
<< "]: " << data->Length() << " bytes";
IoToken write_token(&g_fuzzing_event_engine->outstanding_writes_);
grpc_core::global_stats().IncrementSyscallWrite();
grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]);
@ -346,24 +371,38 @@ bool FuzzingEventEngine::FuzzingEndpoint::Write(
// If the write succeeds immediately, then we return true.
if (middle_->Write(data, my_index())) return true;
middle_->writing[my_index()] = true;
ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data);
ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data,
std::move(write_token));
return false;
}
void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data) {
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
IoToken write_token) {
g_fuzzing_event_engine->RunLocked(
RunType::kWrite, [middle = std::move(middle), index, data,
on_writable = std::move(on_writable)]() mutable {
RunType::kWrite,
[write_token = std::move(write_token), middle = std::move(middle), index,
data, on_writable = std::move(on_writable)]() mutable {
grpc_core::ReleasableMutexLock lock(&*mu_);
CHECK(middle->writing[index]);
if (middle->closed[index]) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED[" << middle.get() << ":" << index << "]";
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[on_writable = std::move(on_writable)]() mutable {
on_writable(absl::InternalError("Endpoint closed"));
});
if (middle->pending_read[1 - index].has_value()) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[cb = std::move(
middle->pending_read[1 - index]->on_read)]() mutable {
cb(absl::InternalError("Endpoint closed"));
});
middle->pending_read[1 - index].reset();
}
return;
}
if (middle->Write(data, index)) {
@ -373,14 +412,23 @@ void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
return;
}
ScheduleDelayedWrite(std::move(middle), index, std::move(on_writable),
data);
data, std::move(write_token));
});
}
FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
grpc_core::MutexLock lock(&*mu_);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSE[" << middle_.get() << ":" << my_index() << "]: "
<< GRPC_DUMP_ARGS(
middle_->closed[my_index()], middle_->closed[peer_index()],
middle_->pending_read[my_index()].has_value(),
middle_->pending_read[peer_index()].has_value(),
middle_->writing[my_index()], middle_->writing[peer_index()]);
middle_->closed[my_index()] = true;
if (middle_->pending_read[my_index()].has_value()) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED_READING[" << middle_.get() << ":" << my_index() << "]";
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[cb = std::move(middle_->pending_read[my_index()]->on_read)]() mutable {
@ -388,7 +436,7 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
});
middle_->pending_read[my_index()].reset();
}
if (!middle_->writing[peer_index()] &&
if (!middle_->writing[my_index()] &&
middle_->pending_read[peer_index()].has_value()) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
@ -403,20 +451,25 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
bool FuzzingEventEngine::FuzzingEndpoint::Read(
absl::AnyInvocable<void(absl::Status)> on_read, SliceBuffer* buffer,
const ReadArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_READ[" << middle_.get() << ":" << my_index() << "]";
buffer->Clear();
IoToken read_token(&g_fuzzing_event_engine->outstanding_reads_);
grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]);
if (middle_->pending[peer_index()].empty()) {
// If the endpoint is closed, fail asynchronously.
if (middle_->closed[peer_index()]) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter, [on_read = std::move(on_read)]() mutable {
RunType::kRunAfter,
[read_token, on_read = std::move(on_read)]() mutable {
on_read(absl::InternalError("Endpoint closed"));
});
return false;
}
// If the endpoint has no pending data, then we need to wait for a write.
middle_->pending_read[my_index()] = PendingRead{std::move(on_read), buffer};
middle_->pending_read[my_index()] =
PendingRead{std::move(read_token), std::move(on_read), buffer};
return false;
} else {
// If the endpoint has pending data, then we can fulfill the read

@ -17,6 +17,7 @@
#include <stddef.h>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <map>
@ -124,6 +125,36 @@ class FuzzingEventEngine : public EventEngine {
}
private:
class IoToken {
public:
IoToken() : refs_(nullptr) {}
explicit IoToken(std::atomic<size_t>* refs) : refs_(refs) {
refs_->fetch_add(1, std::memory_order_relaxed);
}
~IoToken() {
if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
}
IoToken(const IoToken& other) : refs_(other.refs_) {
if (refs_ != nullptr) refs_->fetch_add(1, std::memory_order_relaxed);
}
IoToken& operator=(const IoToken& other) {
IoToken copy(other);
Swap(copy);
return *this;
}
IoToken(IoToken&& other) noexcept
: refs_(std::exchange(other.refs_, nullptr)) {}
IoToken& operator=(IoToken&& other) noexcept {
if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
refs_ = std::exchange(other.refs_, nullptr);
return *this;
}
void Swap(IoToken& other) { std::swap(refs_, other.refs_); }
private:
std::atomic<size_t>* refs_;
};
enum class RunType {
kWrite,
kRunAfter,
@ -183,6 +214,8 @@ class FuzzingEventEngine : public EventEngine {
// One read that's outstanding.
struct PendingRead {
// The associated IO token.
IoToken io_token;
// Callback to invoke when the read completes.
absl::AnyInvocable<void(absl::Status)> on_read;
// The buffer to read into.
@ -243,8 +276,8 @@ class FuzzingEventEngine : public EventEngine {
// endpoint shutdown, it's believed this is a legal implementation.
static void ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
IoToken write_token) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const std::shared_ptr<EndpointMiddle> middle_;
const int index_;
};
@ -299,6 +332,8 @@ class FuzzingEventEngine : public EventEngine {
std::queue<std::queue<size_t>> write_sizes_for_future_connections_
ABSL_GUARDED_BY(mu_);
grpc_pick_port_functions previous_pick_port_functions_;
std::atomic<size_t> outstanding_writes_{0};
std::atomic<size_t> outstanding_reads_{0};
grpc_core::Mutex run_after_duration_callback_mu_;
absl::AnyInvocable<void(Duration)> run_after_duration_callback_
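IoToken is a small RAII counter handle: each outstanding read or write holds one, and TickUntilIdle now spins until tasks_by_id_, outstanding_reads_, and outstanding_writes_ all drain. A minimal standalone sketch of the same idea, with illustrative names (OpToken, outstanding_ops) rather than the real ones:
// Sketch: an RAII token that maintains a shared atomic count of in-flight
// operations, mirroring how IoToken tracks outstanding reads/writes above.
#include <atomic>
#include <cassert>
#include <utility>
std::atomic<size_t> outstanding_ops{0};
class OpToken {
 public:
  explicit OpToken(std::atomic<size_t>* refs) : refs_(refs) {
    refs_->fetch_add(1, std::memory_order_relaxed);
  }
  OpToken(OpToken&& other) noexcept
      : refs_(std::exchange(other.refs_, nullptr)) {}
  ~OpToken() {
    if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
  }
 private:
  std::atomic<size_t>* refs_;
};
int main() {
  {
    OpToken read_token(&outstanding_ops);   // e.g. taken at START_READ
    assert(outstanding_ops.load() == 1);    // engine is not idle yet
    OpToken moved = std::move(read_token);  // moving does not double-count
    assert(outstanding_ops.load() == 1);
  }                                          // tokens destroyed: op complete
  assert(outstanding_ops.load() == 0);       // TickUntilIdle's exit condition
  return 0;
}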

@ -28,11 +28,12 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/telemetry/stats.h"
#include "test/core/test_util/test_config.h"
namespace grpc_core {
namespace {
void AssertIndex(const HPackTable* tbl, uint32_t idx, const char* key,
void AssertIndex(HPackTable* tbl, uint32_t idx, const char* key,
const char* value) {
const auto* md = tbl->Lookup(idx);
ASSERT_NE(md, nullptr);
@ -113,6 +114,8 @@ TEST(HpackParserTableTest, ManyAdditions) {
ExecCtx exec_ctx;
auto stats_before = global_stats().Collect();
for (i = 0; i < 100000; i++) {
std::string key = absl::StrCat("K.", i);
std::string value = absl::StrCat("VALUE.", i);
@ -134,6 +137,56 @@ TEST(HpackParserTableTest, ManyAdditions) {
value.c_str());
}
}
auto stats_after = global_stats().Collect();
EXPECT_EQ(stats_after->http2_hpack_hits - stats_before->http2_hpack_hits,
100000);
EXPECT_EQ(stats_after->http2_hpack_misses, stats_before->http2_hpack_misses);
}
TEST(HpackParserTableTest, ManyUnusedAdditions) {
auto tbl = std::make_unique<HPackTable>();
int i;
ExecCtx exec_ctx;
auto stats_before = global_stats().Collect();
const Timestamp start = Timestamp::Now();
for (i = 0; i < 100000; i++) {
std::string key = absl::StrCat("K.", i);
std::string value = absl::StrCat("VALUE.", i);
auto key_slice = Slice::FromCopiedString(key);
auto value_slice = Slice::FromCopiedString(value);
auto memento = HPackTable::Memento{
ParsedMetadata<grpc_metadata_batch>(
ParsedMetadata<grpc_metadata_batch>::FromSlicePair{},
std::move(key_slice), std::move(value_slice),
key.length() + value.length() + 32),
nullptr};
ASSERT_TRUE(tbl->Add(std::move(memento)));
}
tbl.reset();
auto stats_after = global_stats().Collect();
const Timestamp end = Timestamp::Now();
EXPECT_EQ(stats_after->http2_hpack_hits, stats_before->http2_hpack_hits);
EXPECT_EQ(stats_after->http2_hpack_misses - stats_before->http2_hpack_misses,
100000);
size_t num_buckets_changed = 0;
const auto& lifetime_before = stats_before->http2_hpack_entry_lifetime;
const auto& lifetime_after = stats_after->http2_hpack_entry_lifetime;
for (size_t i = 0; i < lifetime_before.bucket_count(); i++) {
if (lifetime_before.buckets()[i] != lifetime_after.buckets()[i]) {
EXPECT_LE(i, lifetime_before.BucketFor((end - start).millis()));
num_buckets_changed++;
}
}
EXPECT_GT(num_buckets_changed, 0);
}
} // namespace grpc_core

@ -112,6 +112,19 @@ grpc_cc_test(
],
)
grpc_cc_test(
name = "unique_ptr_with_bitset_test",
srcs = ["unique_ptr_with_bitset_test.cc"],
external_deps = ["gtest"],
language = "C++",
uses_event_engine = False,
uses_polling = False,
deps = [
"//:gpr_platform",
"//src/core:unique_ptr_with_bitset",
],
)
grpc_cc_test(
name = "useful_test",
srcs = ["useful_test.cc"],

@ -0,0 +1,60 @@
//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#include "src/core/util/unique_ptr_with_bitset.h"
#include <stdint.h>
#include <limits>
#include <memory>
#include "gtest/gtest.h"
#include <grpc/support/port_platform.h>
namespace grpc_core {
TEST(UniquePtrWithBitsetTest, Basic) {
UniquePtrWithBitset<int, 1> ptr;
EXPECT_EQ(ptr.get(), nullptr);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.reset(new int(42));
EXPECT_EQ(*ptr, 42);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.SetBit(0);
EXPECT_EQ(ptr.TestBit(0), true);
ptr.reset();
EXPECT_EQ(ptr.get(), nullptr);
EXPECT_EQ(ptr.TestBit(0), true);
ptr.ClearBit(0);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.reset(new int(43));
ptr.SetBit(0);
UniquePtrWithBitset<int, 1> ptr2;
ptr2 = std::move(ptr);
EXPECT_EQ(*ptr2, 43);
EXPECT_EQ(ptr2.TestBit(0), true);
}
} // namespace grpc_core
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
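The header under test is not shown in this diff. The usual way such a type packs flag bits alongside a pointer is to stash them in the low, alignment-guaranteed bits of the pointer value. A hedged sketch of that general technique, not the actual UniquePtrWithBitset implementation:
// Illustrative pointer-tagging sketch only: stores one flag bit in the least
// significant bit of an aligned pointer. The real header may differ.
#include <cassert>
#include <cstdint>
template <typename T>
class TaggedPtr {
 public:
  void set(T* p) { bits_ = reinterpret_cast<uintptr_t>(p) | (bits_ & 1u); }
  T* get() const { return reinterpret_cast<T*>(bits_ & ~uintptr_t{1}); }
  void SetBit() { bits_ |= 1u; }
  void ClearBit() { bits_ &= ~uintptr_t{1}; }
  bool TestBit() const { return (bits_ & 1u) != 0; }
 private:
  uintptr_t bits_ = 0;  // alignof(T) >= 2 keeps bit 0 free for the flag
};
int main() {
  int x = 42;
  TaggedPtr<int> p;
  p.set(&x);
  p.SetBit();
  assert(*p.get() == 42 && p.TestBit());  // pointer and flag coexist
  return 0;
}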

@ -54,7 +54,6 @@ DOCKERIMAGE_CURRENT_VERSIONS = {
"tools/dockerfile/distribtest/python_python38_buster_aarch64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_python38_buster_aarch64@sha256:0a93bf2a0303aebe1280bafad69df228b9444af9144c767d8169ecc70fb383f6",
"tools/dockerfile/distribtest/python_ubuntu2004_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2004_x64@sha256:288cf72bc98fc384b9352d1f6d258b3513925ffe5746dda7e2e343723dd5f733",
"tools/dockerfile/distribtest/python_ubuntu2204_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2204_x64@sha256:6054d639247a93af2b496f3c1ce48f63b2e07f5ba54e025f69bb232a747c644e",
"tools/dockerfile/distribtest/ruby_centos7_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_centos7_x64@sha256:4d529b984b78ca179086f7f9b416605e2d9a96ca0a28a71f4421bb5ffdc18f96",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_0.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_0@sha256:05c579d93764f12db1a60fa78a26e0f4d6179e54187a3a531c8ff955001731ec",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_1.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_1@sha256:a48bb08275a588fbcea21b6b6056514b69454f6844bd7db9fd72c796892d02e1",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_2.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_2@sha256:9604f8d07c3ea330cdc1ebe394f67828710bbfef52f0dc144e513e3627279b5a",

@ -302,6 +302,10 @@ with open("src/core/telemetry/stats_data.h", "w") as H:
print(" public:", file=H)
print(" static int BucketFor(int value);", file=H)
print(" const uint64_t* buckets() const { return buckets_; }", file=H)
print(
" size_t bucket_count() const { return %d; }" % shape.buckets,
file=H,
)
print(
" friend Histogram_%d_%d operator-(const Histogram_%d_%d& left,"
" const Histogram_%d_%d& right);"

@ -1 +0,0 @@
us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_centos7_x64:b37e078e920ba1f75bd26bc67c2d3496432e36af@sha256:4d529b984b78ca179086f7f9b416605e2d9a96ca0a28a71f4421bb5ffdc18f96

@ -1,33 +0,0 @@
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM centos:7
RUN yum update -y && yum install -y curl tar which
# Install rvm
RUN gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
RUN curl -sSL https://get.rvm.io | bash -s stable
# Install Ruby 2.7
RUN /bin/bash -l -c "rvm install ruby-2.7"
RUN /bin/bash -l -c "rvm use --default ruby-2.7"
RUN /bin/bash -l -c "echo 'gem: --no-document' > ~/.gemrc"
RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.7' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-document"
RUN mkdir /var/local/jenkins
RUN /bin/bash -l -c "echo '. /etc/profile.d/rvm.sh' >> ~/.bashrc"

@ -2956,6 +2956,7 @@ src/core/util/time.cc \
src/core/util/time_precise.cc \
src/core/util/time_precise.h \
src/core/util/tmpfile.h \
src/core/util/unique_ptr_with_bitset.h \
src/core/util/upb_utils.h \
src/core/util/useful.h \
src/core/util/windows/cpu.cc \

@ -2736,6 +2736,7 @@ src/core/util/time.cc \
src/core/util/time_precise.cc \
src/core/util/time_precise.h \
src/core/util/tmpfile.h \
src/core/util/unique_ptr_with_bitset.h \
src/core/util/upb_utils.h \
src/core/util/useful.h \
src/core/util/windows/cpu.cc \

@ -2,21 +2,24 @@
This directory contains scripts that facilitate building and running gRPC interoperability tests for combinations of languages/runtimes (known as the matrix).
The setup builds gRPC docker images for each language/runtime and upload it to Google Container Registry (GCR). These images, encapsulating gRPC stack
The setup builds gRPC docker images for each language/runtime and uploads them to Artifact Registry (AR). These images, encapsulating the gRPC stack
from specific releases/tags, are used to test version compatibility between gRPC release versions.
## Step-by-step instructions for adding a GCR image for a new release for compatibility test
We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server. When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test. The steps are:
## Step-by-step instructions for adding an AR docker image for a new release for compatibility test
We have a continuous nightly test setup that tests gRPC backward compatibility between old clients and the latest server.
When a gRPC developer creates a new gRPC release, they are also responsible for adding the just-released gRPC client to the nightly test.
The steps are:
- Add (or update) an entry in `./client_matrix.py` file to reference the github tag for the release.
- Build new client docker image(s). For example, for C and wrapper languages release `v1.9.9`, do
- `tools/interop_matrix/create_matrix_images.py --git_checkout --release=v1.9.9 --upload_images --language cxx python ruby php`
- Verify that the new docker image was built successfully and uploaded to GCR. For example,
- `gcloud container images list --repository gcr.io/grpc-testing` lists available images.
- `gcloud container images list-tags gcr.io/grpc-testing/grpc_interop_java` should show an image entry with tag `v1.9.9`.
- images can also be viewed in https://pantheon.corp.google.com/gcr/images/grpc-testing?project=grpc-testing
- Verify that the new docker image was built successfully and uploaded to AR. For example,
- `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public` lists available images.
- `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java --include-tags` should show an image entry with tag `v1.9.9`.
- images can also be viewed in https://pantheon.corp.google.com/artifacts/docker/grpc-testing/us/testing-images-public
- Verify that the just-created docker client image passes the backward compatibility test (it should). For example,
- `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_java:v1.9.9` followed by
- `docker_image=gcr.io/grpc-testing/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master`
- `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9` followed by
- `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master`
- Commit the change and create a PR to upstream/master.
- Trigger an adhoc run of interop matrix tests: https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod:grpc%2Fcore%2Fexperimental%2Flinux%2Fgrpc_interop_matrix_adhoc
- Once tests pass, request a PR review.
@ -24,30 +27,34 @@ We have continuous nightly test setup to test gRPC backward compatibility betwee
For more details on each step, refer to sections below.
## Instructions for adding new language/runtimes
- Create new `Dockerfile.template`, `build_interop.sh.template` for the language/runtime under `template/tools/dockerfile/`.
- Run `tools/buildgen/generate_projects.sh` to create corresponding files under `tools/dockerfile/`.
- Add language/runtimes to `client_matrix.py` following existing language/runtimes examples.
- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to GCR.
- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to AR.
## Instructions for creating new test cases
- Create test cases by running `LANG=<lang> [RELEASE=<release>] ./create_testcases.sh`. For example,
- `LANG=go ./create_testcases.sh` will generate `./testcases/go__master`, which is also a functional bash script.
- `LANG=go KEEP_IMAGE=1 ./create_testcases.sh` will generate `./testcases/go__master` and keep the local docker image so it can be invoked simply via `./testcases/go__master`. Note: remove local docker images manually afterwards with `docker rmi <image_id>`.
- Stage and commit the generated test case file `./testcases/<lang>__<release>`.
## Instructions for running test cases against GCR images
## Instructions for running test cases against AR docker images
- Run `tools/interop_matrix/run_interop_matrix_tests.py`. Useful options:
- `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the GCR images with the tag have been created using `create_matrix_images.py` above.
- `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the AR images with the tag have been created using `create_matrix_images.py` above.
- `--language` specifies a language. Defaults to `--language=all`.
For example, to test all languages for all gRPC releases across all runtimes, run `tools/interop_matrix/run_interop_matrix_tests.py --release=all`.
- The output for all the test cases is recorded in a JUnit-style XML file (defaults to `report.xml`).
## Instructions for running test cases against a GCR image manually
- Download docker image from GCR. For example: `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0`.
## Instructions for running test cases against an AR image manually
- Download a docker image from AR. For example: `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0`.
- Run test cases by specifying `docker_image` variable inline with the test case script created above.
For example:
- `docker_image=gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in GCR.
- `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in AR.
Note:
- File paths starting with `tools/` or `template/` are relative to the grpc repo root dir. File paths starting with `./` are relative to the current directory (`tools/interop_matrix`).
- Creating and referencing images in GCR require read and write permission to Google Container Registry path gcr.io/grpc-testing.
- Creating and referencing images in AR requires read and write permission to the AR path us-docker.pkg.dev/grpc-testing.

@ -53,9 +53,9 @@ _BUILD_INFO = "/var/local/build_info"
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
"--docker_path",
default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images",
)
argp.add_argument(
@ -175,7 +175,7 @@ def build_image_jobspec(runtime, env, gcr_tag, stack_base):
stack_base: the local gRPC repo path.
"""
basename = "grpc_interop_%s" % runtime
tag = "%s/%s:%s" % (args.gcr_path, basename, gcr_tag)
tag = "%s/%s:%s" % (args.docker_path, basename, gcr_tag)
build_env = {"INTEROP_IMAGE": tag, "BASE_NAME": basename}
build_env.update(env)
image_builder_path = _IMAGE_BUILDER
@ -407,8 +407,8 @@ for lang in languages:
for image in docker_images:
if args.upload_images:
jobset.message("START", "Uploading %s" % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(":") != -1
# docker image name must be in the format <docker_path>/<image>:<gcr_tag>
assert image.startswith(args.docker_path) and image.find(":") != -1
# Add a tag to exclude the image from the GCP Vulnerability Scanner.
(image_name, tag_name) = image.rsplit(":", 1)
alternate_image = (

@ -56,9 +56,9 @@ _RELEASES = sorted(
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
"--docker_path",
default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images",
)
argp.add_argument(
"--release",
@ -348,7 +348,9 @@ languages = args.language if args.language != ["all"] else _LANGUAGES
total_num_failures = 0
_xml_report_tree = report_utils.new_junit_xml_tree()
for lang in languages:
docker_images = _get_test_images_for_lang(lang, args.release, args.gcr_path)
docker_images = _get_test_images_for_lang(
lang, args.release, args.docker_path
)
for runtime in sorted(docker_images.keys()):
total_num_failures += _run_tests_for_lang(
lang, runtime, docker_images[runtime], _xml_report_tree

@ -20,11 +20,8 @@ import %workspace%/tools/remote_build/include/rbe_base_config.bazelrc
# configure backend for remote execution
build --remote_executor=grpcs://remotebuildexecution.googleapis.com
build --spawn_strategy=remote
build --strategy=Javac=remote
build --strategy=Closure=remote
build --genrule_strategy=remote
build --remote_timeout=7200 # very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
# Very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
build --remote_timeout=7200
# In the remote execution environment, each test gets its own docker containers
# and port server won't be available.

@ -502,7 +502,6 @@ def targets():
protobuf_version="3.25",
presubmit=True,
),
RubyDistribTest("linux", "x64", "centos7"),
RubyDistribTest("linux", "x64", "ubuntu2004"),
RubyDistribTest("linux", "x64", "ubuntu2204", presubmit=True),
# PHP7

@ -11419,6 +11419,30 @@
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "unique_ptr_with_bitset_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
