Merge master

pull/37388/head
tanvi-jagtap 4 months ago
commit 079535c5e1
Changed files (68; diff line counts in parentheses):

  1. BUILD (3)
  2. CMakeLists.txt (35)
  3. Package.swift (1)
  4. bazel/grpc_deps.bzl (12)
  5. build_autogenerated.yaml (15)
  6. examples/cpp/otel/codelab/greeter_callback_client_solution.cc (1)
  7. gRPC-C++.podspec (2)
  8. gRPC-Core.podspec (2)
  9. grpc.gemspec (1)
  10. package.xml (1)
  11. src/core/BUILD (11)
  12. src/core/ext/transport/chaotic_good/client_transport.cc (41)
  13. src/core/ext/transport/chaotic_good/frame.h (3)
  14. src/core/ext/transport/chaotic_good/server_transport.cc (58)
  15. src/core/ext/transport/chaotic_good/server_transport.h (5)
  16. src/core/ext/transport/chttp2/transport/hpack_parser.cc (6)
  17. src/core/ext/transport/chttp2/transport/hpack_parser_table.cc (42)
  18. src/core/ext/transport/chttp2/transport/hpack_parser_table.h (33)
  19. src/core/handshaker/security/secure_endpoint.cc (9)
  20. src/core/handshaker/security/security_handshaker.cc (131)
  21. src/core/lib/event_engine/ares_resolver.cc (2)
  22. src/core/lib/experiments/experiments.yaml (8)
  23. src/core/lib/iomgr/polling_entity.cc (12)
  24. src/core/lib/surface/call.cc (6)
  25. src/core/lib/transport/call_spine.h (36)
  26. src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc (29)
  27. src/core/telemetry/stats_data.cc (67)
  28. src/core/telemetry/stats_data.h (48)
  29. src/core/telemetry/stats_data.yaml (8)
  30. src/core/tsi/fake_transport_security.cc (3)
  31. src/core/util/unique_ptr_with_bitset.h (86)
  32. src/core/xds/xds_client/xds_client.cc (5)
  33. src/ruby/lib/grpc/generic/active_call.rb (13)
  34. src/ruby/spec/call_spec.rb (93)
  35. src/ruby/spec/channel_spec.rb (6)
  36. src/ruby/spec/client_server_spec.rb (655)
  37. src/ruby/spec/generic/active_call_spec.rb (150)
  38. src/ruby/spec/support/services.rb (3)
  39. test/core/end2end/fuzzers/BUILD (76)
  40. test/core/end2end/fuzzers/connector_fuzzer.cc (189)
  41. test/core/end2end/fuzzers/connector_fuzzer.h (34)
  42. test/core/end2end/fuzzers/connector_fuzzer_chttp2.cc (30)
  43. test/core/end2end/fuzzers/connector_fuzzer_chttp2_corpus/empty (1)
  44. test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec.cc (36)
  45. test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec_corpus/empty (1)
  46. test/core/end2end/fuzzers/fuzzer_input.proto (23)
  47. test/core/end2end/fuzzers/network_input.cc (50)
  48. test/core/end2end/fuzzers/network_input.h (6)
  49. test/core/end2end/fuzzers/server_fuzzer_chttp2_fake_creds.cc (30)
  50. test/core/end2end/fuzzers/server_fuzzer_chttp2_fake_creds_corpus/empty (1)
  51. test/core/end2end/tests/cancel_after_client_done.cc (2)
  52. test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.cc (75)
  53. test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.h (39)
  54. test/core/transport/chttp2/hpack_parser_table_test.cc (55)
  55. test/core/util/BUILD (13)
  56. test/core/util/unique_ptr_with_bitset_test.cc (60)
  57. tools/bazelify_tests/dockerimage_current_versions.bzl (1)
  58. tools/codegen/core/gen_stats_data.py (4)
  59. tools/dockerfile/distribtest/ruby_centos7_x64.current_version (1)
  60. tools/dockerfile/distribtest/ruby_centos7_x64/Dockerfile (33)
  61. tools/doxygen/Doxyfile.c++.internal (1)
  62. tools/doxygen/Doxyfile.core.internal (1)
  63. tools/interop_matrix/README.md (39)
  64. tools/interop_matrix/create_matrix_images.py (12)
  65. tools/interop_matrix/run_interop_matrix_tests.py (10)
  66. tools/remote_build/include/rbe_remote_execution.bazelrc (7)
  67. tools/run_tests/artifacts/distribtest_targets.py (1)
  68. tools/run_tests/generated/tests.json (24)

BUILD (3)

@ -4035,6 +4035,7 @@ grpc_cc_library(
deps = [
"gpr",
"tsi_base",
"//src/core:dump_args",
"//src/core:slice",
"//src/core:useful",
],
@ -4569,11 +4570,13 @@ grpc_cc_library(
"gpr_platform",
"grpc_trace",
"hpack_parse_result",
"stats",
"//src/core:hpack_constants",
"//src/core:metadata_batch",
"//src/core:no_destruct",
"//src/core:parsed_metadata",
"//src/core:slice",
"//src/core:unique_ptr_with_bitset",
],
)

CMakeLists.txt (generated, 35)

@ -1527,6 +1527,7 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_cxx try_join_test)
add_dependencies(buildtests_cxx try_seq_metadata_test)
add_dependencies(buildtests_cxx try_seq_test)
add_dependencies(buildtests_cxx unique_ptr_with_bitset_test)
add_dependencies(buildtests_cxx unique_type_name_test)
add_dependencies(buildtests_cxx unknown_frame_bad_client_test)
add_dependencies(buildtests_cxx uri_parser_test)
@ -32484,6 +32485,40 @@ target_link_libraries(try_seq_test
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(unique_ptr_with_bitset_test
test/core/util/unique_ptr_with_bitset_test.cc
)
target_compile_features(unique_ptr_with_bitset_test PUBLIC cxx_std_14)
target_include_directories(unique_ptr_with_bitset_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(unique_ptr_with_bitset_test
${_gRPC_ALLTARGETS_LIBRARIES}
gtest
absl::check
absl::bits
)
endif()
if(gRPC_BUILD_TESTS)

Package.swift (generated, 1)

@ -1950,6 +1950,7 @@ let package = Package(
"src/core/util/time_precise.cc",
"src/core/util/time_precise.h",
"src/core/util/tmpfile.h",
"src/core/util/unique_ptr_with_bitset.h",
"src/core/util/upb_utils.h",
"src/core/util/useful.h",
"src/core/util/windows/cpu.cc",

bazel/grpc_deps.bzl (12)

@ -23,10 +23,10 @@ def grpc_deps():
if "platforms" not in native.existing_rules():
http_archive(
name = "platforms",
sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74",
sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
urls = [
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
],
)
@ -168,10 +168,10 @@ def grpc_deps():
http_archive(
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
],
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
sha256 = "bc283cdfcd526a52c3201279cda4bc298652efa898b10b4db0837dc51652756f",
)
if "bazel_compdb" not in native.existing_rules():

build_autogenerated.yaml (15)

@ -1219,6 +1219,7 @@ libs:
- src/core/util/latent_see.h
- src/core/util/ring_buffer.h
- src/core/util/spinlock.h
- src/core/util/unique_ptr_with_bitset.h
- src/core/util/upb_utils.h
- src/core/xds/grpc/certificate_provider_store.h
- src/core/xds/grpc/file_watcher_certificate_provider_factory.h
@ -2705,6 +2706,7 @@ libs:
- src/core/util/latent_see.h
- src/core/util/ring_buffer.h
- src/core/util/spinlock.h
- src/core/util/unique_ptr_with_bitset.h
- src/core/util/upb_utils.h
- third_party/upb/upb/generated_code_support.h
src:
@ -20491,6 +20493,19 @@ targets:
- absl/status:statusor
- gpr
uses_polling: false
- name: unique_ptr_with_bitset_test
gtest: true
build: test
language: c++
headers:
- src/core/util/unique_ptr_with_bitset.h
src:
- test/core/util/unique_ptr_with_bitset_test.cc
deps:
- gtest
- absl/log:check
- absl/numeric:bits
uses_polling: false
- name: unique_type_name_test
gtest: true
build: test

examples/cpp/otel/codelab/greeter_callback_client_solution.cc (1)

@ -128,7 +128,6 @@ void RunClient(const std::string& target_str) {
int main(int argc, char** argv) {
absl::ParseCommandLine(argc, argv);
// CODELAB HINT : Add code to register OpenTelemetry plugin here.
// Register a global gRPC OpenTelemetry plugin configured with a prometheus
// exporter.
opentelemetry::exporter::metrics::PrometheusExporterOptions opts;

gRPC-C++.podspec (generated, 2)

@ -1326,6 +1326,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',
@ -2609,6 +2610,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',

gRPC-Core.podspec (generated, 2)

@ -2066,6 +2066,7 @@ Pod::Spec.new do |s|
'src/core/util/time_precise.cc',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/util/windows/cpu.cc',
@ -3389,6 +3390,7 @@ Pod::Spec.new do |s|
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/unique_ptr_with_bitset.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',

grpc.gemspec (generated, 1)

@ -1952,6 +1952,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/util/time_precise.cc )
s.files += %w( src/core/util/time_precise.h )
s.files += %w( src/core/util/tmpfile.h )
s.files += %w( src/core/util/unique_ptr_with_bitset.h )
s.files += %w( src/core/util/upb_utils.h )
s.files += %w( src/core/util/useful.h )
s.files += %w( src/core/util/windows/cpu.cc )

package.xml (generated, 1)

@ -1934,6 +1934,7 @@
<file baseinstalldir="/" name="src/core/util/time_precise.cc" role="src" />
<file baseinstalldir="/" name="src/core/util/time_precise.h" role="src" />
<file baseinstalldir="/" name="src/core/util/tmpfile.h" role="src" />
<file baseinstalldir="/" name="src/core/util/unique_ptr_with_bitset.h" role="src" />
<file baseinstalldir="/" name="src/core/util/upb_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/util/useful.h" role="src" />
<file baseinstalldir="/" name="src/core/util/windows/cpu.cc" role="src" />

src/core/BUILD (11)

@ -289,6 +289,17 @@ grpc_cc_library(
deps = ["//:gpr_platform"],
)
grpc_cc_library(
name = "unique_ptr_with_bitset",
hdrs = ["util/unique_ptr_with_bitset.h"],
external_deps = [
"absl/log:check",
"absl/numeric:bits",
],
language = "c++",
deps = ["//:gpr_platform"],
)
grpc_cc_library(
name = "examine_stack",
srcs = [

src/core/ext/transport/chaotic_good/client_transport.cc (41)

@ -254,7 +254,11 @@ uint32_t ChaoticGoodClientTransport::MakeStream(CallHandler call_handler) {
const uint32_t stream_id = next_stream_id_++;
stream_map_.emplace(stream_id, call_handler);
lock.Release();
call_handler.OnDone([this, stream_id]() {
call_handler.OnDone([this, stream_id](bool cancelled) {
if (cancelled) {
outgoing_frames_.MakeSender().UnbufferedImmediateSend(
CancelFrame{stream_id});
}
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
});
@ -317,24 +321,23 @@ void ChaoticGoodClientTransport::StartCall(CallHandler call_handler) {
"outbound_loop", [self = RefAsSubclass<ChaoticGoodClientTransport>(),
call_handler]() mutable {
const uint32_t stream_id = self->MakeStream(call_handler);
return Map(self->CallOutboundLoop(stream_id, call_handler),
[stream_id, sender = self->outgoing_frames_.MakeSender()](
absl::Status result) mutable {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Call " << stream_id
<< " finished with " << result.ToString();
if (!result.ok()) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Send cancel";
CancelFrame frame;
frame.stream_id = stream_id;
if (!sender.UnbufferedImmediateSend(std::move(frame))) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Send cancel failed";
}
}
return result;
});
return Map(
self->CallOutboundLoop(stream_id, call_handler),
[stream_id, sender = self->outgoing_frames_.MakeSender()](
absl::Status result) mutable {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Call " << stream_id << " finished with "
<< result.ToString();
if (!result.ok()) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Send cancel";
if (!sender.UnbufferedImmediateSend(CancelFrame{stream_id})) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Send cancel failed";
}
}
return result;
});
});
}

src/core/ext/transport/chaotic_good/frame.h (3)

@ -156,6 +156,9 @@ struct ServerFragmentFrame final : public FrameInterface {
};
struct CancelFrame final : public FrameInterface {
CancelFrame() = default;
explicit CancelFrame(uint32_t stream_id) : stream_id(stream_id) {}
absl::Status Deserialize(HPackParser* parser, const FrameHeader& header,
absl::BitGenRef bitsrc, Arena* arena,
BufferPair buffers, FrameLimits limits) override;

src/core/ext/transport/chaotic_good/server_transport.cc (58)

@ -72,8 +72,7 @@ auto ChaoticGoodServerTransport::TransportWriteLoop(
}
auto ChaoticGoodServerTransport::PushFragmentIntoCall(
CallInitiator call_initiator, ClientFragmentFrame frame,
uint32_t stream_id) {
CallInitiator call_initiator, ClientFragmentFrame frame) {
DCHECK(frame.headers == nullptr);
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: PushFragmentIntoCall: frame=" << frame.ToString();
@ -84,17 +83,15 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
std::move(frame.message->message));
},
[]() -> StatusFlag { return Success{}; }),
[this, call_initiator, end_of_stream = frame.end_of_stream,
stream_id](StatusFlag status) mutable -> StatusFlag {
[call_initiator, end_of_stream = frame.end_of_stream](
StatusFlag status) mutable -> StatusFlag {
if (!status.ok() && GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: Failed PushFragmentIntoCall";
}
if (end_of_stream || !status.ok()) {
call_initiator.FinishSends();
// We have received end_of_stream. It is now safe to remove
// the call from the stream map.
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
// Note that we cannot remove from the stream map yet, as we
// may yet receive a cancellation.
}
return Success{};
});
@ -102,17 +99,16 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
absl::optional<CallInitiator> call_initiator, absl::Status error,
ClientFragmentFrame frame, uint32_t stream_id) {
ClientFragmentFrame frame) {
return If(
call_initiator.has_value() && error.ok(),
[this, &call_initiator, &frame, &stream_id]() {
[this, &call_initiator, &frame]() {
return Map(
call_initiator->SpawnWaitable(
"push-fragment",
[call_initiator, frame = std::move(frame), stream_id,
this]() mutable {
return call_initiator->CancelIfFails(PushFragmentIntoCall(
*call_initiator, std::move(frame), stream_id));
[call_initiator, frame = std::move(frame), this]() mutable {
return call_initiator->CancelIfFails(
PushFragmentIntoCall(*call_initiator, std::move(frame)));
}),
[](StatusFlag status) { return StatusCast<absl::Status>(status); });
},
@ -255,8 +251,7 @@ auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToNewCall(
}
}
return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status),
std::move(fragment_frame),
frame_header.stream_id);
std::move(fragment_frame));
}
auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToExistingCall(
@ -271,8 +266,7 @@ auto ChaoticGoodServerTransport::DeserializeAndPushFragmentToExistingCall(
frame_header, std::move(buffers), arena, fragment_frame,
FrameLimits{1024 * 1024 * 1024, aligned_bytes_ - 1});
return MaybePushFragmentIntoCall(std::move(call_initiator), std::move(status),
std::move(fragment_frame),
frame_header.stream_id);
std::move(fragment_frame));
}
auto ChaoticGoodServerTransport::ReadOneFrame(ChaoticGoodTransport& transport) {
@ -305,6 +299,10 @@ auto ChaoticGoodServerTransport::ReadOneFrame(ChaoticGoodTransport& transport) {
[this, &frame_header]() {
absl::optional<CallInitiator> call_initiator =
ExtractStream(frame_header.stream_id);
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "Cancel stream " << frame_header.stream_id
<< (call_initiator.has_value() ? " (active)"
: " (not found)");
return If(
call_initiator.has_value(),
[&call_initiator]() {
@ -410,6 +408,8 @@ void ChaoticGoodServerTransport::AbortWithError() {
absl::optional<CallInitiator> ChaoticGoodServerTransport::LookupStream(
uint32_t stream_id) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " LookupStream " << stream_id;
MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id);
if (it == stream_map_.end()) return absl::nullopt;
@ -418,6 +418,8 @@ absl::optional<CallInitiator> ChaoticGoodServerTransport::LookupStream(
absl::optional<CallInitiator> ChaoticGoodServerTransport::ExtractStream(
uint32_t stream_id) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " ExtractStream " << stream_id;
MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id);
if (it == stream_map_.end()) return absl::nullopt;
@ -428,6 +430,8 @@ absl::optional<CallInitiator> ChaoticGoodServerTransport::ExtractStream(
absl::Status ChaoticGoodServerTransport::NewStream(
uint32_t stream_id, CallInitiator call_initiator) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << this << " NewStream " << stream_id;
MutexLock lock(&mu_);
auto it = stream_map_.find(stream_id);
if (it != stream_map_.end()) {
@ -437,10 +441,20 @@ absl::Status ChaoticGoodServerTransport::NewStream(
return absl::InternalError("Stream id is not increasing");
}
stream_map_.emplace(stream_id, call_initiator);
call_initiator.OnDone([this, stream_id]() {
MutexLock lock(&mu_);
stream_map_.erase(stream_id);
});
call_initiator.OnDone(
[self = RefAsSubclass<ChaoticGoodServerTransport>(), stream_id](bool) {
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD " << self.get() << " OnDone " << stream_id;
absl::optional<CallInitiator> call_initiator =
self->ExtractStream(stream_id);
if (call_initiator.has_value()) {
auto c = std::move(*call_initiator);
c.SpawnInfallible("cancel", [c]() mutable {
c.Cancel();
return Empty{};
});
}
});
return absl::OkStatus();
}

src/core/ext/transport/chaotic_good/server_transport.h (5)

@ -131,10 +131,9 @@ class ChaoticGoodServerTransport final : public ServerTransport {
FrameHeader frame_header, BufferPair buffers,
ChaoticGoodTransport& transport);
auto MaybePushFragmentIntoCall(absl::optional<CallInitiator> call_initiator,
absl::Status error, ClientFragmentFrame frame,
uint32_t stream_id);
absl::Status error, ClientFragmentFrame frame);
auto PushFragmentIntoCall(CallInitiator call_initiator,
ClientFragmentFrame frame, uint32_t stream_id);
ClientFragmentFrame frame);
RefCountedPtr<UnstartedCallDestination> call_destination_;
const RefCountedPtr<CallArenaAllocator> call_arena_allocator_;

src/core/ext/transport/chttp2/transport/hpack_parser.cc (6)

@ -713,7 +713,7 @@ class HPackParser::Parser {
LOG(INFO) << "HTTP:" << log_info_.stream_id << ":" << type << ":"
<< (log_info_.is_client ? "CLI" : "SVR") << ": "
<< memento.md.DebugString()
<< (memento.parse_status == nullptr
<< (memento.parse_status.get() == nullptr
? ""
: absl::StrCat(
" (parse error: ",
@ -724,7 +724,7 @@ class HPackParser::Parser {
void EmitHeader(const HPackTable::Memento& md) {
// Pass up to the transport
state_.frame_length += md.md.transport_size();
if (md.parse_status != nullptr) {
if (md.parse_status.get() != nullptr) {
// Reject any requests with invalid metadata.
input_->SetErrorAndContinueParsing(*md.parse_status);
}
@ -974,7 +974,7 @@ class HPackParser::Parser {
} else {
const auto* memento = absl::get<const HPackTable::Memento*>(state_.key);
key_string = memento->md.key();
if (state_.field_error.ok() && memento->parse_status != nullptr) {
if (state_.field_error.ok() && memento->parse_status.get() != nullptr) {
input_->SetErrorAndContinueParsing(*memento->parse_status);
}
}

src/core/ext/transport/chttp2/transport/hpack_parser_table.cc (42)

@ -37,6 +37,7 @@
#include "src/core/ext/transport/chttp2/transport/hpack_parse_result.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/telemetry/stats.h"
namespace grpc_core {
@ -47,6 +48,10 @@ void HPackTable::MementoRingBuffer::Put(Memento m) {
return entries_.push_back(std::move(m));
}
size_t index = (first_entry_ + num_entries_) % max_entries_;
if (timestamp_index_ == kNoTimestamp) {
timestamp_index_ = index;
timestamp_ = Timestamp::Now();
}
entries_[index] = std::move(m);
++num_entries_;
}
@ -54,12 +59,31 @@ void HPackTable::MementoRingBuffer::Put(Memento m) {
auto HPackTable::MementoRingBuffer::PopOne() -> Memento {
CHECK_GT(num_entries_, 0u);
size_t index = first_entry_ % max_entries_;
if (index == timestamp_index_) {
global_stats().IncrementHttp2HpackEntryLifetime(
(Timestamp::Now() - timestamp_).millis());
timestamp_index_ = kNoTimestamp;
}
++first_entry_;
--num_entries_;
return std::move(entries_[index]);
auto& entry = entries_[index];
if (!entry.parse_status.TestBit(Memento::kUsedBit)) {
global_stats().IncrementHttp2HpackMisses();
}
return std::move(entry);
}
auto HPackTable::MementoRingBuffer::Lookup(uint32_t index) const
auto HPackTable::MementoRingBuffer::Lookup(uint32_t index) -> const Memento* {
if (index >= num_entries_) return nullptr;
uint32_t offset = (num_entries_ - 1u - index + first_entry_) % max_entries_;
auto& entry = entries_[offset];
const bool was_used = entry.parse_status.TestBit(Memento::kUsedBit);
entry.parse_status.SetBit(Memento::kUsedBit);
if (!was_used) global_stats().IncrementHttp2HpackHits();
return &entry;
}
auto HPackTable::MementoRingBuffer::Peek(uint32_t index) const
-> const Memento* {
if (index >= num_entries_) return nullptr;
uint32_t offset = (num_entries_ - 1u - index + first_entry_) % max_entries_;
@ -79,14 +103,22 @@ void HPackTable::MementoRingBuffer::Rebuild(uint32_t max_entries) {
entries_.swap(entries);
}
void HPackTable::MementoRingBuffer::ForEach(
absl::FunctionRef<void(uint32_t, const Memento&)> f) const {
template <typename F>
void HPackTable::MementoRingBuffer::ForEach(F f) const {
uint32_t index = 0;
while (auto* m = Lookup(index++)) {
while (auto* m = Peek(index++)) {
f(index, *m);
}
}
HPackTable::MementoRingBuffer::~MementoRingBuffer() {
ForEach([](uint32_t, const Memento& m) {
if (!m.parse_status.TestBit(Memento::kUsedBit)) {
global_stats().IncrementHttp2HpackMisses();
}
});
}
// Evict one element from the table
void HPackTable::EvictOne() {
auto first_entry = entries_.PopOne();

src/core/ext/transport/chttp2/transport/hpack_parser_table.h (33)

@ -21,6 +21,8 @@
#include <stdint.h>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <vector>
@ -34,6 +36,7 @@
#include "src/core/lib/gprpp/no_destruct.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/parsed_metadata.h"
#include "src/core/util/unique_ptr_with_bitset.h"
namespace grpc_core {
@ -54,11 +57,14 @@ class HPackTable {
struct Memento {
ParsedMetadata<grpc_metadata_batch> md;
std::unique_ptr<HpackParseResult> parse_status;
// Alongside parse_status we store one bit indicating whether this memento
// has been looked up (and therefore consumed) or not.
UniquePtrWithBitset<HpackParseResult, 1> parse_status;
static const int kUsedBit = 0;
};
// Lookup, but don't ref.
const Memento* Lookup(uint32_t index) const {
const Memento* Lookup(uint32_t index) {
// Static table comes first, just return an entry from it.
// NB: This imposes the constraint that the first
// GRPC_CHTTP2_LAST_STATIC_ENTRY entries in the core static metadata table
@ -97,6 +103,14 @@ class HPackTable {
class MementoRingBuffer {
public:
MementoRingBuffer() {}
~MementoRingBuffer();
MementoRingBuffer(const MementoRingBuffer&) = delete;
MementoRingBuffer& operator=(const MementoRingBuffer&) = delete;
MementoRingBuffer(MementoRingBuffer&&) = default;
MementoRingBuffer& operator=(MementoRingBuffer&&) = default;
// Rebuild this buffer with a new max_entries_ size.
void Rebuild(uint32_t max_entries);
@ -109,10 +123,11 @@ class HPackTable {
Memento PopOne();
// Lookup the entry at index, or return nullptr if none exists.
const Memento* Lookup(uint32_t index) const;
const Memento* Lookup(uint32_t index);
const Memento* Peek(uint32_t index) const;
void ForEach(absl::FunctionRef<void(uint32_t dynamic_index, const Memento&)>
f) const;
template <typename F>
void ForEach(F f) const;
uint32_t max_entries() const { return max_entries_; }
uint32_t num_entries() const { return num_entries_; }
@ -126,11 +141,17 @@ class HPackTable {
// Maximum number of entries we could possibly fit in the table, given
// defined overheads.
uint32_t max_entries_ = hpack_constants::kInitialTableEntries;
// Which index holds a timestamp (or kNoTimestamp if none do).
static constexpr uint32_t kNoTimestamp =
std::numeric_limits<uint32_t>::max();
uint32_t timestamp_index_ = kNoTimestamp;
// The timestamp associated with timestamp_entry_.
Timestamp timestamp_;
std::vector<Memento> entries_;
};
const Memento* LookupDynamic(uint32_t index) const {
const Memento* LookupDynamic(uint32_t index) {
// Not static - find the value in the list of valid entries
const uint32_t tbl_index = index - (hpack_constants::kLastStaticEntry + 1);
return entries_.Lookup(tbl_index);

src/core/handshaker/security/secure_endpoint.cc (9)

@ -108,7 +108,6 @@ struct secure_endpoint : public grpc_endpoint {
}
~secure_endpoint() {
memory_owner.Reset();
tsi_frame_protector_destroy(protector);
tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
grpc_slice_buffer_destroy(&source_buffer);
@ -380,9 +379,12 @@ static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
static void on_write(void* user_data, grpc_error_handle error) {
secure_endpoint* ep = static_cast<secure_endpoint*>(user_data);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, std::exchange(ep->write_cb, nullptr),
std::move(error));
grpc_closure* cb = ep->write_cb;
ep->write_cb = nullptr;
SECURE_ENDPOINT_UNREF(ep, "write");
grpc_core::EnsureRunInExecCtx([cb, error = std::move(error)]() {
grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
});
}
static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
@ -505,6 +507,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
static void endpoint_destroy(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
ep->wrapped_ep.reset();
ep->memory_owner.Reset();
SECURE_ENDPOINT_UNREF(ep, "destroy");
}

src/core/handshaker/security/security_handshaker.cc (131)

@ -88,27 +88,27 @@ class SecurityHandshaker : public Handshaker {
private:
grpc_error_handle DoHandshakerNextLocked(const unsigned char* bytes_received,
size_t bytes_received_size);
size_t bytes_received_size)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
grpc_error_handle OnHandshakeNextDoneLocked(
tsi_result result, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
void HandshakeFailedLocked(absl::Status error);
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void HandshakeFailedLocked(absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void Finish(absl::Status status);
void OnHandshakeDataReceivedFromPeerFn(absl::Status error);
void OnHandshakeDataSentToPeerFn(absl::Status error);
static void OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error);
static void OnHandshakeDataSentToPeerFnScheduler(void* arg,
grpc_error_handle error);
void OnHandshakeDataReceivedFromPeerFnScheduler(grpc_error_handle error);
void OnHandshakeDataSentToPeerFnScheduler(grpc_error_handle error);
static void OnHandshakeNextDoneGrpcWrapper(
tsi_result result, void* user_data, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
static void OnPeerCheckedFn(void* arg, grpc_error_handle error);
void OnPeerCheckedInner(grpc_error_handle error);
void OnPeerCheckedFn(grpc_error_handle error);
size_t MoveReadBufferIntoHandshakeBuffer();
grpc_error_handle CheckPeerLocked();
grpc_error_handle CheckPeerLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// State set at creation time.
tsi_handshaker* handshaker_;
@ -125,13 +125,11 @@ class SecurityHandshaker : public Handshaker {
size_t handshake_buffer_size_;
unsigned char* handshake_buffer_;
SliceBuffer outgoing_;
grpc_closure on_handshake_data_sent_to_peer_;
grpc_closure on_handshake_data_received_from_peer_;
grpc_closure on_peer_checked_;
RefCountedPtr<grpc_auth_context> auth_context_;
tsi_handshaker_result* handshaker_result_ = nullptr;
size_t max_frame_size_ = 0;
std::string tsi_handshake_error_;
grpc_closure* on_peer_checked_ ABSL_GUARDED_BY(mu_) = nullptr;
};
SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
@ -143,10 +141,7 @@ SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
handshake_buffer_(
static_cast<uint8_t*>(gpr_malloc(handshake_buffer_size_))),
max_frame_size_(
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {
GRPC_CLOSURE_INIT(&on_peer_checked_, &SecurityHandshaker::OnPeerCheckedFn,
this, grpc_schedule_on_exec_ctx);
}
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {}
SecurityHandshaker::~SecurityHandshaker() {
tsi_handshaker_destroy(handshaker_);
@ -220,8 +215,9 @@ MakeChannelzSecurityFromAuthContext(grpc_auth_context* auth_context) {
} // namespace
void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
void SecurityHandshaker::OnPeerCheckedFn(grpc_error_handle error) {
MutexLock lock(&mu_);
on_peer_checked_ = nullptr;
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(error);
return;
@ -317,11 +313,6 @@ void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
Finish(absl::OkStatus());
}
void SecurityHandshaker::OnPeerCheckedFn(void* arg, grpc_error_handle error) {
RefCountedPtr<SecurityHandshaker>(static_cast<SecurityHandshaker*>(arg))
->OnPeerCheckedInner(error);
}
grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
tsi_peer peer;
tsi_result result =
@ -330,8 +321,12 @@ grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
return GRPC_ERROR_CREATE(absl::StrCat("Peer extraction failed (",
tsi_result_to_string(result), ")"));
}
on_peer_checked_ = NewClosure(
[self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
self->OnPeerCheckedFn(std::move(status));
});
connector_->check_peer(peer, args_->endpoint.get(), args_->args,
&auth_context_, &on_peer_checked_);
&auth_context_, on_peer_checked_);
grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
auth_context_.get(), GRPC_TRANSPORT_SECURITY_LEVEL_PROPERTY_NAME);
const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it);
@ -356,10 +351,10 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
CHECK_EQ(bytes_to_send_size, 0u);
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
return error;
}
@ -387,19 +382,19 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
reinterpret_cast<const char*>(bytes_to_send), bytes_to_send_size));
grpc_endpoint_write(
args_->endpoint.get(), outgoing_.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_sent_to_peer_,
&SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler, this,
grpc_schedule_on_exec_ctx),
NewClosure(
[self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
self->OnHandshakeDataSentToPeerFnScheduler(std::move(status));
}),
nullptr, /*max_frame_size=*/INT_MAX);
} else if (handshaker_result == nullptr) {
// There is nothing to send, but need to read from peer.
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
} else {
// Handshake has finished, check peer and so on.
@ -418,8 +413,6 @@ void SecurityHandshaker::OnHandshakeNextDoneGrpcWrapper(
result, bytes_to_send, bytes_to_send_size, handshaker_result);
if (!error.ok()) {
h->HandshakeFailedLocked(std::move(error));
} else {
h.release(); // Avoid unref
}
}
@ -429,13 +422,15 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
const unsigned char* bytes_to_send = nullptr;
size_t bytes_to_send_size = 0;
tsi_handshaker_result* hs_result = nullptr;
auto self = RefAsSubclass<SecurityHandshaker>();
tsi_result result = tsi_handshaker_next(
handshaker_, bytes_received, bytes_received_size, &bytes_to_send,
&bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper, this,
&tsi_handshake_error_);
&bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper,
self.get(), &tsi_handshake_error_);
if (result == TSI_ASYNC) {
// Handshaker operating asynchronously. Nothing else to do here;
// callback will be invoked in a TSI thread.
// Handshaker operating asynchronously. Callback will be invoked in a TSI
// thread. We no longer own the ref held in self.
self.release();
return absl::OkStatus();
}
// Handshaker returned synchronously. Invoke callback directly in
@ -449,18 +444,18 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataReceivedFromPeerFn(std::move(error));
});
grpc_error_handle error) {
args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->OnHandshakeDataReceivedFromPeerFn(std::move(error));
// Avoid destruction outside of an ExecCtx (since this is non-cancelable).
self.reset();
});
}
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
@ -473,8 +468,6 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
error = DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) {
HandshakeFailedLocked(std::move(error));
} else {
handshaker.release(); // Avoid unref
}
}
@ -483,18 +476,18 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataSentToPeerFn(std::move(error));
});
grpc_error_handle error) {
args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->OnHandshakeDataSentToPeerFn(std::move(error));
// Avoid destruction outside of an ExecCtx (since this is non-cancelable).
self.reset();
});
}
void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
@ -505,10 +498,10 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
if (handshaker_result_ == nullptr) {
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
} else {
error = CheckPeerLocked();
@ -517,7 +510,6 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
return;
}
}
handshaker.release(); // Avoid unref
}
//
@ -528,7 +520,7 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
MutexLock lock(&mu_);
if (!is_shutdown_) {
is_shutdown_ = true;
connector_->cancel_check_peer(&on_peer_checked_, std::move(error));
connector_->cancel_check_peer(on_peer_checked_, std::move(error));
tsi_handshaker_shutdown(handshaker_);
args_->endpoint.reset();
}
@ -537,7 +529,6 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
void SecurityHandshaker::DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) {
auto ref = Ref();
MutexLock lock(&mu_);
args_ = args;
on_handshake_done_ = std::move(on_handshake_done);
@ -546,8 +537,6 @@ void SecurityHandshaker::DoHandshake(
DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) {
HandshakeFailedLocked(error);
} else {
ref.release(); // Avoid unref
}
}
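
Note: the handshaker hunks above replace pre-allocated grpc_closure members and the manual `ref.release(); // Avoid unref` bookkeeping with one-shot closures that each own a strong reference. A minimal sketch of that ownership pattern with hypothetical names (MyHandshaker, MakeReadClosure, OnReadDone); the real code uses RefAsSubclass<SecurityHandshaker>() because the ref count lives in the Handshaker base class.

#include "absl/status/status.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/closure.h"

class MyHandshaker : public grpc_core::RefCounted<MyHandshaker> {
 public:
  // Builds a heap-allocated closure for one pending I/O operation. The lambda
  // captures a strong ref, so the object stays alive until the callback runs,
  // with no release()/unref dance on the success path.
  grpc_closure* MakeReadClosure() {
    return grpc_core::NewClosure([self = Ref()](absl::Status status) {
      self->OnReadDone(std::move(status));
      // `self` goes out of scope here and drops its ref.
    });
  }

 private:
  void OnReadDone(absl::Status /*status*/) {}
};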

src/core/lib/event_engine/ares_resolver.cc (2)

@ -97,6 +97,8 @@ absl::Status AresStatusToAbslStatus(int status, absl::string_view error_msg) {
return absl::UnimplementedError(error_msg);
case ARES_ENOTFOUND:
return absl::NotFoundError(error_msg);
case ARES_ECONNREFUSED:
return absl::UnavailableError(error_msg);
default:
return absl::UnknownError(error_msg);
}

src/core/lib/experiments/experiments.yaml (8)

@ -55,14 +55,14 @@
- name: canary_client_privacy
description:
If set, canary client privacy
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false
- name: client_privacy
description:
If set, client privacy
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false
@ -88,7 +88,7 @@
uses_polling: true
- name: free_large_allocator
description: If set, return all free bytes from a "big" allocator
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: [resource_quota_test]
- name: max_pings_wo_data_throttle
@ -138,7 +138,7 @@
- name: server_privacy
description:
If set, server privacy
expiry: 2024/08/01
expiry: 2024/12/01
owner: alishananda@google.com
test_tags: []
allow_in_fuzzing_config: false

src/core/lib/iomgr/polling_entity.cc (12)

@ -30,8 +30,12 @@
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set* pollset_set) {
grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set;
pollent.tag = GRPC_POLLS_POLLSET_SET;
if (pollset_set == nullptr) {
pollent.tag = GRPC_POLLS_NONE;
} else {
pollent.pollent.pollset_set = pollset_set;
pollent.tag = GRPC_POLLS_POLLSET_SET;
}
return pollent;
}
@ -73,6 +77,8 @@ void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else {
grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));
@ -93,6 +99,8 @@ void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else {
grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));
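
Note: a small sketch of what the polling_entity change above buys, assuming the internal iomgr header. With a null pollset_set the entity is now tagged GRPC_POLLS_NONE and add/del become no-ops; previously it would have been tagged GRPC_POLLS_POLLSET_SET with a null pointer and the CHECK_NE in these helpers would have fired.

#include "src/core/lib/iomgr/polling_entity.h"

void DemoNullPollsetSet(grpc_pollset_set* maybe_null_pss,
                        grpc_pollset_set* dst) {
  grpc_polling_entity pollent =
      grpc_polling_entity_create_from_pollset_set(maybe_null_pss);
  // If maybe_null_pss == nullptr, pollent.tag == GRPC_POLLS_NONE and both of
  // these calls do nothing instead of crashing on a null pollset_set.
  grpc_polling_entity_add_to_pollset_set(&pollent, dst);
  grpc_polling_entity_del_from_pollset_set(&pollent, dst);
}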

src/core/lib/surface/call.cc (6)

@ -337,9 +337,9 @@ void Call::HandleCompressionAlgorithmDisabled(
void Call::UpdateDeadline(Timestamp deadline) {
ReleasableMutexLock lock(&deadline_mu_);
if (GRPC_TRACE_FLAG_ENABLED(call)) {
VLOG(2) << "[call " << this
<< "] UpdateDeadline from=" << deadline_.ToString()
<< " to=" << deadline.ToString();
LOG(INFO) << "[call " << this
<< "] UpdateDeadline from=" << deadline_.ToString()
<< " to=" << deadline.ToString();
}
if (deadline >= deadline_) return;
if (deadline < Timestamp::Now()) {

src/core/lib/transport/call_spine.h (36)

@ -51,23 +51,24 @@ class CallSpine final : public Party {
std::move(client_initial_metadata), std::move(arena)));
}
~CallSpine() override {}
~CallSpine() override { CallOnDone(true); }
CallFilters& call_filters() { return call_filters_; }
// Add a callback to be called when server trailing metadata is received.
void OnDone(absl::AnyInvocable<void()> fn) {
void OnDone(absl::AnyInvocable<void(bool)> fn) {
if (on_done_ == nullptr) {
on_done_ = std::move(fn);
return;
}
on_done_ = [first = std::move(fn), next = std::move(on_done_)]() mutable {
first();
next();
on_done_ = [first = std::move(fn),
next = std::move(on_done_)](bool cancelled) mutable {
first(cancelled);
next(cancelled);
};
}
void CallOnDone() {
if (on_done_ != nullptr) std::exchange(on_done_, nullptr)();
void CallOnDone(bool cancelled) {
if (on_done_ != nullptr) std::exchange(on_done_, nullptr)(cancelled);
}
auto PullServerInitialMetadata() {
@ -75,7 +76,12 @@ class CallSpine final : public Party {
}
auto PullServerTrailingMetadata() {
return call_filters().PullServerTrailingMetadata();
return Map(
call_filters().PullServerTrailingMetadata(),
[this](ServerMetadataHandle result) {
CallOnDone(result->get(GrpcCallWasCancelled()).value_or(false));
return result;
});
}
auto PushClientToServerMessage(MessageHandle message) {
@ -190,7 +196,7 @@ class CallSpine final : public Party {
// Call filters/pipes part of the spine
CallFilters call_filters_;
absl::AnyInvocable<void()> on_done_{nullptr};
absl::AnyInvocable<void(bool)> on_done_{nullptr};
};
class CallInitiator {
@ -227,7 +233,9 @@ class CallInitiator {
spine_->PushServerTrailingMetadata(std::move(status));
}
void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); }
void OnDone(absl::AnyInvocable<void(bool)> fn) {
spine_->OnDone(std::move(fn));
}
template <typename PromiseFactory>
void SpawnGuarded(absl::string_view name, PromiseFactory promise_factory) {
@ -274,7 +282,9 @@ class CallHandler {
spine_->PushServerTrailingMetadata(std::move(status));
}
void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); }
void OnDone(absl::AnyInvocable<void(bool)> fn) {
spine_->OnDone(std::move(fn));
}
template <typename Promise>
auto CancelIfFails(Promise promise) {
@ -327,7 +337,9 @@ class UnstartedCallHandler {
spine_->PushServerTrailingMetadata(std::move(status));
}
void OnDone(absl::AnyInvocable<void()> fn) { spine_->OnDone(std::move(fn)); }
void OnDone(absl::AnyInvocable<void(bool)> fn) {
spine_->OnDone(std::move(fn));
}
template <typename Promise>
auto CancelIfFails(Promise promise) {
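
Note: the OnDone callbacks above now receive a `cancelled` flag, and CallOnDone is driven from PullServerTrailingMetadata via the GrpcCallWasCancelled metadata bit (or with `true` from the CallSpine destructor). A standalone sketch of how the chaining in OnDone composes; MiniSpine is illustrative only, not a gRPC type.

#include <iostream>
#include <utility>

#include "absl/functional/any_invocable.h"

class MiniSpine {
 public:
  // Mirrors CallSpine::OnDone: a later registration wraps the earlier chain,
  // so callbacks run newest-first, each seeing the same cancelled flag.
  void OnDone(absl::AnyInvocable<void(bool)> fn) {
    if (on_done_ == nullptr) {
      on_done_ = std::move(fn);
      return;
    }
    on_done_ = [first = std::move(fn),
                next = std::move(on_done_)](bool cancelled) mutable {
      first(cancelled);
      next(cancelled);
    };
  }
  void CallOnDone(bool cancelled) {
    if (on_done_ != nullptr) std::exchange(on_done_, nullptr)(cancelled);
  }

 private:
  absl::AnyInvocable<void(bool)> on_done_{nullptr};
};

int main() {
  MiniSpine spine;
  spine.OnDone([](bool c) { std::cout << "A cancelled=" << c << "\n"; });
  spine.OnDone([](bool c) { std::cout << "B cancelled=" << c << "\n"; });
  spine.CallOnDone(true);  // Prints B then A, both with cancelled=true.
}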

src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc (29)

@ -181,6 +181,22 @@ class GrpcAresQuery final {
const std::string name_;
};
static absl::Status AresStatusToAbslStatus(int status,
absl::string_view error_msg) {
switch (status) {
case ARES_ECANCELLED:
return absl::CancelledError(error_msg);
case ARES_ENOTIMP:
return absl::UnimplementedError(error_msg);
case ARES_ENOTFOUND:
return absl::NotFoundError(error_msg);
case ARES_ECONNREFUSED:
return absl::UnavailableError(error_msg);
default:
return absl::UnknownError(error_msg);
}
}
static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
grpc_ares_ev_driver* ev_driver)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
@ -715,8 +731,8 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
hr->qtype, hr->host, hr->is_balancer, ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_hostbyname_done_locked: %s", r,
error_msg.c_str());
grpc_error_handle error = GRPC_ERROR_CREATE(error_msg);
r->error = grpc_error_add_child(error, r->error);
r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
r->error);
}
destroy_hostbyname_request_locked(hr);
}
@ -761,8 +777,8 @@ static void on_srv_query_done_locked(void* arg, int status, int /*timeouts*/,
ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_srv_query_done_locked: %s", r,
error_msg.c_str());
grpc_error_handle error = GRPC_ERROR_CREATE(error_msg);
r->error = grpc_error_add_child(error, r->error);
r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
r->error);
}
delete q;
}
@ -780,7 +796,6 @@ static void on_txt_done_locked(void* arg, int status, int /*timeouts*/,
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr;
grpc_error_handle error;
if (status != ARES_SUCCESS) goto fail;
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked name=%s ARES_SUCCESS", r,
q->name().c_str());
@ -824,8 +839,8 @@ fail:
q->name(), ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked %s", r,
error_msg.c_str());
error = GRPC_ERROR_CREATE(error_msg);
r->error = grpc_error_add_child(error, r->error);
r->error =
grpc_error_add_child(AresStatusToAbslStatus(status, error_msg), r->error);
}
grpc_error_handle set_request_dns_server(grpc_ares_request* r,

src/core/telemetry/stats_data.cc (67)

@ -106,6 +106,20 @@ Histogram_10000_20 operator-(const Histogram_10000_20& left,
}
return result;
}
void HistogramCollector_1800000_40::Collect(
Histogram_1800000_40* result) const {
for (int i = 0; i < 40; i++) {
result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed);
}
}
Histogram_1800000_40 operator-(const Histogram_1800000_40& left,
const Histogram_1800000_40& right) {
Histogram_1800000_40 result;
for (int i = 0; i < 40; i++) {
result.buckets_[i] = left.buckets_[i] - right.buckets_[i];
}
return result;
}
const absl::string_view
GlobalStats::counter_name[static_cast<int>(Counter::COUNT)] = {
"client_calls_created",
@ -123,6 +137,8 @@ const absl::string_view
"http2_writes_begun",
"http2_transport_stalls",
"http2_stream_stalls",
"http2_hpack_hits",
"http2_hpack_misses",
"cq_pluck_creates",
"cq_next_creates",
"cq_callback_creates",
@ -161,6 +177,8 @@ const absl::string_view GlobalStats::counter_doc[static_cast<int>(
"control window",
"Number of times sending was completely stalled by the stream flow control "
"window",
"Number of HPACK cache hits",
"Number of HPACK cache misses (entries added but never used)",
"Number of completion queues created for cq_pluck (indicates sync api "
"usage)",
"Number of completion queues created for cq_next (indicates cq async api "
@ -192,6 +210,7 @@ const absl::string_view
"tcp_read_offer_iov_size",
"http2_send_message_size",
"http2_metadata_size",
"http2_hpack_entry_lifetime",
"wrr_subchannel_list_size",
"wrr_subchannel_ready_size",
"work_serializer_run_time_ms",
@ -223,6 +242,7 @@ const absl::string_view GlobalStats::histogram_doc[static_cast<int>(
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
"Number of bytes consumed by metadata, according to HPACK accounting rules",
"Lifetime of HPACK entries in the cache (in milliseconds)",
"Number of subchannels in a subchannel list at picker creation time",
"Number of READY subchannels in a subchannel list at picker creation time",
"Number of milliseconds work serializers run for",
@ -278,6 +298,15 @@ const int kStatsTable10[21] = {0, 1, 2, 4, 7, 12, 19,
const uint8_t kStatsTable11[23] = {3, 3, 4, 5, 5, 6, 7, 8,
9, 9, 10, 11, 12, 12, 13, 14,
15, 15, 16, 17, 18, 18, 19};
const int kStatsTable12[41] = {
0, 1, 2, 3, 5, 8, 12, 18, 26,
37, 53, 76, 108, 153, 217, 308, 436, 617,
873, 1235, 1748, 2473, 3499, 4950, 7003, 9907, 14015,
19825, 28044, 39670, 56116, 79379, 112286, 158835, 224680, 317821,
449574, 635945, 899575, 1272492, 1800000};
const uint8_t kStatsTable13[37] = {
4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
} // namespace
int Histogram_100000_20::BucketFor(int value) {
if (value < 3) {
@ -405,6 +434,29 @@ int Histogram_10000_20::BucketFor(int value) {
}
}
}
int Histogram_1800000_40::BucketFor(int value) {
if (value < 4) {
if (value < 0) {
return 0;
} else {
return value;
}
} else {
if (value < 1048577) {
DblUint val;
val.dbl = value;
const int bucket =
kStatsTable13[((val.uint - 4616189618054758400ull) >> 51)];
return bucket - (value < kStatsTable12[bucket]);
} else {
if (value < 1272492) {
return 38;
} else {
return 39;
}
}
}
}
GlobalStats::GlobalStats()
: client_calls_created{0},
server_calls_created{0},
@ -421,6 +473,8 @@ GlobalStats::GlobalStats()
http2_writes_begun{0},
http2_transport_stalls{0},
http2_stream_stalls{0},
http2_hpack_hits{0},
http2_hpack_misses{0},
cq_pluck_creates{0},
cq_next_creates{0},
cq_callback_creates{0},
@ -466,6 +520,9 @@ HistogramView GlobalStats::histogram(Histogram which) const {
case Histogram::kHttp2MetadataSize:
return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26,
http2_metadata_size.buckets()};
case Histogram::kHttp2HpackEntryLifetime:
return HistogramView{&Histogram_1800000_40::BucketFor, kStatsTable12, 40,
http2_hpack_entry_lifetime.buckets()};
case Histogram::kWrrSubchannelListSize:
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20,
wrr_subchannel_list_size.buckets()};
@ -560,6 +617,10 @@ std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
data.http2_transport_stalls.load(std::memory_order_relaxed);
result->http2_stream_stalls +=
data.http2_stream_stalls.load(std::memory_order_relaxed);
result->http2_hpack_hits +=
data.http2_hpack_hits.load(std::memory_order_relaxed);
result->http2_hpack_misses +=
data.http2_hpack_misses.load(std::memory_order_relaxed);
result->cq_pluck_creates +=
data.cq_pluck_creates.load(std::memory_order_relaxed);
result->cq_next_creates +=
@ -598,6 +659,8 @@ std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
data.tcp_read_offer_iov_size.Collect(&result->tcp_read_offer_iov_size);
data.http2_send_message_size.Collect(&result->http2_send_message_size);
data.http2_metadata_size.Collect(&result->http2_metadata_size);
data.http2_hpack_entry_lifetime.Collect(
&result->http2_hpack_entry_lifetime);
data.wrr_subchannel_list_size.Collect(&result->wrr_subchannel_list_size);
data.wrr_subchannel_ready_size.Collect(&result->wrr_subchannel_ready_size);
data.work_serializer_run_time_ms.Collect(
@ -664,6 +727,8 @@ std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
result->http2_transport_stalls =
http2_transport_stalls - other.http2_transport_stalls;
result->http2_stream_stalls = http2_stream_stalls - other.http2_stream_stalls;
result->http2_hpack_hits = http2_hpack_hits - other.http2_hpack_hits;
result->http2_hpack_misses = http2_hpack_misses - other.http2_hpack_misses;
result->cq_pluck_creates = cq_pluck_creates - other.cq_pluck_creates;
result->cq_next_creates = cq_next_creates - other.cq_next_creates;
result->cq_callback_creates = cq_callback_creates - other.cq_callback_creates;
@ -695,6 +760,8 @@ std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
result->http2_send_message_size =
http2_send_message_size - other.http2_send_message_size;
result->http2_metadata_size = http2_metadata_size - other.http2_metadata_size;
result->http2_hpack_entry_lifetime =
http2_hpack_entry_lifetime - other.http2_hpack_entry_lifetime;
result->wrr_subchannel_list_size =
wrr_subchannel_list_size - other.wrr_subchannel_list_size;
result->wrr_subchannel_ready_size =

src/core/telemetry/stats_data.h (48)

@ -35,6 +35,7 @@ class Histogram_100000_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_100000_20 operator-(const Histogram_100000_20& left,
const Histogram_100000_20& right);
@ -58,6 +59,7 @@ class Histogram_65536_26 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 26; }
friend Histogram_65536_26 operator-(const Histogram_65536_26& left,
const Histogram_65536_26& right);
@ -81,6 +83,7 @@ class Histogram_100_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_100_20 operator-(const Histogram_100_20& left,
const Histogram_100_20& right);
@ -104,6 +107,7 @@ class Histogram_16777216_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_16777216_20 operator-(const Histogram_16777216_20& left,
const Histogram_16777216_20& right);
@ -127,6 +131,7 @@ class Histogram_80_10 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 10; }
friend Histogram_80_10 operator-(const Histogram_80_10& left,
const Histogram_80_10& right);
@ -150,6 +155,7 @@ class Histogram_10000_20 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 20; }
friend Histogram_10000_20 operator-(const Histogram_10000_20& left,
const Histogram_10000_20& right);
@ -168,6 +174,30 @@ class HistogramCollector_10000_20 {
private:
std::atomic<uint64_t> buckets_[20]{};
};
class HistogramCollector_1800000_40;
class Histogram_1800000_40 {
public:
static int BucketFor(int value);
const uint64_t* buckets() const { return buckets_; }
size_t bucket_count() const { return 40; }
friend Histogram_1800000_40 operator-(const Histogram_1800000_40& left,
const Histogram_1800000_40& right);
private:
friend class HistogramCollector_1800000_40;
uint64_t buckets_[40]{};
};
class HistogramCollector_1800000_40 {
public:
void Increment(int value) {
buckets_[Histogram_1800000_40::BucketFor(value)].fetch_add(
1, std::memory_order_relaxed);
}
void Collect(Histogram_1800000_40* result) const;
private:
std::atomic<uint64_t> buckets_[40]{};
};
struct GlobalStats {
enum class Counter {
kClientCallsCreated,
@ -185,6 +215,8 @@ struct GlobalStats {
kHttp2WritesBegun,
kHttp2TransportStalls,
kHttp2StreamStalls,
kHttp2HpackHits,
kHttp2HpackMisses,
kCqPluckCreates,
kCqNextCreates,
kCqCallbackCreates,
@ -213,6 +245,7 @@ struct GlobalStats {
kTcpReadOfferIovSize,
kHttp2SendMessageSize,
kHttp2MetadataSize,
kHttp2HpackEntryLifetime,
kWrrSubchannelListSize,
kWrrSubchannelReadySize,
kWorkSerializerRunTimeMs,
@ -259,6 +292,8 @@ struct GlobalStats {
uint64_t http2_writes_begun;
uint64_t http2_transport_stalls;
uint64_t http2_stream_stalls;
uint64_t http2_hpack_hits;
uint64_t http2_hpack_misses;
uint64_t cq_pluck_creates;
uint64_t cq_next_creates;
uint64_t cq_callback_creates;
@ -287,6 +322,7 @@ struct GlobalStats {
Histogram_80_10 tcp_read_offer_iov_size;
Histogram_16777216_20 http2_send_message_size;
Histogram_65536_26 http2_metadata_size;
Histogram_1800000_40 http2_hpack_entry_lifetime;
Histogram_10000_20 wrr_subchannel_list_size;
Histogram_10000_20 wrr_subchannel_ready_size;
Histogram_100000_20 work_serializer_run_time_ms;
@ -367,6 +403,12 @@ class GlobalStatsCollector {
data_.this_cpu().http2_stream_stalls.fetch_add(1,
std::memory_order_relaxed);
}
void IncrementHttp2HpackHits() {
data_.this_cpu().http2_hpack_hits.fetch_add(1, std::memory_order_relaxed);
}
void IncrementHttp2HpackMisses() {
data_.this_cpu().http2_hpack_misses.fetch_add(1, std::memory_order_relaxed);
}
void IncrementCqPluckCreates() {
data_.this_cpu().cq_pluck_creates.fetch_add(1, std::memory_order_relaxed);
}
@ -447,6 +489,9 @@ class GlobalStatsCollector {
void IncrementHttp2MetadataSize(int value) {
data_.this_cpu().http2_metadata_size.Increment(value);
}
void IncrementHttp2HpackEntryLifetime(int value) {
data_.this_cpu().http2_hpack_entry_lifetime.Increment(value);
}
void IncrementWrrSubchannelListSize(int value) {
data_.this_cpu().wrr_subchannel_list_size.Increment(value);
}
@ -526,6 +571,8 @@ class GlobalStatsCollector {
std::atomic<uint64_t> http2_writes_begun{0};
std::atomic<uint64_t> http2_transport_stalls{0};
std::atomic<uint64_t> http2_stream_stalls{0};
std::atomic<uint64_t> http2_hpack_hits{0};
std::atomic<uint64_t> http2_hpack_misses{0};
std::atomic<uint64_t> cq_pluck_creates{0};
std::atomic<uint64_t> cq_next_creates{0};
std::atomic<uint64_t> cq_callback_creates{0};
@ -551,6 +598,7 @@ class GlobalStatsCollector {
HistogramCollector_80_10 tcp_read_offer_iov_size;
HistogramCollector_16777216_20 http2_send_message_size;
HistogramCollector_65536_26 http2_metadata_size;
HistogramCollector_1800000_40 http2_hpack_entry_lifetime;
HistogramCollector_10000_20 wrr_subchannel_list_size;
HistogramCollector_10000_20 wrr_subchannel_ready_size;
HistogramCollector_100000_20 work_serializer_run_time_ms;

@ -80,6 +80,14 @@
max: 65536
buckets: 26
doc: Number of bytes consumed by metadata, according to HPACK accounting rules
- counter: http2_hpack_hits
doc: Number of HPACK cache hits
- counter: http2_hpack_misses
doc: Number of HPACK cache misses (entries added but never used)
- histogram: http2_hpack_entry_lifetime
doc: Lifetime of HPACK entries in the cache (in milliseconds)
max: 1800000
buckets: 40
# completion queues
- counter: cq_pluck_creates
doc: Number of completion queues created for cq_pluck (indicates sync api usage)
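For orientation, a hedged sketch of how the new HPACK stats above might be recorded: the exact call sites and the entry_insertion_time variable are assumptions for illustration, with global_stats() being the existing accessor in src/core/telemetry.

// Illustrative only: count a dynamic-table hit or miss, and record an evicted
// entry's age in milliseconds into the new 40-bucket histogram (max 30 min).
grpc_core::global_stats().IncrementHttp2HpackHits();
grpc_core::global_stats().IncrementHttp2HpackMisses();
grpc_core::global_stats().IncrementHttp2HpackEntryLifetime(
    (grpc_core::Timestamp::Now() - entry_insertion_time).millis());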

@ -28,6 +28,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/dump_args.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/transport_security_grpc.h"
@ -210,6 +211,8 @@ static tsi_result tsi_fake_frame_decode(const unsigned char* incoming_bytes,
frame->offset += to_read_size;
available_size -= to_read_size;
frame->size = load32_little_endian(frame->data);
if (frame->size < 4) return TSI_DATA_CORRUPTED;
if (frame->size > 16 * 1024 * 1024) return TSI_DATA_CORRUPTED;
tsi_fake_frame_ensure_size(frame);
}
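For context, a small sketch of the framing these new bounds checks guard: the fake security transport prefixes each frame with a 4-byte little-endian size that counts the prefix itself, so anything below 4 (or implausibly large) is rejected as corrupt. The helper below is illustrative, not part of the diff.

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// Build a well-formed fake-security frame: 4-byte little-endian size
// (payload length + 4 for the prefix itself) followed by the payload bytes.
std::vector<unsigned char> MakeFakeFrame(const std::string& payload) {
  const uint32_t size = static_cast<uint32_t>(payload.size()) + 4;
  std::vector<unsigned char> frame(4 + payload.size());
  frame[0] = size & 0xff;
  frame[1] = (size >> 8) & 0xff;
  frame[2] = (size >> 16) & 0xff;
  frame[3] = (size >> 24) & 0xff;
  std::memcpy(frame.data() + 4, payload.data(), payload.size());
  return frame;  // sizes < 4 or > 16 MiB now fail tsi_fake_frame_decode
}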

@ -0,0 +1,86 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
#define GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
#include <memory>
#include <utility>
#include "absl/log/check.h"
#include "absl/numeric/bits.h"
namespace grpc_core {
// Like std::unique_ptr, but also includes a small bitset stored in the lower
// bits of the underlying T*.
template <typename T, size_t kBits>
class UniquePtrWithBitset {
public:
UniquePtrWithBitset() : p_(0) {}
// NOLINTNEXTLINE(google-explicit-constructor)
UniquePtrWithBitset(std::nullptr_t) : p_(0) {}
explicit UniquePtrWithBitset(T* p) : p_(reinterpret_cast<uintptr_t>(p)) {}
// NOLINTNEXTLINE(google-explicit-constructor)
UniquePtrWithBitset(std::unique_ptr<T>&& p)
: UniquePtrWithBitset(p.release()) {}
~UniquePtrWithBitset() {
DCHECK_LE(kBits, static_cast<size_t>(absl::countr_zero(alignof(T))));
delete get();
}
UniquePtrWithBitset(const UniquePtrWithBitset&) = delete;
UniquePtrWithBitset& operator=(const UniquePtrWithBitset&) = delete;
UniquePtrWithBitset(UniquePtrWithBitset&& other) noexcept
: p_(std::exchange(other.p_, 0)) {}
UniquePtrWithBitset& operator=(UniquePtrWithBitset&& other) noexcept {
p_ = std::exchange(other.p_, 0);
return *this;
}
T* get() const { return reinterpret_cast<T*>(p_ & ~kBitMask); }
T* operator->() const { return get(); }
T& operator*() const { return *get(); }
explicit operator bool() const { return get() != nullptr; }
void reset(T* p = nullptr) {
uintptr_t bits = p_ & kBitMask;
delete get();
p_ = reinterpret_cast<uintptr_t>(p) | bits;
}
void SetBit(size_t bit) {
DCHECK_LT(bit, kBits);
p_ |= 1 << bit;
}
void ClearBit(size_t bit) {
DCHECK_LT(bit, kBits);
p_ &= ~(1 << bit);
}
bool TestBit(size_t bit) const {
DCHECK_LT(bit, kBits);
return p_ & (1 << bit);
}
friend bool operator==(const UniquePtrWithBitset& a,
const UniquePtrWithBitset& b) {
return a.p_ == b.p_;
}
private:
static constexpr uintptr_t kBitMask = (1 << kBits) - 1;
uintptr_t p_;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_UTIL_UNIQUE_PTR_WITH_BITSET_H
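A minimal usage sketch for the new helper, assuming a pointee whose alignment leaves at least one low bit free (the Entry type and the values are illustrative):

#include <memory>
// #include "src/core/util/unique_ptr_with_bitset.h"  // header added above

struct Entry { int value = 0; };  // alignof(Entry) >= 4, so low bits are free

void Demo() {
  grpc_core::UniquePtrWithBitset<Entry, 1> p(std::make_unique<Entry>());
  p->value = 42;            // behaves like std::unique_ptr<Entry>
  p.SetBit(0);              // stash a flag in the pointer's low bits
  if (p.TestBit(0)) p.ClearBit(0);
  p.reset();                // deletes the Entry; note reset() keeps the bits
}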

@ -1564,8 +1564,9 @@ XdsClient::XdsClient(
}
CHECK(bootstrap_ != nullptr);
if (bootstrap_->node() != nullptr) {
LOG(INFO) << "[xds_client " << this
<< "] xDS node ID: " << bootstrap_->node()->id();
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this
<< "] xDS node ID: " << bootstrap_->node()->id();
}
}

@ -44,8 +44,8 @@ module GRPC
include Core::CallOps
extend Forwardable
attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert
def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=,
:trailing_metadata, :status
def_delegators :@call, :cancel, :cancel_with_status, :metadata,
:write_flag, :write_flag=, :trailing_metadata, :status
# client_invoke begins a client invocation.
#
@ -620,6 +620,8 @@ module GRPC
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
def start_call(metadata = {})
# TODO(apolcyn): we should cancel and clean up the call in case this
# send initial MD op fails.
merge_metadata_to_send(metadata) && send_initial_metadata
end
@ -665,9 +667,10 @@ module GRPC
# Operation limits access to an ActiveCall's methods for use as
# a Operation on the client.
Operation = view_class(:cancel, :cancelled?, :deadline, :execute,
:metadata, :status, :start_call, :wait, :write_flag,
:write_flag=, :trailing_metadata)
# TODO(apolcyn): expose peer getter
Operation = view_class(:cancel, :cancel_with_status, :cancelled?, :deadline,
:execute, :metadata, :status, :start_call, :wait,
:write_flag, :write_flag=, :trailing_metadata)
# InterceptableView further limits access to an ActiveCall's methods
# for use in interceptors on the client, exposing only the deadline

@ -90,88 +90,101 @@ describe GRPC::Core::Call do
describe '#status' do
it 'can save the status and read it back' do
call = make_test_call
sts = Struct::Status.new(OK, 'OK')
expect { call.status = sts }.not_to raise_error
expect(call.status).to eq(sts)
make_test_call do |call|
sts = Struct::Status.new(OK, 'OK')
expect { call.status = sts }.not_to raise_error
expect(call.status).to eq(sts)
end
end
it 'must be set to a status' do
call = make_test_call
bad_sts = Object.new
expect { call.status = bad_sts }.to raise_error(TypeError)
make_test_call do |call|
bad_sts = Object.new
expect { call.status = bad_sts }.to raise_error(TypeError)
end
end
it 'can be set to nil' do
call = make_test_call
expect { call.status = nil }.not_to raise_error
make_test_call do |call|
expect { call.status = nil }.not_to raise_error
end
end
end
describe '#metadata' do
it 'can save the metadata hash and read it back' do
call = make_test_call
md = { 'k1' => 'v1', 'k2' => 'v2' }
expect { call.metadata = md }.not_to raise_error
expect(call.metadata).to be(md)
make_test_call do |call|
md = { 'k1' => 'v1', 'k2' => 'v2' }
expect { call.metadata = md }.not_to raise_error
expect(call.metadata).to be(md)
end
end
it 'must be set with a hash' do
call = make_test_call
bad_md = Object.new
expect { call.metadata = bad_md }.to raise_error(TypeError)
make_test_call do |call|
bad_md = Object.new
expect { call.metadata = bad_md }.to raise_error(TypeError)
end
end
it 'can be set to nil' do
call = make_test_call
expect { call.metadata = nil }.not_to raise_error
make_test_call do |call|
expect { call.metadata = nil }.not_to raise_error
end
end
end
describe '#set_credentials!' do
it 'can set a valid CallCredentials object' do
call = make_test_call
auth_proc = proc { { 'plugin_key' => 'plugin_value' } }
creds = GRPC::Core::CallCredentials.new auth_proc
expect { call.set_credentials! creds }.not_to raise_error
make_test_call do |call|
auth_proc = proc { { 'plugin_key' => 'plugin_value' } }
creds = GRPC::Core::CallCredentials.new auth_proc
expect { call.set_credentials! creds }.not_to raise_error
end
end
end
describe '#cancel' do
it 'completes ok' do
call = make_test_call
expect { call.cancel }.not_to raise_error
make_test_call do |call|
expect { call.cancel }.not_to raise_error
end
end
it 'completes ok when the call is closed' do
call = make_test_call
call.close
expect { call.cancel }.not_to raise_error
make_test_call do |call|
call.close
expect { call.cancel }.not_to raise_error
end
end
end
describe '#cancel_with_status' do
it 'completes ok' do
call = make_test_call
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
expect do
call.cancel_with_status(0, nil)
end.to raise_error(TypeError)
make_test_call do |call|
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
expect do
call.cancel_with_status(0, nil)
end.to raise_error(TypeError)
end
end
it 'completes ok when the call is closed' do
call = make_test_call
call.close
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
make_test_call do |call|
call.close
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
end
end
end
def make_test_call
@ch.create_call(nil, nil, 'phony_method', nil, deadline)
call = @ch.create_call(nil, nil, 'phony_method', nil, deadline)
yield call
call.close
end
def deadline

@ -118,7 +118,8 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5
blk = proc do
ch.create_call(nil, nil, 'phony_method', nil, deadline)
call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
call.close
end
expect(&blk).to_not raise_error
end
@ -132,8 +133,9 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5
blk = proc do
ch.create_call(nil, nil, 'phony_method', nil, deadline)
call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
STDERR.puts "#{Time.now}: created call"
call.close
end
expect(&blk).to raise_error(RuntimeError)
STDERR.puts "#{Time.now}: finished: raises an error if called on a closed channel"

@ -16,36 +16,8 @@ require 'spec_helper'
include GRPC::Core
shared_context 'setup: tags' do
let(:sent_message) { 'sent message' }
let(:reply_text) { 'the reply' }
def deadline
Time.now + 5
end
def server_allows_client_to_proceed(metadata = {})
recvd_rpc = @server.request_call
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
ops = { CallOps::SEND_INITIAL_METADATA => metadata }
server_batch = server_call.run_batch(ops)
expect(server_batch.send_metadata).to be true
server_call
end
def new_client_call
@ch.create_call(nil, nil, '/method', nil, deadline)
end
def ok_status
Struct::Status.new(StatusCodes::OK, 'OK')
end
end
shared_examples 'basic GRPC message delivery is OK' do
include GRPC::Core
include_context 'setup: tags'
context 'the test channel' do
it 'should have a target' do
@ -53,272 +25,45 @@ shared_examples 'basic GRPC message delivery is OK' do
end
end
context 'a client call' do
it 'should have a peer' do
expect(new_client_call.peer).to be_a(String)
end
end
it 'calls have peer info' do
call = new_client_call
expect(call.peer).to be_a(String)
end
it 'servers receive requests from clients and can respond' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'responses written by servers are received by the client' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq(reply_text)
expect(final_client_batch.status.code).to eq(0)
end
it 'compressed messages can be sent and received' do
call = new_client_call
server_call = nil
long_request_str = '0' * 2000
long_response_str = '1' * 2000
md = { 'grpc-internal-encoding-request' => 'gzip' }
server_thread = Thread.new do
server_call = server_allows_client_to_proceed(md)
it 'unary calls work' do
run_services_on_server(@server, services: [EchoService]) do
call = @stub.an_rpc(EchoMsg.new, return_op: true)
expect(call.execute).to be_a(EchoMsg)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_MESSAGE => long_request_str,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(long_request_str)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => long_response_str,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq long_response_str
expect(final_client_batch.status.code).to eq(0)
end
it 'servers can ignore a client write and send a status' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
it 'unary calls work when enabling compression' do
run_services_on_server(@server, services: [EchoService]) do
long_request_str = '0' * 2000
md = { 'grpc-internal-encoding-request' => 'gzip' }
call = @stub.an_rpc(EchoMsg.new(msg: long_request_str),
return_op: true,
metadata: md)
response = call.execute
expect(response).to be_a(EchoMsg)
expect(response.msg).to eq(long_request_str)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
the_status = Struct::Status.new(StatusCodes::OK, 'OK')
server_thread.join
server_ops = {
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq nil
expect(server_batch.send_status).to be true
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'completes calls by sending status to client and server' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
# confirm the server can read the inbound message and respond
the_status = Struct::Status.new(StatusCodes::OK, 'OK', {})
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq sent_message
server_ops = {
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_status).to be true
expect(server_batch.send_message).to be true
# confirm the client can receive the server response and status.
client_ops = {
CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.send_close).to be true
expect(final_client_batch.message).to eq reply_text
expect(final_client_batch.status).to eq the_status
# confirm the server can receive the client close.
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
final_server_batch = server_call.run_batch(server_ops)
expect(final_server_batch.send_close).to be true
end
def client_cancel_test(cancel_proc, expected_code,
expected_details)
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
call = @stub.an_rpc(EchoMsg.new, return_op: true)
run_services_on_server(@server, services: [EchoService]) do
# start the call, but don't send a message yet
call.start_call
# cancel the call
cancel_proc.call(call)
# check the client's status
failed = false
begin
call.execute
rescue GRPC::BadStatus => e
failed = true
expect(e.code).to be expected_code
expect(e.details).to eq expected_details
end
expect(failed).to be(true)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::RECV_INITIAL_METADATA => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.metadata).to eq({})
cancel_proc.call(call)
server_thread.join
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
client_ops = {
CallOps::RECV_STATUS_ON_CLIENT => {}
}
client_batch = call.run_batch(client_ops)
expect(client_batch.status.code).to be expected_code
expect(client_batch.status.details).to eq expected_details
end
it 'clients can cancel a call on the server' do
@ -344,8 +89,6 @@ shared_examples 'basic GRPC message delivery is OK' do
end
shared_examples 'GRPC metadata delivery works OK' do
include_context 'setup: tags'
describe 'from client => server' do
before(:example) do
n = 7 # arbitrary number of metadata
@ -364,53 +107,31 @@ shared_examples 'GRPC metadata delivery works OK' do
it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md|
call = new_client_call
client_ops = {
CallOps::SEND_INITIAL_METADATA => md
}
blk = proc do
call.run_batch(client_ops)
# NOTE: no need to run a server in this test b/c the failure
# happens while validating metadata to send.
failed = false
begin
@stub.an_rpc(EchoMsg.new, metadata: md)
rescue TypeError => e
failed = true
expect(e.message).to eq('grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
end
expect(&blk).to raise_error
expect(failed).to be(true)
end
end
it 'sends all the metadata pairs when keys and values are valid' do
@valid_metadata.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@valid_metadata.each_with_index do |md, i|
expect(@stub.an_rpc(EchoMsg.new, metadata: md)).to be_a(EchoMsg)
# confirm the server can receive the client metadata
# finish the call
expect(service.received_md.length).to eq(i + 1)
md.each do |k, v|
expect(service.received_md[i][k.to_s]).to eq(v)
end
end
call = new_client_call
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
# finish the call
final_server_batch = recvd_rpc.call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
end
end
@ -432,120 +153,61 @@ shared_examples 'GRPC metadata delivery works OK' do
end
it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil
}
call.run_batch(client_ops)
# server gets the invocation
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_ops = {
CallOps::SEND_INITIAL_METADATA => md
}
blk = proc do
recvd_rpc.call.run_batch(server_ops)
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@bad_keys.each do |md|
proceed = Queue.new
server_exception = nil
service.on_call_started = proc do |call|
call.send_initial_metadata(md)
rescue TypeError => e
server_exception = e
ensure
proceed.push(1)
end
client_exception = nil
client_call = @stub.an_rpc(EchoMsg.new, return_op: true)
thr = Thread.new do
client_call.execute
rescue GRPC::BadStatus => e
client_exception = e
end
proceed.pop
# TODO(apolcyn): we shouldn't need this cancel here. It's
# only currently needed b/c the server does not seem to properly
# terminate the RPC if it fails to send initial metadata. That
# should be fixed, in which case this cancellation can be removed.
client_call.cancel
thr.join
p client_exception
expect(client_exception.nil?).to be(false)
expect(server_exception.nil?).to be(false)
expect(server_exception.message).to eq(
'grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
end
expect(&blk).to raise_error
# cancel the call so the server can shut down immediately
call.cancel
end
end
it 'sends an empty hash if no metadata is added' do
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
run_services_on_server(@server, services: [EchoService]) do
call = @stub.an_rpc(EchoMsg.new, return_op: true)
expect(call.execute).to be_a(EchoMsg)
expect(call.metadata).to eq({})
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
# receive close and send status to finish the call
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
# receive status to finish the call
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'sends all the pairs when keys and values are valid' do
@valid_metadata.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@valid_metadata.each do |md|
service.on_call_started = proc do |call|
call.send_initial_metadata(md)
end
call = @stub.an_rpc(EchoMsg.new, return_op: true)
call.execute
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(call.metadata).to eq(replace_symbols)
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(final_client_batch.metadata).to eq(replace_symbols)
expect(final_client_batch.status.code).to eq(0)
end
end
end
@ -554,16 +216,11 @@ end
describe 'the http client/server' do
before(:example) do
server_host = '0.0.0.0:0'
@server = new_core_server_for_testing(nil)
@server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, :this_port_is_insecure)
@server.start
@ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure)
end
after(:example) do
@ch.close
@server.shutdown_and_notify(deadline)
@server.close
@stub = EchoStub.new(
"0.0.0.0:#{server_port}", nil, channel_override: @ch)
end
it_behaves_like 'basic GRPC message delivery is OK' do
@ -574,8 +231,6 @@ describe 'the http client/server' do
end
describe 'the secure http client/server' do
include_context 'setup: tags'
def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata')
files = ['ca.pem', 'server1.key', 'server1.pem']
@ -587,17 +242,14 @@ describe 'the secure http client/server' do
server_host = '0.0.0.0:0'
server_creds = GRPC::Core::ServerCredentials.new(
nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
@server = new_core_server_for_testing(nil)
@server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, server_creds)
@server.start
args = { Channel::SSL_TARGET => 'foo.test.google.fr' }
@ch = Channel.new("0.0.0.0:#{server_port}", args,
GRPC::Core::ChannelCredentials.new(certs[0], nil, nil))
end
after(:example) do
@server.shutdown_and_notify(deadline)
@server.close
@ch = Channel.new(
"0.0.0.0:#{server_port}", args,
GRPC::Core::ChannelCredentials.new(certs[0], nil, nil))
@stub = EchoStub.new(
"0.0.0.0:#{server_port}", nil, channel_override: @ch)
end
it_behaves_like 'basic GRPC message delivery is OK' do
@ -606,59 +258,25 @@ describe 'the secure http client/server' do
it_behaves_like 'GRPC metadata delivery works OK' do
end
def credentials_update_test(creds_update_md)
auth_proc = proc { creds_update_md }
it 'modifies metadata with CallCredentials' do
# create call creds
auth_proc = proc { { 'k1' => 'v1' } }
call_creds = GRPC::Core::CallCredentials.new(auth_proc)
initial_md_key = 'k2'
initial_md_val = 'v2'
initial_md = { initial_md_key => initial_md_val }
expected_md = creds_update_md.clone
fail 'bad test param' unless expected_md[initial_md_key].nil?
expected_md[initial_md_key] = initial_md_val
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
# create arbitrary custom metadata
custom_md = { 'k2' => 'v2' }
# perform an RPC
echo_service = EchoService.new
run_services_on_server(@server, services: [echo_service]) do
expect(@stub.an_rpc(EchoMsg.new,
credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
end
# call creds metadata should be merged with custom MD
expect(echo_service.received_md.length).to eq(1)
expected_md = { 'k1' => 'v1', 'k2' => 'v2' }
expected_md.each do |k, v|
expect(echo_service.received_md[0][k]).to eq(v)
end
call = new_client_call
call.set_credentials! call_creds
client_batch = call.run_batch(
CallOps::SEND_INITIAL_METADATA => initial_md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[expected_md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
credentials_update_test_finish_call(call, recvd_rpc.call)
end
def credentials_update_test_finish_call(client_call, server_call)
final_server_batch = server_call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = client_call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'modifies metadata with CallCredentials' do
credentials_update_test('k1' => 'updated-v1')
end
it 'modifies large metadata with CallCredentials' do
@ -666,11 +284,34 @@ describe 'the secure http client/server' do
'00000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111',
)
md = {
k3: val_array,
k4: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey5: 'v1'
# create call creds
auth_proc = proc do
{
k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
}
end
call_creds = GRPC::Core::CallCredentials.new(auth_proc)
# create arbitrary custom metadata
custom_md = { k1: 'v1' }
# perform an RPC
echo_service = EchoService.new
run_services_on_server(@server, services: [echo_service]) do
expect(@stub.an_rpc(EchoMsg.new,
credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
end
# call creds metadata should be merged with custom MD
expect(echo_service.received_md.length).to eq(1)
expected_md = {
k1: 'v1',
k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
}
credentials_update_test(md)
expected_md.each do |k, v|
expect(echo_service.received_md[0][k.to_s]).to eq(v)
end
end
end

@ -55,17 +55,20 @@ describe GRPC::ActiveCall do
end
@ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil,
:this_channel_is_insecure)
@call = make_test_call
end
after(:each) do
@server.shutdown_and_notify(deadline)
@server.close
@server_thread.join
# Don't rely on GC to unref the call, since that can prevent
# the channel connectivity state polling thread from shutting down.
@call.close
end
describe 'restricted view methods' do
before(:each) do
@call = make_test_call
ActiveCall.client_invoke(@call)
@client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
@ -117,9 +120,8 @@ describe GRPC::ActiveCall do
describe '#remote_send' do
it 'allows a client to send a payload to the server', test: true do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -137,15 +139,14 @@ describe GRPC::ActiveCall do
expect(server_call.remote_read).to eq(msg)
# finish the call
server_call.send_initial_metadata
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call)
@call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(@call, recvd_call)
end
it 'marshals the payload using the marshal func' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, marshal, @pass_through, deadline)
client_call = ActiveCall.new(@call, marshal, @pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -161,23 +162,22 @@ describe GRPC::ActiveCall do
metadata_received: true)
expect(server_call.remote_read).to eq('marshalled:' + msg)
# finish the call
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call)
@call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(@call, recvd_call)
end
TEST_WRITE_FLAGS = [WriteFlags::BUFFER_HINT, WriteFlags::NO_COMPRESS]
TEST_WRITE_FLAGS.each do |f|
it "successfully makes calls with write_flag set to #{f}" do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, marshal,
client_call = ActiveCall.new(@call, marshal,
@pass_through, deadline)
msg = 'message is a string'
client_call.write_flag = f
client_call.remote_send(msg)
# flush the message in case writes are set to buffered
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
# confirm that the message was marshalled
recvd_rpc = @received_rpcs_queue.pop
@ -199,9 +199,8 @@ describe GRPC::ActiveCall do
describe 'sending initial metadata', send_initial_metadata: true do
it 'sends metadata before sending a message if it hasnt been sent yet' do
call = make_test_call
@client_call = ActiveCall.new(
call,
@call,
@pass_through,
@pass_through,
deadline,
@ -213,13 +212,13 @@ describe GRPC::ActiveCall do
message = 'phony message'
expect(call).to(
expect(@call).to(
receive(:run_batch)
.with(
hash_including(
CallOps::SEND_INITIAL_METADATA => metadata)).once)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_MESSAGE => message)).once)
@client_call.remote_send(message)
@ -228,14 +227,12 @@ describe GRPC::ActiveCall do
end
it 'doesnt send metadata if it thinks its already been sent' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline)
expect(@client_call.metadata_sent).to eql(true)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA)).never)
@ -243,9 +240,7 @@ describe GRPC::ActiveCall do
end
it 'sends metadata if it is explicitly sent and ok to do so' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline,
@ -257,7 +252,7 @@ describe GRPC::ActiveCall do
@client_call.merge_metadata_to_send(metadata)
expect(@client_call.metadata_to_send).to eq(metadata)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA =>
metadata)).once)
@ -265,9 +260,7 @@ describe GRPC::ActiveCall do
end
it 'explicit sending does nothing if metadata has already been sent' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline)
@ -284,7 +277,6 @@ describe GRPC::ActiveCall do
describe '#merge_metadata_to_send', merge_metadata_to_send: true do
it 'adds to existing metadata when there is existing metadata to send' do
call = make_test_call
starting_metadata = {
k1: 'key1_val',
k2: 'key2_val',
@ -292,7 +284,7 @@ describe GRPC::ActiveCall do
}
@client_call = ActiveCall.new(
call,
@call,
@pass_through, @pass_through,
deadline,
started: false,
@ -318,9 +310,8 @@ describe GRPC::ActiveCall do
end
it 'fails when initial metadata has already been sent' do
call = make_test_call
@client_call = ActiveCall.new(
call,
@call,
@pass_through,
@pass_through,
deadline,
@ -338,9 +329,8 @@ describe GRPC::ActiveCall do
describe '#client_invoke' do
it 'sends metadata to the server when present' do
call = make_test_call
metadata = { k1: 'v1', k2: 'v2' }
ActiveCall.client_invoke(call, metadata)
ActiveCall.client_invoke(@call, metadata)
recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call
expect(recvd_call).to_not be_nil
@ -349,15 +339,14 @@ describe GRPC::ActiveCall do
expect(recvd_rpc.metadata['k2']).to eq('v2')
# finish the call
recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => {})
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call)
@call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(@call, recvd_call)
end
end
describe '#send_status', send_status: true do
it 'works when no metadata or messages have been sent yet' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
recvd_rpc = @received_rpcs_queue.pop
server_call = ActiveCall.new(
@ -375,9 +364,8 @@ describe GRPC::ActiveCall do
describe '#remote_read', remote_read: true do
it 'reads the response sent by a server' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -385,13 +373,12 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('server_response')
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'saves no metadata when the server adds no metadata' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -401,13 +388,12 @@ describe GRPC::ActiveCall do
client_call.remote_read
expect(client_call.metadata).to eq({})
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'saves metadata added by the server' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -418,12 +404,11 @@ describe GRPC::ActiveCall do
expected = { 'k1' => 'v1', 'k2' => 'v2' }
expect(client_call.metadata).to eq(expected)
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'get a status from server when nothing else sent from server' do
client_call = make_test_call
ActiveCall.client_invoke(client_call)
ActiveCall.client_invoke(@call)
recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call
@ -438,22 +423,21 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'OK')
# Check that we can receive initial metadata and a status
client_call.run_batch(
@call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil)
batch_result = client_call.run_batch(
batch_result = @call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(OK)
end
it 'get a nil msg before a status when an OK status is sent' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
server_call.send_status(OK, 'OK')
@ -463,10 +447,9 @@ describe GRPC::ActiveCall do
end
it 'unmarshals the response using the unmarshal func' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
unmarshal = proc { |x| 'unmarshalled:' + x }
client_call = ActiveCall.new(call, @pass_through,
client_call = ActiveCall.new(@call, @pass_through,
unmarshal, deadline)
# confirm the client receives the unmarshalled message
@ -476,14 +459,13 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('unmarshalled:server_response')
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
end
describe '#each_remote_read' do
it 'creates an Enumerator' do
call = make_test_call
client_call = ActiveCall.new(call, @pass_through,
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
expect(client_call.each_remote_read).to be_a(Enumerator)
# finish the call
@ -491,9 +473,8 @@ describe GRPC::ActiveCall do
end
it 'the returned enumerator can read n responses' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
reply = 'server_response'
@ -506,18 +487,17 @@ describe GRPC::ActiveCall do
expect(e.next).to eq(reply)
end
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'returns an enumerator that stops after an OK Status' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
reply = 'server_response'
client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
e = client_call.each_remote_read
n = 3 # arbitrary value > 1
@ -532,14 +512,13 @@ describe GRPC::ActiveCall do
describe '#closing the call from the client' do
it 'finishes ok if the server sends a status response' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
@ -549,9 +528,8 @@ describe GRPC::ActiveCall do
end
it 'finishes ok if the server sends an early status response' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -560,15 +538,14 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error
expect { client_call.receive_and_check_status }.to_not raise_error
end
it 'finishes ok if SEND_CLOSE and RECV_STATUS has been sent' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -577,7 +554,7 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
expect do
call.run_batch(
@call.run_batch(
CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
end.to_not raise_error
@ -631,6 +608,7 @@ describe GRPC::ActiveCall do
batch_result = @client_call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(@server_status)
@client_call.close
end
it 'sends the initial metadata implicitly if not already sent' do

@ -41,14 +41,17 @@ class EchoService
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
rpc :a_client_streaming_rpc_unimplemented, stream(EchoMsg), EchoMsg
attr_reader :received_md
attr_accessor :on_call_started
def initialize(**kw)
@trailing_metadata = kw
@received_md = []
@on_call_started = nil
end
def an_rpc(req, call)
GRPC.logger.info('echo service received a request')
on_call_started&.call(call)
call.output_metadata.update(@trailing_metadata)
@received_md << call.metadata unless call.metadata.nil?
req

@ -168,6 +168,25 @@ grpc_proto_fuzzer(
],
)
grpc_proto_fuzzer(
name = "server_fuzzer_chttp2_fake_creds",
srcs = ["server_fuzzer_chttp2_fake_creds.cc"],
corpus = "server_fuzzer_chttp2_fake_creds_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":server_fuzzer",
"//:grpc",
],
)
grpc_proto_fuzzer(
name = "server_fuzzer_chaotic_good",
srcs = ["server_fuzzer_chaotic_good.cc"],
@ -187,3 +206,60 @@ grpc_proto_fuzzer(
"//src/core:chaotic_good_server",
],
)
grpc_cc_library(
name = "connector_fuzzer",
srcs = ["connector_fuzzer.cc"],
hdrs = ["connector_fuzzer.h"],
external_deps = ["absl/log:check"],
deps = [
"fuzzer_input_proto",
"fuzzing_common",
"network_input",
"//:gpr",
"//:grpc",
"//src/core:channel_args",
"//test/core/event_engine/fuzzing_event_engine",
"//test/core/test_util:fuzz_config_vars",
"//test/core/test_util:grpc_test_util",
"//test/core/test_util:grpc_test_util_base",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2",
srcs = ["connector_fuzzer_chttp2.cc"],
corpus = "connector_fuzzer_chttp2_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2_fakesec",
srcs = ["connector_fuzzer_chttp2_fakesec.cc"],
corpus = "connector_fuzzer_chttp2_fakesec_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)

@ -0,0 +1,189 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
#include "src/core/lib/address_utils/parse_address.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/env.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/timer_manager.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
#include "test/core/end2end/fuzzers/network_input.h"
#include "test/core/test_util/fuzz_config_vars.h"
#include "test/core/test_util/test_config.h"
bool squelch = true;
bool leak_check = true;
using ::grpc_event_engine::experimental::ChannelArgsEndpointConfig;
using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::FuzzingEventEngine;
using ::grpc_event_engine::experimental::GetDefaultEventEngine;
using ::grpc_event_engine::experimental::MockEndpointController;
using ::grpc_event_engine::experimental::SetEventEngineFactory;
using ::grpc_event_engine::experimental::URIToResolvedAddress;
namespace grpc_core {
namespace {
class ConnectorFuzzer {
public:
ConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector)
: make_security_connector_(make_security_connector),
engine_([actions = msg.event_engine_actions()]() {
SetEventEngineFactory([actions]() -> std::unique_ptr<EventEngine> {
return std::make_unique<FuzzingEventEngine>(
FuzzingEventEngine::Options(), actions);
});
return std::dynamic_pointer_cast<FuzzingEventEngine>(
GetDefaultEventEngine());
}()),
mock_endpoint_controller_(MockEndpointController::Create(engine_)),
connector_(make_connector()) {
CHECK(engine_);
for (const auto& input : msg.network_input()) {
network_inputs_.push(input);
}
grpc_timer_manager_set_start_threaded(false);
grpc_init();
ExecCtx exec_ctx;
Executor::SetThreadingAll(false);
listener_ =
engine_
->CreateListener(
[this](std::unique_ptr<EventEngine::Endpoint> endpoint,
MemoryAllocator) {
if (network_inputs_.empty()) return;
ScheduleWrites(network_inputs_.front(), std::move(endpoint),
engine_.get());
network_inputs_.pop();
},
[](absl::Status) {}, ChannelArgsEndpointConfig(ChannelArgs{}),
std::make_unique<MemoryQuota>("foo"))
.value();
if (msg.has_shutdown_connector() &&
msg.shutdown_connector().delay_ms() > 0) {
auto shutdown_connector = msg.shutdown_connector();
const auto delay = Duration::Milliseconds(shutdown_connector.delay_ms());
engine_->RunAfterExactly(delay, [this, shutdown_connector = std::move(
shutdown_connector)]() {
if (connector_ == nullptr) return;
connector_->Shutdown(absl::Status(
static_cast<absl::StatusCode>(shutdown_connector.shutdown_status()),
shutdown_connector.shutdown_message()));
});
}
// Abbreviated runtime for interpreting API actions, since we simply don't
// support many here.
uint64_t when_ms = 0;
for (const auto& action : msg.api_actions()) {
switch (action.type_case()) {
default:
break;
case api_fuzzer::Action::kSleepMs:
when_ms += action.sleep_ms();
break;
case api_fuzzer::Action::kResizeResourceQuota:
engine_->RunAfterExactly(
Duration::Milliseconds(when_ms),
[this, new_size = action.resize_resource_quota()]() {
resource_quota_->memory_quota()->SetSize(new_size);
});
when_ms += 1;
break;
}
}
}
~ConnectorFuzzer() {
listener_.reset();
connector_.reset();
mock_endpoint_controller_.reset();
engine_->TickUntilIdle();
grpc_shutdown_blocking();
engine_->UnsetGlobalHooks();
}
void Run() {
grpc_resolved_address addr;
CHECK(grpc_parse_uri(URI::Parse("ipv4:127.0.0.1:1234").value(), &addr));
CHECK_OK(
listener_->Bind(URIToResolvedAddress("ipv4:127.0.0.1:1234").value()));
CHECK_OK(listener_->Start());
OrphanablePtr<grpc_endpoint> endpoint(
mock_endpoint_controller_->TakeCEndpoint());
SubchannelConnector::Result result;
bool done = false;
auto channel_args = ChannelArgs{}.SetObject<EventEngine>(engine_).SetObject(
resource_quota_);
auto security_connector = make_security_connector_();
if (security_connector != nullptr) {
channel_args = channel_args.SetObject(std::move(security_connector));
}
connector_->Connect(
SubchannelConnector::Args{&addr, nullptr,
Timestamp::Now() + Duration::Seconds(20),
channel_args},
&result, NewClosure([&done, &result](grpc_error_handle status) {
done = true;
if (status.ok()) result.transport->Orphan();
}));
while (!done) {
engine_->Tick();
grpc_timer_manager_tick();
}
}
private:
RefCountedPtr<ResourceQuota> resource_quota_ =
MakeRefCounted<ResourceQuota>("fuzzer");
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector_;
std::shared_ptr<FuzzingEventEngine> engine_;
std::queue<fuzzer_input::NetworkInput> network_inputs_;
std::shared_ptr<MockEndpointController> mock_endpoint_controller_;
std::unique_ptr<EventEngine::Listener> listener_;
OrphanablePtr<SubchannelConnector> connector_;
};
} // namespace
void RunConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector) {
if (squelch && !GetEnv("GRPC_TRACE_FUZZER").has_value()) {
grpc_disable_all_absl_logs();
}
static const int once = []() {
ForceEnableExperiment("event_engine_client", true);
ForceEnableExperiment("event_engine_listener", true);
return 42;
}();
CHECK_EQ(once, 42); // avoid unused variable warning
ApplyFuzzConfigVars(msg.config_vars());
TestOnlyReloadExperimentsFromConfigVariables();
ConnectorFuzzer(msg, make_security_connector, make_connector).Run();
}
} // namespace grpc_core

@ -0,0 +1,34 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#define GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#include "absl/functional/function_ref.h"
#include "src/core/client_channel/connector.h"
#include "src/core/lib/security/security_connector/security_connector.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
namespace grpc_core {
void RunConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector);
}  // namespace grpc_core
#endif // GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H

@ -0,0 +1,30 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunConnectorFuzzer(
msg,
[]() {
return grpc_core::RefCountedPtr<grpc_channel_security_connector>();
},
[]() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -0,0 +1,36 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/security/security_connector/fake/fake_security_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunConnectorFuzzer(
msg,
[]() {
return grpc_fake_channel_security_connector_create(
grpc_core::RefCountedPtr<grpc_channel_credentials>(
grpc_fake_transport_security_credentials_create()),
nullptr, "foobar", grpc_core::ChannelArgs{});
},
[]() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -172,6 +172,20 @@ message ChaoticGoodFrame {
message ChaoticGoodSettings {}
message FakeTransportFrame {
enum MessageString {
CLIENT_INIT = 0;
SERVER_INIT = 1;
CLIENT_FINISHED = 2;
SERVER_FINISHED = 3;
}
oneof payload {
bytes raw_bytes = 1;
MessageString message_string = 2;
}
}
message InputSegment {
int32 delay_ms = 1;
oneof payload {
@ -187,6 +201,7 @@ message InputSegment {
H2ClientPrefix client_prefix = 11;
uint32 repeated_zeros = 12;
ChaoticGoodFrame chaotic_good = 13;
FakeTransportFrame fake_transport_frame = 14;
}
}
@ -204,10 +219,18 @@ message NetworkInput {
}
}
// Only for the connector fuzzer: controls when to drop the connector
message ShutdownConnector {
int32 delay_ms = 1;
int32 shutdown_status = 2;
string shutdown_message = 3;
}
message Msg {
repeated NetworkInput network_input = 1;
repeated api_fuzzer.Action api_actions = 2;
fuzzing_event_engine.Actions event_engine_actions = 3;
grpc.testing.FuzzConfigVars config_vars = 4;
grpc.testing.FuzzingChannelArgs channel_args = 5;
ShutdownConnector shutdown_connector = 6;
}
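As a reading aid for the two proto additions above, the sketch below builds a `fuzzer_input::Msg` that exercises the new `ShutdownConnector` field and an `InputSegment` carrying a `FakeTransportFrame`. Only field names visible in this diff are used; everything else (the generated header path, the chosen values) is an assumption based on standard protobuf C++ codegen, not part of the change.
// Minimal sketch, assuming the standard protobuf C++ codegen for
// fuzzer_input.proto; values are arbitrary.
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
fuzzer_input::Msg MakeExampleConnectorInput() {
  fuzzer_input::Msg msg;
  // Drop the connector 100ms in, with a fuzz-chosen status and message.
  auto* shutdown = msg.mutable_shutdown_connector();
  shutdown->set_delay_ms(100);
  shutdown->set_shutdown_status(14);
  shutdown->set_shutdown_message("fuzzed shutdown");
  return msg;
}
fuzzer_input::InputSegment MakeClientInitSegment() {
  fuzzer_input::InputSegment segment;
  segment.set_delay_ms(0);
  // Emit the canned CLIENT_INIT string as a fake-transport frame.
  segment.mutable_fake_transport_frame()->set_message_string(
      fuzzer_input::FakeTransportFrame::CLIENT_INIT);
  return segment;
}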

@ -267,6 +267,13 @@ SliceBuffer ChaoticGoodFrame(const fuzzer_input::ChaoticGoodFrame& frame) {
return out;
}
void store32_little_endian(uint32_t value, unsigned char* buf) {
buf[3] = static_cast<unsigned char>((value >> 24) & 0xFF);
buf[2] = static_cast<unsigned char>((value >> 16) & 0xFF);
buf[1] = static_cast<unsigned char>((value >> 8) & 0xFF);
buf[0] = static_cast<unsigned char>((value) & 0xFF);
}
grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
switch (segment.payload_case()) {
case fuzzer_input::InputSegment::kRawBytes:
@ -333,6 +340,38 @@ grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
.JoinIntoSlice()
.TakeCSlice();
} break;
case fuzzer_input::InputSegment::kFakeTransportFrame: {
auto generate = [](absl::string_view payload) {
uint32_t length = payload.length();
std::vector<unsigned char> bytes;
bytes.resize(4);
store32_little_endian(length + 4, bytes.data());
for (auto c : payload) {
bytes.push_back(static_cast<unsigned char>(c));
}
return grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(bytes.data()), bytes.size());
};
switch (segment.fake_transport_frame().payload_case()) {
case fuzzer_input::FakeTransportFrame::kRawBytes:
return generate(segment.fake_transport_frame().raw_bytes());
case fuzzer_input::FakeTransportFrame::kMessageString:
switch (segment.fake_transport_frame().message_string()) {
default:
return generate("UNKNOWN");
case fuzzer_input::FakeTransportFrame::CLIENT_INIT:
return generate("CLIENT_INIT");
case fuzzer_input::FakeTransportFrame::SERVER_INIT:
return generate("SERVER_INIT");
case fuzzer_input::FakeTransportFrame::CLIENT_FINISHED:
return generate("CLIENT_FINISHED");
case fuzzer_input::FakeTransportFrame::SERVER_FINISHED:
return generate("SERVER_FINISHED");
}
case fuzzer_input::FakeTransportFrame::PAYLOAD_NOT_SET:
return generate("");
}
}
case fuzzer_input::InputSegment::PAYLOAD_NOT_SET:
break;
}
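For orientation: the fake-transport generator above frames each payload with a 4-byte little-endian length header whose value counts the header itself (payload length + 4). The decoder below is purely illustrative and not part of this change; it just shows how such a header would be read back.
// Hypothetical counterpart to store32_little_endian; not in the diff.
#include <cstdint>
uint32_t load32_little_endian(const unsigned char* buf) {
  return static_cast<uint32_t>(buf[0]) |
         (static_cast<uint32_t>(buf[1]) << 8) |
         (static_cast<uint32_t>(buf[2]) << 16) |
         (static_cast<uint32_t>(buf[3]) << 24);
}
// For a well-formed frame, length == load32_little_endian(frame) and the
// payload occupies bytes [4, length), i.e. length - 4 payload bytes.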
@ -545,4 +584,15 @@ Duration ScheduleConnection(
return delay;
}
void ScheduleWrites(
const fuzzer_input::NetworkInput& network_input,
std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
endpoint,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine) {
auto schedule = MakeSchedule(network_input);
auto ep = std::shared_ptr<EventEngine::Endpoint>(std::move(endpoint));
ReadForever(ep);
ScheduleWritesForReads(ep, event_engine, std::move(schedule));
}
} // namespace grpc_core

@ -30,6 +30,12 @@ Duration ScheduleReads(
mock_endpoint_controller,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
void ScheduleWrites(
const fuzzer_input::NetworkInput& network_input,
std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
endpoint,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
Duration ScheduleConnection(
const fuzzer_input::NetworkInput& network_input,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine,

@ -0,0 +1,30 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc_security.h>
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/server_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunServerFuzzer(msg, [](grpc_server* server, int port_num,
const grpc_core::ChannelArgs&) {
auto* creds = grpc_fake_transport_security_server_credentials_create();
grpc_server_add_http2_port(
server, absl::StrCat("0.0.0.0:", port_num).c_str(), creds);
grpc_server_credentials_release(creds);
});
}

@ -67,12 +67,10 @@ void CancelAfterClientDone(
}
CORE_END2END_TEST(CoreEnd2endTest, CancelAfterClientDone) {
SKIP_IF_V3();
CancelAfterClientDone(*this, std::make_unique<CancelCancellationMode>());
}
CORE_END2END_TEST(CoreDeadlineTest, DeadlineAfterClientDone) {
SKIP_IF_V3();
CancelAfterClientDone(*this, std::make_unique<DeadlineCancellationMode>());
}

@ -18,6 +18,7 @@
#include <stdlib.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <limits>
#include <vector>
@ -32,6 +33,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/dump_args.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/telemetry/stats.h"
@ -189,7 +191,15 @@ void FuzzingEventEngine::TickUntilIdle() {
while (true) {
{
grpc_core::MutexLock lock(&*mu_);
if (tasks_by_id_.empty()) return;
LOG_EVERY_N_SEC(INFO, 5)
<< "TickUntilIdle: "
<< GRPC_DUMP_ARGS(tasks_by_id_.size(), outstanding_reads_.load(),
outstanding_writes_.load());
if (tasks_by_id_.empty() &&
outstanding_writes_.load(std::memory_order_relaxed) == 0 &&
outstanding_reads_.load(std::memory_order_relaxed) == 0) {
return;
}
}
Tick();
}
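The stricter idle check above only returns once there are no queued tasks and the `outstanding_reads_` / `outstanding_writes_` counters (added later in this change) have drained; those counters are maintained by the small RAII `IoToken` helper introduced in the header. A reduced, stand-alone sketch of that counting pattern, with hypothetical names, is shown below.
// Stand-alone sketch of the RAII counting idea behind IoToken (hypothetical
// names; see the real IoToken class in fuzzing_event_engine.h below).
#include <atomic>
#include <cstddef>
class ScopedIoCount {
 public:
  explicit ScopedIoCount(std::atomic<size_t>* counter) : counter_(counter) {
    counter_->fetch_add(1, std::memory_order_relaxed);
  }
  ~ScopedIoCount() { counter_->fetch_sub(1, std::memory_order_relaxed); }
  ScopedIoCount(const ScopedIoCount&) = delete;
  ScopedIoCount& operator=(const ScopedIoCount&) = delete;
 private:
  std::atomic<size_t>* counter_;
};
// While any ScopedIoCount tied to outstanding_reads_ or outstanding_writes_
// is alive, an idle loop like TickUntilIdle keeps ticking instead of returning.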
@ -299,6 +309,9 @@ absl::Status FuzzingEventEngine::FuzzingListener::Start() {
bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
CHECK(!closed[index]);
const int peer_index = 1 - index;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: entry "
<< GRPC_DUMP_ARGS(data->Length());
if (data->Length() == 0) return true;
size_t write_len = std::numeric_limits<size_t>::max();
// Check the write_sizes queue for fuzzer imposed restrictions on this write
@ -315,12 +328,16 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
// byte.
if (write_len == 0) write_len = 1;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: " << write_len << " bytes";
<< "WRITE[" << this << ":" << index << "]: " << write_len << " bytes; "
<< GRPC_DUMP_ARGS(pending_read[peer_index].has_value());
// Expand the pending buffer.
size_t prev_len = pending[index].size();
pending[index].resize(prev_len + write_len);
// Move bytes from the to-write data into the pending buffer.
data->MoveFirstNBytesIntoBuffer(write_len, pending[index].data() + prev_len);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: post-move "
<< GRPC_DUMP_ARGS(data->Length());
// If there was a pending read, then we can fulfill it.
if (pending_read[peer_index].has_value()) {
pending_read[peer_index]->buffer->Append(
@ -328,7 +345,11 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
pending[index].clear();
g_fuzzing_event_engine->RunLocked(
RunType::kWrite,
[cb = std::move(pending_read[peer_index]->on_read)]() mutable {
[cb = std::move(pending_read[peer_index]->on_read), this, peer_index,
buffer = pending_read[peer_index]->buffer]() mutable {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "FINISH_READ[" << this << ":" << peer_index
<< "]: " << GRPC_DUMP_ARGS(buffer->Length());
cb(absl::OkStatus());
});
pending_read[peer_index].reset();
@ -339,6 +360,10 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
bool FuzzingEventEngine::FuzzingEndpoint::Write(
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
const WriteArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_WRITE[" << middle_.get() << ":" << my_index()
<< "]: " << data->Length() << " bytes";
IoToken write_token(&g_fuzzing_event_engine->outstanding_writes_);
grpc_core::global_stats().IncrementSyscallWrite();
grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]);
@ -346,24 +371,38 @@ bool FuzzingEventEngine::FuzzingEndpoint::Write(
// If the write succeeds immediately, then we return true.
if (middle_->Write(data, my_index())) return true;
middle_->writing[my_index()] = true;
ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data);
ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data,
std::move(write_token));
return false;
}
void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data) {
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
IoToken write_token) {
g_fuzzing_event_engine->RunLocked(
RunType::kWrite, [middle = std::move(middle), index, data,
on_writable = std::move(on_writable)]() mutable {
RunType::kWrite,
[write_token = std::move(write_token), middle = std::move(middle), index,
data, on_writable = std::move(on_writable)]() mutable {
grpc_core::ReleasableMutexLock lock(&*mu_);
CHECK(middle->writing[index]);
if (middle->closed[index]) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED[" << middle.get() << ":" << index << "]";
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[on_writable = std::move(on_writable)]() mutable {
on_writable(absl::InternalError("Endpoint closed"));
});
if (middle->pending_read[1 - index].has_value()) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[cb = std::move(
middle->pending_read[1 - index]->on_read)]() mutable {
cb(absl::InternalError("Endpoint closed"));
});
middle->pending_read[1 - index].reset();
}
return;
}
if (middle->Write(data, index)) {
@ -373,14 +412,23 @@ void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
return;
}
ScheduleDelayedWrite(std::move(middle), index, std::move(on_writable),
data);
data, std::move(write_token));
});
}
FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
grpc_core::MutexLock lock(&*mu_);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSE[" << middle_.get() << ":" << my_index() << "]: "
<< GRPC_DUMP_ARGS(
middle_->closed[my_index()], middle_->closed[peer_index()],
middle_->pending_read[my_index()].has_value(),
middle_->pending_read[peer_index()].has_value(),
middle_->writing[my_index()], middle_->writing[peer_index()]);
middle_->closed[my_index()] = true;
if (middle_->pending_read[my_index()].has_value()) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED_READING[" << middle_.get() << ":" << my_index() << "]";
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[cb = std::move(middle_->pending_read[my_index()]->on_read)]() mutable {
@ -388,7 +436,7 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
});
middle_->pending_read[my_index()].reset();
}
if (!middle_->writing[peer_index()] &&
if (!middle_->writing[my_index()] &&
middle_->pending_read[peer_index()].has_value()) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
@ -403,20 +451,25 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
bool FuzzingEventEngine::FuzzingEndpoint::Read(
absl::AnyInvocable<void(absl::Status)> on_read, SliceBuffer* buffer,
const ReadArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_READ[" << middle_.get() << ":" << my_index() << "]";
buffer->Clear();
IoToken read_token(&g_fuzzing_event_engine->outstanding_reads_);
grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]);
if (middle_->pending[peer_index()].empty()) {
// If the endpoint is closed, fail asynchronously.
if (middle_->closed[peer_index()]) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter, [on_read = std::move(on_read)]() mutable {
RunType::kRunAfter,
[read_token, on_read = std::move(on_read)]() mutable {
on_read(absl::InternalError("Endpoint closed"));
});
return false;
}
// If the endpoint has no pending data, then we need to wait for a write.
middle_->pending_read[my_index()] = PendingRead{std::move(on_read), buffer};
middle_->pending_read[my_index()] =
PendingRead{std::move(read_token), std::move(on_read), buffer};
return false;
} else {
// If the endpoint has pending data, then we can fulfill the read

@ -17,6 +17,7 @@
#include <stddef.h>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <map>
@ -124,6 +125,36 @@ class FuzzingEventEngine : public EventEngine {
}
private:
class IoToken {
public:
IoToken() : refs_(nullptr) {}
explicit IoToken(std::atomic<size_t>* refs) : refs_(refs) {
refs_->fetch_add(1, std::memory_order_relaxed);
}
~IoToken() {
if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
}
IoToken(const IoToken& other) : refs_(other.refs_) {
if (refs_ != nullptr) refs_->fetch_add(1, std::memory_order_relaxed);
}
IoToken& operator=(const IoToken& other) {
IoToken copy(other);
Swap(copy);
return *this;
}
IoToken(IoToken&& other) noexcept
: refs_(std::exchange(other.refs_, nullptr)) {}
IoToken& operator=(IoToken&& other) noexcept {
if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
refs_ = std::exchange(other.refs_, nullptr);
return *this;
}
void Swap(IoToken& other) { std::swap(refs_, other.refs_); }
private:
std::atomic<size_t>* refs_;
};
enum class RunType {
kWrite,
kRunAfter,
@ -183,6 +214,8 @@ class FuzzingEventEngine : public EventEngine {
// One read that's outstanding.
struct PendingRead {
// The associated IO token.
IoToken io_token;
// Callback to invoke when the read completes.
absl::AnyInvocable<void(absl::Status)> on_read;
// The buffer to read into.
@ -243,8 +276,8 @@ class FuzzingEventEngine : public EventEngine {
// endpoint shutdown, it's believed this is a legal implementation.
static void ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
IoToken write_token) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const std::shared_ptr<EndpointMiddle> middle_;
const int index_;
};
@ -299,6 +332,8 @@ class FuzzingEventEngine : public EventEngine {
std::queue<std::queue<size_t>> write_sizes_for_future_connections_
ABSL_GUARDED_BY(mu_);
grpc_pick_port_functions previous_pick_port_functions_;
std::atomic<size_t> outstanding_writes_{0};
std::atomic<size_t> outstanding_reads_{0};
grpc_core::Mutex run_after_duration_callback_mu_;
absl::AnyInvocable<void(Duration)> run_after_duration_callback_

@ -28,11 +28,12 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/telemetry/stats.h"
#include "test/core/test_util/test_config.h"
namespace grpc_core {
namespace {
void AssertIndex(const HPackTable* tbl, uint32_t idx, const char* key,
void AssertIndex(HPackTable* tbl, uint32_t idx, const char* key,
const char* value) {
const auto* md = tbl->Lookup(idx);
ASSERT_NE(md, nullptr);
@ -113,6 +114,8 @@ TEST(HpackParserTableTest, ManyAdditions) {
ExecCtx exec_ctx;
auto stats_before = global_stats().Collect();
for (i = 0; i < 100000; i++) {
std::string key = absl::StrCat("K.", i);
std::string value = absl::StrCat("VALUE.", i);
@ -134,6 +137,56 @@ TEST(HpackParserTableTest, ManyAdditions) {
value.c_str());
}
}
auto stats_after = global_stats().Collect();
EXPECT_EQ(stats_after->http2_hpack_hits - stats_before->http2_hpack_hits,
100000);
EXPECT_EQ(stats_after->http2_hpack_misses, stats_before->http2_hpack_misses);
}
TEST(HpackParserTableTest, ManyUnusedAdditions) {
auto tbl = std::make_unique<HPackTable>();
int i;
ExecCtx exec_ctx;
auto stats_before = global_stats().Collect();
const Timestamp start = Timestamp::Now();
for (i = 0; i < 100000; i++) {
std::string key = absl::StrCat("K.", i);
std::string value = absl::StrCat("VALUE.", i);
auto key_slice = Slice::FromCopiedString(key);
auto value_slice = Slice::FromCopiedString(value);
auto memento = HPackTable::Memento{
ParsedMetadata<grpc_metadata_batch>(
ParsedMetadata<grpc_metadata_batch>::FromSlicePair{},
std::move(key_slice), std::move(value_slice),
key.length() + value.length() + 32),
nullptr};
ASSERT_TRUE(tbl->Add(std::move(memento)));
}
tbl.reset();
auto stats_after = global_stats().Collect();
const Timestamp end = Timestamp::Now();
EXPECT_EQ(stats_after->http2_hpack_hits, stats_before->http2_hpack_hits);
EXPECT_EQ(stats_after->http2_hpack_misses - stats_before->http2_hpack_misses,
100000);
size_t num_buckets_changed = 0;
const auto& lifetime_before = stats_before->http2_hpack_entry_lifetime;
const auto& lifetime_after = stats_after->http2_hpack_entry_lifetime;
for (size_t i = 0; i < lifetime_before.bucket_count(); i++) {
if (lifetime_before.buckets()[i] != lifetime_after.buckets()[i]) {
EXPECT_LE(i, lifetime_before.BucketFor((end - start).millis()));
num_buckets_changed++;
}
}
EXPECT_GT(num_buckets_changed, 0);
}
} // namespace grpc_core

@ -112,6 +112,19 @@ grpc_cc_test(
],
)
grpc_cc_test(
name = "unique_ptr_with_bitset_test",
srcs = ["unique_ptr_with_bitset_test.cc"],
external_deps = ["gtest"],
language = "C++",
uses_event_engine = False,
uses_polling = False,
deps = [
"//:gpr_platform",
"//src/core:unique_ptr_with_bitset",
],
)
grpc_cc_test(
name = "useful_test",
srcs = ["useful_test.cc"],

@ -0,0 +1,60 @@
//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#include "src/core/util/unique_ptr_with_bitset.h"
#include <stdint.h>
#include <limits>
#include <memory>
#include "gtest/gtest.h"
#include <grpc/support/port_platform.h>
namespace grpc_core {
TEST(UniquePtrWithBitsetTest, Basic) {
UniquePtrWithBitset<int, 1> ptr;
EXPECT_EQ(ptr.get(), nullptr);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.reset(new int(42));
EXPECT_EQ(*ptr, 42);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.SetBit(0);
EXPECT_EQ(ptr.TestBit(0), true);
ptr.reset();
EXPECT_EQ(ptr.get(), nullptr);
EXPECT_EQ(ptr.TestBit(0), true);
ptr.ClearBit(0);
EXPECT_EQ(ptr.TestBit(0), false);
ptr.reset(new int(43));
ptr.SetBit(0);
UniquePtrWithBitset<int, 1> ptr2;
ptr2 = std::move(ptr);
EXPECT_EQ(*ptr2, 43);
EXPECT_EQ(ptr2.TestBit(0), true);
}
} // namespace grpc_core
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
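The test above pins down the observable behavior of `UniquePtrWithBitset`: the flag bits survive `reset()` and travel with the pointer on move-assignment. The header itself is not reproduced in this diff; one common way to get this layout is to stash the flags in the pointer's unused low alignment bits, roughly as in the hypothetical sketch below (names and details are illustrative, not the actual gRPC implementation).
// Hypothetical low-bit pointer tagging; requires alignof(T) > (1 << kBits).
#include <cstddef>
#include <cstdint>
template <typename T, size_t kBits>
class TaggedUniquePtrSketch {
 public:
  static constexpr uintptr_t kMask = (uintptr_t{1} << kBits) - 1;
  TaggedUniquePtrSketch() = default;
  ~TaggedUniquePtrSketch() { delete get(); }
  TaggedUniquePtrSketch(const TaggedUniquePtrSketch&) = delete;
  TaggedUniquePtrSketch& operator=(const TaggedUniquePtrSketch&) = delete;
  // Swap in a new pointer while preserving the flag bits, mirroring the
  // reset()/TestBit() behavior exercised by the test above.
  void reset(T* p = nullptr) {
    delete get();
    rep_ = reinterpret_cast<uintptr_t>(p) | (rep_ & kMask);
  }
  T* get() const { return reinterpret_cast<T*>(rep_ & ~kMask); }
  T& operator*() const { return *get(); }
  void SetBit(size_t i) { rep_ |= uintptr_t{1} << i; }
  void ClearBit(size_t i) { rep_ &= ~(uintptr_t{1} << i); }
  bool TestBit(size_t i) const { return ((rep_ >> i) & 1) != 0; }
 private:
  uintptr_t rep_ = 0;
};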

@ -54,7 +54,6 @@ DOCKERIMAGE_CURRENT_VERSIONS = {
"tools/dockerfile/distribtest/python_python38_buster_aarch64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_python38_buster_aarch64@sha256:0a93bf2a0303aebe1280bafad69df228b9444af9144c767d8169ecc70fb383f6",
"tools/dockerfile/distribtest/python_ubuntu2004_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2004_x64@sha256:288cf72bc98fc384b9352d1f6d258b3513925ffe5746dda7e2e343723dd5f733",
"tools/dockerfile/distribtest/python_ubuntu2204_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/python_ubuntu2204_x64@sha256:6054d639247a93af2b496f3c1ce48f63b2e07f5ba54e025f69bb232a747c644e",
"tools/dockerfile/distribtest/ruby_centos7_x64.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_centos7_x64@sha256:4d529b984b78ca179086f7f9b416605e2d9a96ca0a28a71f4421bb5ffdc18f96",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_0.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_0@sha256:05c579d93764f12db1a60fa78a26e0f4d6179e54187a3a531c8ff955001731ec",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_1.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_1@sha256:a48bb08275a588fbcea21b6b6056514b69454f6844bd7db9fd72c796892d02e1",
"tools/dockerfile/distribtest/ruby_debian11_x64_ruby_3_2.current_version": "docker://us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_debian11_x64_ruby_3_2@sha256:9604f8d07c3ea330cdc1ebe394f67828710bbfef52f0dc144e513e3627279b5a",

@ -302,6 +302,10 @@ with open("src/core/telemetry/stats_data.h", "w") as H:
print(" public:", file=H)
print(" static int BucketFor(int value);", file=H)
print(" const uint64_t* buckets() const { return buckets_; }", file=H)
print(
" size_t bucket_count() const { return %d; }" % shape.buckets,
file=H,
)
print(
" friend Histogram_%d_%d operator-(const Histogram_%d_%d& left,"
" const Histogram_%d_%d& right);"

@ -1 +0,0 @@
us-docker.pkg.dev/grpc-testing/testing-images-public/ruby_centos7_x64:b37e078e920ba1f75bd26bc67c2d3496432e36af@sha256:4d529b984b78ca179086f7f9b416605e2d9a96ca0a28a71f4421bb5ffdc18f96

@ -1,33 +0,0 @@
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM centos:7
RUN yum update -y && yum install -y curl tar which
# Install rvm
RUN gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
RUN curl -sSL https://get.rvm.io | bash -s stable
# Install Ruby 2.7
RUN /bin/bash -l -c "rvm install ruby-2.7"
RUN /bin/bash -l -c "rvm use --default ruby-2.7"
RUN /bin/bash -l -c "echo 'gem: --no-document' > ~/.gemrc"
RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.7' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-document"
RUN mkdir /var/local/jenkins
RUN /bin/bash -l -c "echo '. /etc/profile.d/rvm.sh' >> ~/.bashrc"

@ -2956,6 +2956,7 @@ src/core/util/time.cc \
src/core/util/time_precise.cc \
src/core/util/time_precise.h \
src/core/util/tmpfile.h \
src/core/util/unique_ptr_with_bitset.h \
src/core/util/upb_utils.h \
src/core/util/useful.h \
src/core/util/windows/cpu.cc \

@ -2736,6 +2736,7 @@ src/core/util/time.cc \
src/core/util/time_precise.cc \
src/core/util/time_precise.h \
src/core/util/tmpfile.h \
src/core/util/unique_ptr_with_bitset.h \
src/core/util/upb_utils.h \
src/core/util/useful.h \
src/core/util/windows/cpu.cc \

@ -2,21 +2,24 @@
This directory contains scripts that facilitate building and running gRPC interoperability tests for combinations of language/runtimes (known as matrix).
The setup builds gRPC docker images for each language/runtime and upload it to Google Container Registry (GCR). These images, encapsulating gRPC stack
The setup builds gRPC docker images for each language/runtime and uploads them to Artifact Registry (AR). These images, encapsulating the gRPC stack
from specific releases/tags, are used to test version compatibility between gRPC release versions.
## Step-by-step instructions for adding a GCR image for a new release for compatibility test
We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server. When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test. The steps are:
## Step-by-step instructions for adding an AR docker image for a new release to the compatibility test
We have a continuous nightly test setup to test gRPC backward compatibility between old clients and the latest server.
When a gRPC developer creates a new gRPC release, they are also responsible for adding the just-released gRPC client to the nightly test.
The steps are:
- Add (or update) an entry in `./client_matrix.py` file to reference the github tag for the release.
- Build new client docker image(s). For example, for C and wrapper languages release `v1.9.9`, do
- `tools/interop_matrix/create_matrix_images.py --git_checkout --release=v1.9.9 --upload_images --language cxx python ruby php`
- Verify that the new docker image was built successfully and uploaded to GCR. For example,
- `gcloud container images list --repository gcr.io/grpc-testing` lists available images.
- `gcloud container images list-tags gcr.io/grpc-testing/grpc_interop_java` should show an image entry with tag `v1.9.9`.
- images can also be viewed in https://pantheon.corp.google.com/gcr/images/grpc-testing?project=grpc-testing
- Verify that the new docker image was built successfully and uploaded to AR. For example,
- `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public` lists available images.
- `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java --include-tags` should show an image entry with tag `v1.9.9`.
- images can also be viewed in https://pantheon.corp.google.com/artifacts/docker/grpc-testing/us/testing-images-public
- Verify that the just-created docker client image passes the backward compatibility test (it should). For example,
- `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_java:v1.9.9` followed by
- `docker_image=gcr.io/grpc-testing/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master`
- `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9` followed by
- `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master`
- Commit the change and create a PR to upstream/master.
- Trigger an adhoc run of interop matrix tests: https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod:grpc%2Fcore%2Fexperimental%2Flinux%2Fgrpc_interop_matrix_adhoc
- Once tests pass, request a PR review.
@ -24,30 +27,34 @@ We have continuous nightly test setup to test gRPC backward compatibility betwee
For more details on each step, refer to sections below.
## Instructions for adding new language/runtimes
- Create a new `Dockerfile.template` and `build_interop.sh.template` for the language/runtime under `template/tools/dockerfile/`.
- Run `tools/buildgen/generate_projects.sh` to create corresponding files under `tools/dockerfile/`.
- Add language/runtimes to `client_matrix.py` following existing language/runtimes examples.
- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to GCR.
- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to AR.
## Instructions for creating new test cases
- Create test cases by running `LANG=<lang> [RELEASE=<release>] ./create_testcases.sh`. For example,
- `LANG=go ./create_testcases.sh` will generate `./testcases/go__master`, which is also a functional bash script.
- `LANG=go KEEP_IMAGE=1 ./create_testcases.sh` will generate `./testcases/go__master` and keep the local docker image so it can be invoked simply via `./testcases/go__master`. Note: remove local docker images manually afterwards with `docker rmi <image_id>`.
- Stage and commit the generated test case file `./testcases/<lang>__<release>`.
## Instructions for running test cases against GCR images
## Instructions for running test cases against AR docker images
- Run `tools/interop_matrix/run_interop_matrix_tests.py`. Useful options:
- `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the GCR images with the tag have been created using `create_matrix_images.py` above.
- `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the AR images with the tag have been created using `create_matrix_images.py` above.
- `--language` specifies a language. Defaults to `--language=all`.
For example, to test all languages for all gRPC releases across all runtimes, run `tools/interop_matrix/run_interop_matrix_tests.py --release=all`.
- The output for all test cases is recorded in a JUnit-style XML file (defaults to `report.xml`).
## Instructions for running test cases against a GCR image manually
- Download docker image from GCR. For example: `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0`.
## Instructions for running test cases against an AR image manually
- Download a docker image from AR. For example: `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0`.
- Run test cases by specifying the `docker_image` variable inline with the test case script created above.
For example:
- `docker_image=gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in GCR.
- `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in AR.
Note:
- File paths starting with `tools/` or `template/` are relative to the grpc repo root dir. File paths starting with `./` are relative to the current directory (`tools/interop_matrix`).
- Creating and referencing images in GCR require read and write permission to Google Container Registry path gcr.io/grpc-testing.
- Creating and referencing images in AR requires read and write permissions for the AR path us-docker.pkg.dev/grpc-testing.

@ -53,9 +53,9 @@ _BUILD_INFO = "/var/local/build_info"
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
"--docker_path",
default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images",
)
argp.add_argument(
@ -175,7 +175,7 @@ def build_image_jobspec(runtime, env, gcr_tag, stack_base):
stack_base: the local gRPC repo path.
"""
basename = "grpc_interop_%s" % runtime
tag = "%s/%s:%s" % (args.gcr_path, basename, gcr_tag)
tag = "%s/%s:%s" % (args.docker_path, basename, gcr_tag)
build_env = {"INTEROP_IMAGE": tag, "BASE_NAME": basename}
build_env.update(env)
image_builder_path = _IMAGE_BUILDER
@ -407,8 +407,8 @@ for lang in languages:
for image in docker_images:
if args.upload_images:
jobset.message("START", "Uploading %s" % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(":") != -1
# docker image name must be in the format <docker_path>/<image>:<gcr_tag>
assert image.startswith(args.docker_path) and image.find(":") != -1
# Add a tag to exclude the image from the GCP Vulnerability Scanner.
(image_name, tag_name) = image.rsplit(":", 1)
alternate_image = (

@ -56,9 +56,9 @@ _RELEASES = sorted(
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
"--docker_path",
default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images",
)
argp.add_argument(
"--release",
@ -348,7 +348,9 @@ languages = args.language if args.language != ["all"] else _LANGUAGES
total_num_failures = 0
_xml_report_tree = report_utils.new_junit_xml_tree()
for lang in languages:
docker_images = _get_test_images_for_lang(lang, args.release, args.gcr_path)
docker_images = _get_test_images_for_lang(
lang, args.release, args.docker_path
)
for runtime in sorted(docker_images.keys()):
total_num_failures += _run_tests_for_lang(
lang, runtime, docker_images[runtime], _xml_report_tree

@ -20,11 +20,8 @@ import %workspace%/tools/remote_build/include/rbe_base_config.bazelrc
# configure backend for remote execution
build --remote_executor=grpcs://remotebuildexecution.googleapis.com
build --spawn_strategy=remote
build --strategy=Javac=remote
build --strategy=Closure=remote
build --genrule_strategy=remote
build --remote_timeout=7200 # very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
# Very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
build --remote_timeout=7200
# In the remote execution environment, each test gets its own docker containers
# and port server won't be available.

@ -502,7 +502,6 @@ def targets():
protobuf_version="3.25",
presubmit=True,
),
RubyDistribTest("linux", "x64", "centos7"),
RubyDistribTest("linux", "x64", "ubuntu2004"),
RubyDistribTest("linux", "x64", "ubuntu2204", presubmit=True),
# PHP7

@ -11419,6 +11419,30 @@
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "unique_ptr_with_bitset_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
