Merge branch 'master' into fix_build_protobuf_at_head_timeout

pull/37404/head
sreenithi committed cae74938ff 4 months ago
49 changed files:

  1. bazel/grpc_deps.bzl (12)
  2. grpc.def (1)
  3. include/grpc/support/log.h (16)
  4. src/core/handshaker/security/secure_endpoint.cc (9)
  5. src/core/handshaker/security/security_handshaker.cc (131)
  6. src/core/lib/channel/promise_based_filter.cc (89)
  7. src/core/lib/gprpp/work_serializer.cc (36)
  8. src/core/lib/iomgr/ev_apple.cc (9)
  9. src/core/lib/iomgr/ev_epoll1_linux.cc (110)
  10. src/core/lib/iomgr/ev_poll_posix.cc (8)
  11. src/core/lib/iomgr/polling_entity.cc (12)
  12. src/core/lib/iomgr/tcp_posix.cc (49)
  13. src/core/lib/iomgr/tcp_server_posix.cc (5)
  14. src/core/lib/iomgr/tcp_windows.cc (16)
  15. src/core/lib/iomgr/timer_manager.cc (9)
  16. src/core/lib/promise/for_each.h (23)
  17. src/core/lib/promise/inter_activity_latch.h (20)
  18. src/core/lib/promise/latch.h (40)
  19. src/core/lib/resource_quota/memory_quota.cc (23)
  20. src/core/lib/security/authorization/grpc_authorization_policy_provider.cc (9)
  21. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (9)
  22. src/core/lib/transport/bdp_estimator.cc (14)
  23. src/core/lib/transport/call_filters.cc (8)
  24. src/core/util/log.cc (7)
  25. src/core/xds/xds_client/xds_client.cc (5)
  26. src/ruby/ext/grpc/rb_grpc_imports.generated.c (2)
  27. src/ruby/ext/grpc/rb_grpc_imports.generated.h (3)
  28. src/ruby/lib/grpc/generic/active_call.rb (13)
  29. src/ruby/spec/call_spec.rb (93)
  30. src/ruby/spec/channel_spec.rb (6)
  31. src/ruby/spec/client_server_spec.rb (655)
  32. src/ruby/spec/generic/active_call_spec.rb (150)
  33. src/ruby/spec/support/services.rb (3)
  34. test/core/end2end/fuzzers/BUILD (57)
  35. test/core/end2end/fuzzers/connector_fuzzer.cc (189)
  36. test/core/end2end/fuzzers/connector_fuzzer.h (34)
  37. test/core/end2end/fuzzers/connector_fuzzer_chttp2.cc (30)
  38. test/core/end2end/fuzzers/connector_fuzzer_chttp2_corpus/empty (1)
  39. test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec.cc (36)
  40. test/core/end2end/fuzzers/connector_fuzzer_chttp2_fakesec_corpus/empty (1)
  41. test/core/end2end/fuzzers/fuzzer_input.proto (23)
  42. test/core/end2end/fuzzers/network_input.cc (50)
  43. test/core/end2end/fuzzers/network_input.h (6)
  44. test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.cc (75)
  45. test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.h (39)
  46. tools/interop_matrix/README.md (39)
  47. tools/interop_matrix/create_matrix_images.py (12)
  48. tools/interop_matrix/run_interop_matrix_tests.py (10)
  49. tools/remote_build/include/rbe_remote_execution.bazelrc (7)

@ -23,10 +23,10 @@ def grpc_deps():
if "platforms" not in native.existing_rules():
http_archive(
name = "platforms",
sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74",
sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
urls = [
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
],
)
@ -168,10 +168,10 @@ def grpc_deps():
http_archive(
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
],
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
sha256 = "bc283cdfcd526a52c3201279cda4bc298652efa898b10b4db0837dc51652756f",
)
if "bazel_compdb" not in native.existing_rules():

grpc.def (generated, 1 change):

@ -233,7 +233,6 @@ EXPORTS
gpr_log
gpr_should_log
gpr_log_verbosity_init
gpr_set_log_function
gpr_format_message
gpr_strdup
gpr_asprintf

@ -55,22 +55,6 @@ GPRAPI int gpr_should_log(gpr_log_severity severity);
GPRAPI void gpr_log_verbosity_init(void);
/** Log overrides: applications can use this API to intercept logging calls
and use their own implementations */
struct gpr_log_func_args {
const char* file;
int line;
gpr_log_severity severity;
const char* message;
};
typedef struct gpr_log_func_args gpr_log_func_args;
typedef void (*gpr_log_func)(gpr_log_func_args* args);
GPRAPI void gpr_set_log_function(gpr_log_func deprecated_setting);
#ifdef __cplusplus
}
#endif
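The removed `gpr_set_log_function` hook was the old way to intercept gRPC log output; the deprecation notice deleted from src/core/util/log.cc further down points applications at an absl LogSink instead. A minimal sketch of that replacement, assuming the standard absl logging API (the sink class and registration function here are illustrative, not part of gRPC):

```cpp
#include <iostream>

#include "absl/log/log_entry.h"
#include "absl/log/log_sink.h"
#include "absl/log/log_sink_registry.h"

// Receives roughly the same information the old gpr_log_func_args
// struct carried: file, line, severity, and the formatted message.
class StderrGrpcLogSink : public absl::LogSink {
 public:
  void Send(const absl::LogEntry& entry) override {
    std::cerr << entry.source_filename() << ":" << entry.source_line()
              << " " << entry.text_message() << "\n";
  }
};

// Replaces the old gpr_set_log_function(...) call; the sink must stay
// alive for as long as it is registered.
void InstallLogSink() {
  static StderrGrpcLogSink sink;
  absl::AddLogSink(&sink);
}
```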

@ -108,7 +108,6 @@ struct secure_endpoint : public grpc_endpoint {
}
~secure_endpoint() {
memory_owner.Reset();
tsi_frame_protector_destroy(protector);
tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
grpc_slice_buffer_destroy(&source_buffer);
@ -380,9 +379,12 @@ static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
static void on_write(void* user_data, grpc_error_handle error) {
secure_endpoint* ep = static_cast<secure_endpoint*>(user_data);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, std::exchange(ep->write_cb, nullptr),
std::move(error));
grpc_closure* cb = ep->write_cb;
ep->write_cb = nullptr;
SECURE_ENDPOINT_UNREF(ep, "write");
grpc_core::EnsureRunInExecCtx([cb, error = std::move(error)]() {
grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
});
}
static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
@ -505,6 +507,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
static void endpoint_destroy(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
ep->wrapped_ep.reset();
ep->memory_owner.Reset();
SECURE_ENDPOINT_UNREF(ep, "destroy");
}
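In the rewritten `on_write` above, the endpoint ref is dropped before the callback runs, and the callback is wrapped in `grpc_core::EnsureRunInExecCtx` so it still executes under an ExecCtx even if the caller did not establish one. A toy model of that idiom, under the assumption that it means "run inline when an ExecCtx already exists, otherwise create a scoped one first" (not the real gRPC implementation):

```cpp
// Illustrative-only model of the EnsureRunInExecCtx idiom.
thread_local bool g_in_exec_ctx = false;  // stand-in for ExecCtx::Get()

struct ScopedExecCtx {
  ScopedExecCtx() { g_in_exec_ctx = true; }
  ~ScopedExecCtx() { g_in_exec_ctx = false; }
};

template <typename F>
void EnsureRunInExecCtx(F fn) {
  if (g_in_exec_ctx) {
    fn();               // already under an ExecCtx: run inline
  } else {
    ScopedExecCtx ctx;  // otherwise materialize one for the call
    fn();
  }
}
```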

@ -88,27 +88,27 @@ class SecurityHandshaker : public Handshaker {
private:
grpc_error_handle DoHandshakerNextLocked(const unsigned char* bytes_received,
size_t bytes_received_size);
size_t bytes_received_size)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
grpc_error_handle OnHandshakeNextDoneLocked(
tsi_result result, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
void HandshakeFailedLocked(absl::Status error);
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void HandshakeFailedLocked(absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void Finish(absl::Status status);
void OnHandshakeDataReceivedFromPeerFn(absl::Status error);
void OnHandshakeDataSentToPeerFn(absl::Status error);
static void OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error);
static void OnHandshakeDataSentToPeerFnScheduler(void* arg,
grpc_error_handle error);
void OnHandshakeDataReceivedFromPeerFnScheduler(grpc_error_handle error);
void OnHandshakeDataSentToPeerFnScheduler(grpc_error_handle error);
static void OnHandshakeNextDoneGrpcWrapper(
tsi_result result, void* user_data, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
static void OnPeerCheckedFn(void* arg, grpc_error_handle error);
void OnPeerCheckedInner(grpc_error_handle error);
void OnPeerCheckedFn(grpc_error_handle error);
size_t MoveReadBufferIntoHandshakeBuffer();
grpc_error_handle CheckPeerLocked();
grpc_error_handle CheckPeerLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// State set at creation time.
tsi_handshaker* handshaker_;
@ -125,13 +125,11 @@ class SecurityHandshaker : public Handshaker {
size_t handshake_buffer_size_;
unsigned char* handshake_buffer_;
SliceBuffer outgoing_;
grpc_closure on_handshake_data_sent_to_peer_;
grpc_closure on_handshake_data_received_from_peer_;
grpc_closure on_peer_checked_;
RefCountedPtr<grpc_auth_context> auth_context_;
tsi_handshaker_result* handshaker_result_ = nullptr;
size_t max_frame_size_ = 0;
std::string tsi_handshake_error_;
grpc_closure* on_peer_checked_ ABSL_GUARDED_BY(mu_) = nullptr;
};
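Alongside the closure changes, several private `...Locked` methods gain `ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_)` annotations and `on_peer_checked_` becomes `ABSL_GUARDED_BY(mu_)`, letting clang's thread-safety analysis verify at compile time that they are only touched under `mu_`. A minimal sketch of the annotation pattern, using absl's Mutex directly rather than gRPC's wrappers:

```cpp
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Handshaker {
 public:
  void DoHandshake() {
    absl::MutexLock lock(&mu_);
    DoNextLocked();  // OK: mu_ is held here
  }

 private:
  // Compile-time contract: callers must hold mu_. Calling this
  // without the lock is flagged by -Wthread-safety.
  void DoNextLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { /* ... */ }

  absl::Mutex mu_;
  int state_ ABSL_GUARDED_BY(mu_) = 0;  // reads/writes require mu_
};
```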
SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
@ -143,10 +141,7 @@ SecurityHandshaker::SecurityHandshaker(tsi_handshaker* handshaker,
handshake_buffer_(
static_cast<uint8_t*>(gpr_malloc(handshake_buffer_size_))),
max_frame_size_(
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {
GRPC_CLOSURE_INIT(&on_peer_checked_, &SecurityHandshaker::OnPeerCheckedFn,
this, grpc_schedule_on_exec_ctx);
}
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0))) {}
SecurityHandshaker::~SecurityHandshaker() {
tsi_handshaker_destroy(handshaker_);
@ -220,8 +215,9 @@ MakeChannelzSecurityFromAuthContext(grpc_auth_context* auth_context) {
} // namespace
void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
void SecurityHandshaker::OnPeerCheckedFn(grpc_error_handle error) {
MutexLock lock(&mu_);
on_peer_checked_ = nullptr;
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(error);
return;
@ -317,11 +313,6 @@ void SecurityHandshaker::OnPeerCheckedInner(grpc_error_handle error) {
Finish(absl::OkStatus());
}
void SecurityHandshaker::OnPeerCheckedFn(void* arg, grpc_error_handle error) {
RefCountedPtr<SecurityHandshaker>(static_cast<SecurityHandshaker*>(arg))
->OnPeerCheckedInner(error);
}
grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
tsi_peer peer;
tsi_result result =
@ -330,8 +321,12 @@ grpc_error_handle SecurityHandshaker::CheckPeerLocked() {
return GRPC_ERROR_CREATE(absl::StrCat("Peer extraction failed (",
tsi_result_to_string(result), ")"));
}
on_peer_checked_ = NewClosure(
[self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
self->OnPeerCheckedFn(std::move(status));
});
connector_->check_peer(peer, args_->endpoint.get(), args_->args,
&auth_context_, &on_peer_checked_);
&auth_context_, on_peer_checked_);
grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
auth_context_.get(), GRPC_TRANSPORT_SECURITY_LEVEL_PROPERTY_NAME);
const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it);
@ -356,10 +351,10 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
CHECK_EQ(bytes_to_send_size, 0u);
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
return error;
}
@ -387,19 +382,19 @@ grpc_error_handle SecurityHandshaker::OnHandshakeNextDoneLocked(
reinterpret_cast<const char*>(bytes_to_send), bytes_to_send_size));
grpc_endpoint_write(
args_->endpoint.get(), outgoing_.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_sent_to_peer_,
&SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler, this,
grpc_schedule_on_exec_ctx),
NewClosure(
[self = RefAsSubclass<SecurityHandshaker>()](absl::Status status) {
self->OnHandshakeDataSentToPeerFnScheduler(std::move(status));
}),
nullptr, /*max_frame_size=*/INT_MAX);
} else if (handshaker_result == nullptr) {
// There is nothing to send, but need to read from peer.
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
} else {
// Handshake has finished, check peer and so on.
@ -418,8 +413,6 @@ void SecurityHandshaker::OnHandshakeNextDoneGrpcWrapper(
result, bytes_to_send, bytes_to_send_size, handshaker_result);
if (!error.ok()) {
h->HandshakeFailedLocked(std::move(error));
} else {
h.release(); // Avoid unref
}
}
@ -429,13 +422,15 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
const unsigned char* bytes_to_send = nullptr;
size_t bytes_to_send_size = 0;
tsi_handshaker_result* hs_result = nullptr;
auto self = RefAsSubclass<SecurityHandshaker>();
tsi_result result = tsi_handshaker_next(
handshaker_, bytes_received, bytes_received_size, &bytes_to_send,
&bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper, this,
&tsi_handshake_error_);
&bytes_to_send_size, &hs_result, &OnHandshakeNextDoneGrpcWrapper,
self.get(), &tsi_handshake_error_);
if (result == TSI_ASYNC) {
// Handshaker operating asynchronously. Nothing else to do here;
// callback will be invoked in a TSI thread.
// Handshaker operating asynchronously. Callback will be invoked in a TSI
// thread. We no longer own the ref held in self.
self.release();
return absl::OkStatus();
}
// Handshaker returned synchronously. Invoke callback directly in
@ -449,18 +444,18 @@ grpc_error_handle SecurityHandshaker::DoHandshakerNextLocked(
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataReceivedFromPeerFn(std::move(error));
});
grpc_error_handle error) {
args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->OnHandshakeDataReceivedFromPeerFn(std::move(error));
// Avoid destruction outside of an ExecCtx (since this is non-cancelable).
self.reset();
});
}
void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
@ -473,8 +468,6 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
error = DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) {
HandshakeFailedLocked(std::move(error));
} else {
handshaker.release(); // Avoid unref
}
}
@ -483,18 +476,18 @@ void SecurityHandshaker::OnHandshakeDataReceivedFromPeerFn(absl::Status error) {
// TODO(roth): This will no longer be necessary once we migrate to the
// EventEngine endpoint API.
void SecurityHandshaker::OnHandshakeDataSentToPeerFnScheduler(
void* arg, grpc_error_handle error) {
SecurityHandshaker* handshaker = static_cast<SecurityHandshaker*>(arg);
handshaker->args_->event_engine->Run(
[handshaker, error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
handshaker->OnHandshakeDataSentToPeerFn(std::move(error));
});
grpc_error_handle error) {
args_->event_engine->Run([self = RefAsSubclass<SecurityHandshaker>(),
error = std::move(error)]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->OnHandshakeDataSentToPeerFn(std::move(error));
// Avoid destruction outside of an ExecCtx (since this is non-cancelable).
self.reset();
});
}
void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
RefCountedPtr<SecurityHandshaker> handshaker(this);
MutexLock lock(&mu_);
if (!error.ok() || is_shutdown_) {
HandshakeFailedLocked(
@ -505,10 +498,10 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
if (handshaker_result_ == nullptr) {
grpc_endpoint_read(
args_->endpoint.get(), args_->read_buffer.c_slice_buffer(),
GRPC_CLOSURE_INIT(
&on_handshake_data_received_from_peer_,
&SecurityHandshaker::OnHandshakeDataReceivedFromPeerFnScheduler,
this, grpc_schedule_on_exec_ctx),
NewClosure([self = RefAsSubclass<SecurityHandshaker>()](
absl::Status status) {
self->OnHandshakeDataReceivedFromPeerFnScheduler(std::move(status));
}),
/*urgent=*/true, /*min_progress_size=*/1);
} else {
error = CheckPeerLocked();
@ -517,7 +510,6 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(absl::Status error) {
return;
}
}
handshaker.release(); // Avoid unref
}
//
@ -528,7 +520,7 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
MutexLock lock(&mu_);
if (!is_shutdown_) {
is_shutdown_ = true;
connector_->cancel_check_peer(&on_peer_checked_, std::move(error));
connector_->cancel_check_peer(on_peer_checked_, std::move(error));
tsi_handshaker_shutdown(handshaker_);
args_->endpoint.reset();
}
@ -537,7 +529,6 @@ void SecurityHandshaker::Shutdown(grpc_error_handle error) {
void SecurityHandshaker::DoHandshake(
HandshakerArgs* args,
absl::AnyInvocable<void(absl::Status)> on_handshake_done) {
auto ref = Ref();
MutexLock lock(&mu_);
args_ = args;
on_handshake_done_ = std::move(on_handshake_done);
@ -546,8 +537,6 @@ void SecurityHandshaker::DoHandshake(
DoHandshakerNextLocked(handshake_buffer_, bytes_received_size);
if (!error.ok()) {
HandshakeFailedLocked(error);
} else {
ref.release(); // Avoid unref
}
}
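The thrust of this security_handshaker.cc refactor: the preallocated `grpc_closure` members and the manual `Ref()` / `release()  // Avoid unref` bookkeeping are replaced by closures that capture `RefAsSubclass<SecurityHandshaker>()`, so each pending callback owns a ref that drops automatically; the one remaining hand-off is the TSI_ASYNC path, where `self.release()` transfers the ref to the C callback. A sketch of the closure-owned-ref pattern with `std::shared_ptr` standing in for `RefCountedPtr` (illustrative types, not the gRPC ones):

```cpp
#include <functional>
#include <memory>
#include <vector>

// Stand-in event loop: stored callbacks run at some later time.
std::vector<std::function<void()>> g_pending;

struct Handshaker : std::enable_shared_from_this<Handshaker> {
  void OnHandshakeDataSentToPeer() { /* ... */ }

  void ScheduleSendDone() {
    // The captured shared_ptr keeps this object alive until the
    // callback fires; no release()/"avoid unref" branches needed.
    g_pending.push_back(
        [self = shared_from_this()] { self->OnHandshakeDataSentToPeer(); });
  }
};
```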

@ -398,11 +398,9 @@ bool BaseCallData::SendMessage::IsIdle() const {
void BaseCallData::SendMessage::OnComplete(absl::Status status) {
Flusher flusher(base_);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " SendMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
switch (state_) {
case State::kInitial:
case State::kIdle:
@ -429,11 +427,9 @@ void BaseCallData::SendMessage::OnComplete(absl::Status status) {
void BaseCallData::SendMessage::Done(const ServerMetadata& metadata,
Flusher* flusher) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " SendMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
switch (state_) {
case State::kCancelled:
case State::kCancelledButNotYetPolled:
@ -681,11 +677,10 @@ void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) {
}
void BaseCallData::ReceiveMessage::OnComplete(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " ReceiveMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
switch (state_) {
case State::kInitial:
case State::kIdle:
@ -722,11 +717,9 @@ void BaseCallData::ReceiveMessage::OnComplete(absl::Status status) {
void BaseCallData::ReceiveMessage::Done(const ServerMetadata& metadata,
Flusher* flusher) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " ReceiveMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " ReceiveMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
switch (state_) {
case State::kInitial:
state_ = State::kCancelled;
@ -842,11 +835,10 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
CHECK(push_.has_value());
auto r_push = (*push_)();
if (auto* p = r_push.value_if_ready()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " ReceiveMessage.WakeInsideCombiner push complete: "
<< (*p ? "true" : "false");
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.WakeInsideCombiner push complete: "
<< (*p ? "true" : "false");
// We haven't pulled through yet, so this certainly shouldn't succeed.
CHECK(!*p);
state_ = State::kCancelled;
@ -1366,9 +1358,7 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
CapturedBatch batch(b);
Flusher flusher(this);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " StartBatch " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO) << LogTag() << " StartBatch " << DebugString();
// If this is a cancel stream, cancel anything we have pending and propagate
// the cancellation.
@ -1489,9 +1479,8 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// Handle cancellation.
void ClientCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " Cancel error=" << error.ToString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " Cancel error=" << error.ToString();
// Track the latest reason for cancellation.
cancelled_error_ = error;
// Stop running the promise.
@ -1568,11 +1557,10 @@ void ClientCallData::StartPromise(Flusher* flusher) {
}
void ClientCallData::RecvInitialMetadataReady(grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " ClientCallData.RecvInitialMetadataReady "
<< DebugString() << " error:" << error.ToString()
<< " md:" << recv_initial_metadata_->metadata->DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ClientCallData.RecvInitialMetadataReady "
<< DebugString() << " error:" << error.ToString()
<< " md:" << recv_initial_metadata_->metadata->DebugString();
ScopedContext context(this);
Flusher flusher(this);
if (!error.ok()) {
@ -1974,9 +1962,8 @@ ServerCallData::ServerCallData(grpc_call_element* elem,
}
ServerCallData::~ServerCallData() {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " ~ServerCallData " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ~ServerCallData " << DebugString();
if (send_initial_metadata_ != nullptr) {
send_initial_metadata_->~SendInitialMetadata();
}
@ -2001,9 +1988,7 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
Flusher flusher(this);
bool wake = false;
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " StartBatch: " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO) << LogTag() << " StartBatch: " << DebugString();
// If this is a cancel stream, cancel anything we have pending and
// propagate the cancellation.
@ -2306,9 +2291,8 @@ void ServerCallData::RecvInitialMetadataReadyCallback(void* arg,
void ServerCallData::RecvInitialMetadataReady(grpc_error_handle error) {
Flusher flusher(this);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": RecvInitialMetadataReady " << error;
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": RecvInitialMetadataReady " << error;
CHECK(recv_initial_state_ == RecvInitialState::kForwarded);
// If there was an error we just propagate that through
if (!error.ok()) {
@ -2370,9 +2354,8 @@ std::string ServerCallData::DebugString() const {
// Wakeup and poll the promise if appropriate.
void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
PollContext poll_ctx(this, flusher);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": WakeInsideCombiner " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": WakeInsideCombiner " << DebugString();
poll_ctx.ClearRepoll();
if (send_initial_metadata_ != nullptr) {
if (send_initial_metadata_->state ==
@ -2392,12 +2375,12 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
}
if (send_initial_metadata_->metadata_push_.has_value()) {
if ((*send_initial_metadata_->metadata_push_)().ready()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": WakeInsideCombiner: metadata_push done";
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": WakeInsideCombiner: metadata_push done";
send_initial_metadata_->metadata_push_.reset();
} else if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": WakeInsideCombiner: metadata_push pending";
} else {
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": WakeInsideCombiner: metadata_push pending";
}
}
}
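Most hunks in this merge, here and in the iomgr/promise/resource-quota files below, collapse the three-line `if (GRPC_TRACE_FLAG_ENABLED(tag)) { LOG(INFO) << ...; }` pattern into the single streaming macro `GRPC_TRACE_LOG(tag, INFO) << ...;`. A self-contained sketch of how such a macro can be built on absl's `LOG_IF` (an assumed shape, not the verbatim gRPC definition; the flag lookup is stubbed):

```cpp
#include "absl/log/log.h"

// Stub trace-flag lookup so the sketch compiles standalone; gRPC's
// real trace flags are registered objects, not a string lookup.
inline bool TraceFlagEnabled(const char* /*tag*/) { return true; }

#define GRPC_TRACE_FLAG_ENABLED(tag) TraceFlagEnabled(#tag)

// Streams only when the flag is on; the right-hand operands are not
// evaluated when the condition is false.
#define GRPC_TRACE_LOG(tag, level) \
  LOG_IF(level, GRPC_TRACE_FLAG_ENABLED(tag))

void Example(void* pollset, int r) {
  // Before: if (GRPC_TRACE_FLAG_ENABLED(polling)) { LOG(INFO) << ...; }
  GRPC_TRACE_LOG(polling, INFO)
      << "ps: " << pollset << " poll got " << r << " events";
}
```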

@ -161,9 +161,8 @@ void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
refs_.fetch_sub(MakeRefPair(1, 0), std::memory_order_acq_rel);
CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << " Scheduling on queue : item " << cb_wrapper;
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< " Scheduling on queue : item " << cb_wrapper;
queue_.Push(&cb_wrapper->mpscq_node);
}
}
@ -172,19 +171,15 @@ void WorkSerializer::LegacyWorkSerializer::Schedule(
std::function<void()> callback, const DebugLocation& location) {
CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::Schedule() " << this
<< " Scheduling callback " << cb_wrapper << " ["
<< location.file() << ":" << location.line() << "]";
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::Schedule() " << this << " Scheduling callback "
<< cb_wrapper << " [" << location.file() << ":" << location.line() << "]";
refs_.fetch_add(MakeRefPair(0, 1), std::memory_order_acq_rel);
queue_.Push(&cb_wrapper->mpscq_node);
}
void WorkSerializer::LegacyWorkSerializer::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::Orphan() " << this;
}
GRPC_TRACE_LOG(work_serializer, INFO) << "WorkSerializer::Orphan() " << this;
const uint64_t prev_ref_pair =
refs_.fetch_sub(MakeRefPair(0, 1), std::memory_order_acq_rel);
if (GetOwners(prev_ref_pair) == 0 && GetSize(prev_ref_pair) == 1) {
@ -196,9 +191,8 @@ void WorkSerializer::LegacyWorkSerializer::Orphan() {
// The thread that calls this loans itself to the work serializer so as to
// execute all the scheduled callbacks.
void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::DrainQueue() " << this;
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::DrainQueue() " << this;
// Attempt to take ownership of the WorkSerializer. Also increment the queue
// size as required by `DrainQueueOwned()`.
const uint64_t prev_ref_pair =
@ -217,9 +211,8 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
}
void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::DrainQueueOwned() " << this;
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::DrainQueueOwned() " << this;
while (true) {
auto prev_ref_pair = refs_.fetch_sub(MakeRefPair(0, 1));
// It is possible that while draining the queue, the last callback ended
@ -264,11 +257,10 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
GRPC_TRACE_LOG(work_serializer, INFO)
<< " Queue returned nullptr, trying again";
}
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << " Running item " << cb_wrapper
<< " : callback scheduled at [" << cb_wrapper->location.file()
<< ":" << cb_wrapper->location.line() << "]";
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< " Running item " << cb_wrapper << " : callback scheduled at ["
<< cb_wrapper->location.file() << ":" << cb_wrapper->location.line()
<< "]";
cb_wrapper->callback();
delete cb_wrapper;
}

@ -39,15 +39,6 @@
#include "src/core/lib/gprpp/time_util.h"
#include "src/core/lib/iomgr/ev_apple.h"
#ifndef NDEBUG
#define GRPC_POLLING_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(apple_polling)) { \
VLOG(2) << "(polling) " << absl::StrFormat(format, __VA_ARGS__); \
}
#else
#define GRPC_POLLING_TRACE(...)
#endif // NDEBUG
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
struct GlobalRunLoopContext {

@ -727,9 +727,8 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps,
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "ps: " << ps << " poll got " << r << " events";
}
GRPC_TRACE_LOG(polling, INFO)
<< "ps: " << ps << " poll got " << r << " events";
gpr_atm_rel_store(&g_epoll_set.num_events, r);
gpr_atm_rel_store(&g_epoll_set.cursor, 0);
@ -746,9 +745,8 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
pollset->begin_refs++;
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " BEGIN_STARTS:" << worker;
}
GRPC_TRACE_LOG(polling, INFO)
<< "PS:" << pollset << " BEGIN_STARTS:" << worker;
if (pollset->seen_inactive) {
// pollset has been observed to be inactive, we need to move back to the
@ -765,11 +763,10 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
retry_lock_neighborhood:
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " BEGIN_REORG:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " is_reassigning=" << is_reassigning;
}
GRPC_TRACE_LOG(polling, INFO)
<< "PS:" << pollset << " BEGIN_REORG:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " is_reassigning=" << is_reassigning;
if (pollset->seen_inactive) {
if (neighborhood != pollset->neighborhood) {
gpr_mu_unlock(&neighborhood->mu);
@ -818,11 +815,10 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
while (worker->state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " BEGIN_WAIT:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " shutdown=" << pollset->shutting_down;
}
GRPC_TRACE_LOG(polling, INFO)
<< "PS:" << pollset << " BEGIN_WAIT:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " shutdown=" << pollset->shutting_down;
if (gpr_cv_wait(&worker->cv, &pollset->mu,
deadline.as_timespec(GPR_CLOCK_MONOTONIC)) &&
@ -877,17 +873,15 @@ static bool check_neighborhood_for_available_poller(
if (gpr_atm_no_barrier_cas(
&g_active_poller, 0,
reinterpret_cast<gpr_atm>(inspect_worker))) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. choose next poller to be " << inspect_worker;
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. choose next poller to be " << inspect_worker;
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
gpr_cv_signal(&inspect_worker->cv);
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. beaten to choose next poller";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. beaten to choose next poller";
}
// even if we didn't win the cas, there's a worker, we can stop
found_worker = true;
@ -903,9 +897,8 @@ static bool check_neighborhood_for_available_poller(
} while (!found_worker && inspect_worker != inspect->root_worker);
}
if (!found_worker) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. mark pollset " << inspect << " inactive";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. mark pollset " << inspect << " inactive";
inspect->seen_inactive = true;
if (inspect == neighborhood->active_root) {
neighborhood->active_root =
@ -922,9 +915,7 @@ static bool check_neighborhood_for_available_poller(
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " END_WORKER:" << worker;
}
GRPC_TRACE_LOG(polling, INFO) << "PS:" << pollset << " END_WORKER:" << worker;
if (worker_hdl != nullptr) *worker_hdl = nullptr;
// Make sure we appear kicked
SET_KICK_STATE(worker, KICKED);
@ -933,9 +924,8 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_atm_no_barrier_load(&g_active_poller) ==
reinterpret_cast<gpr_atm>(worker)) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. choose next poller to be peer " << worker;
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. choose next poller to be peer " << worker;
CHECK(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
@ -984,9 +974,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. remove worker";
}
GRPC_TRACE_LOG(polling, INFO) << " .. remove worker";
if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(pollset);
}
@ -1075,22 +1063,16 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
grpc_pollset_worker* root_worker = pollset->root_worker;
if (root_worker == nullptr) {
pollset->kicked_without_poller = true;
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked_without_poller";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked_without_poller";
goto done;
}
grpc_pollset_worker* next_worker = root_worker->next;
if (root_worker->state == KICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. already kicked " << root_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. already kicked " << root_worker;
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. already kicked " << next_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. already kicked " << next_worker;
SET_KICK_STATE(next_worker, KICKED);
goto done;
} else if (root_worker == next_worker && // only try and wake up a poller
@ -1098,27 +1080,22 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
root_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked " << root_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked " << root_worker;
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (next_worker->state == UNKICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked " << next_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked " << next_worker;
CHECK(next_worker->initialized_cv);
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
goto done;
} else if (next_worker->state == DESIGNATED_POLLER) {
if (root_worker->state != DESIGNATED_POLLER) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked root non-poller " << root_worker
<< " (initialized_cv=" << root_worker->initialized_cv
<< ") (poller=" << next_worker << ")";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. kicked root non-poller " << root_worker
<< " (initialized_cv=" << root_worker->initialized_cv
<< ") (poller=" << next_worker << ")";
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
gpr_cv_signal(&root_worker->cv);
@ -1137,9 +1114,7 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
goto done;
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked while waking up";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked while waking up";
goto done;
}
@ -1147,36 +1122,27 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
if (specific_worker->state == KICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. specific worker already kicked";
}
GRPC_TRACE_LOG(polling, INFO) << " .. specific worker already kicked";
goto done;
} else if (g_current_thread_worker == specific_worker) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. mark " << specific_worker << " kicked";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. mark " << specific_worker << " kicked";
SET_KICK_STATE(specific_worker, KICKED);
goto done;
} else if (specific_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kick active poller";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kick active poller";
SET_KICK_STATE(specific_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kick waiting worker";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kick waiting worker";
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kick non-waiting worker";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kick non-waiting worker";
SET_KICK_STATE(specific_worker, KICKED);
goto done;
}

@ -1028,9 +1028,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << pollset << " poll=" << r;
}
GRPC_TRACE_LOG(polling, INFO) << pollset << " poll=" << r;
if (r < 0) {
if (errno != EINTR) {
@ -1052,9 +1050,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << pollset << ": got_wakeup";
}
GRPC_TRACE_LOG(polling, INFO) << pollset << ": got_wakeup";
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}

@ -30,8 +30,12 @@
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set* pollset_set) {
grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set;
pollent.tag = GRPC_POLLS_POLLSET_SET;
if (pollset_set == nullptr) {
pollent.tag = GRPC_POLLS_NONE;
} else {
pollent.pollent.pollset_set = pollset_set;
pollent.tag = GRPC_POLLS_POLLSET_SET;
}
return pollent;
}
@ -73,6 +77,8 @@ void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else {
grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));
@ -93,6 +99,8 @@ void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
CHECK_NE(pollent->pollent.pollset_set, nullptr);
grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set);
} else if (pollent->tag == GRPC_POLLS_NONE) {
// Do nothing.
} else {
grpc_core::Crash(
absl::StrFormat("Invalid grpc_polling_entity tag '%d'", pollent->tag));
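The polling_entity.cc change makes a null `pollset_set` produce a `GRPC_POLLS_NONE` entity, and the add/del helpers treat that tag as an explicit no-op instead of falling through to `Crash`. A simplified model of the tagged-union guard (types reduced to the essentials):

```cpp
#include <cassert>
#include <cstdio>
#include <cstdlib>

struct PollsetSet { /* opaque */ };

enum Tag { POLLS_NONE, POLLS_POLLSET_SET };

struct PollingEntity {
  Tag tag;
  PollsetSet* pollset_set = nullptr;
};

PollingEntity FromPollsetSet(PollsetSet* pss) {
  PollingEntity ent;
  if (pss == nullptr) {
    ent.tag = POLLS_NONE;  // previously yielded a bogus POLLSET_SET entity
  } else {
    ent.tag = POLLS_POLLSET_SET;
    ent.pollset_set = pss;
  }
  return ent;
}

void AddToPollsetSet(PollingEntity* ent, PollsetSet* /*dst*/) {
  if (ent->tag == POLLS_POLLSET_SET) {
    assert(ent->pollset_set != nullptr);
    // grpc_pollset_set_add_pollset_set(dst, ent->pollset_set);
  } else if (ent->tag == POLLS_NONE) {
    // Do nothing: an empty entity participates in no pollsets.
  } else {
    std::fprintf(stderr, "invalid polling entity tag %d\n", ent->tag);
    std::abort();
  }
}
```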

@ -618,18 +618,14 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " destroy";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " destroy";
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p);
}
static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " run";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " run";
gpr_mu_lock(p->pollset_mu);
grpc_core::Timestamp deadline =
grpc_core::Timestamp::Now() + grpc_core::Duration::Seconds(10);
@ -644,17 +640,13 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
g_backup_poller = nullptr;
g_uncovered_notifications_pending = 0;
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " shutdown";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " shutdown";
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " reschedule";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " reschedule";
grpc_core::Executor::Run(&p->run_poller, absl::OkStatus(),
grpc_core::ExecutorType::DEFAULT,
grpc_core::ExecutorJobType::LONG);
@ -691,9 +683,7 @@ static void cover_self(grpc_tcp* tcp) {
g_backup_poller = p;
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " create";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " create";
grpc_core::Executor::Run(
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
absl::OkStatus(), grpc_core::ExecutorType::DEFAULT,
@ -709,16 +699,12 @@ static void cover_self(grpc_tcp* tcp) {
}
static void notify_on_read(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " notify_on_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " notify_on_read";
grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " notify_on_write";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " notify_on_write";
if (!grpc_event_engine_run_in_background()) {
cover_self(tcp);
}
@ -815,9 +801,8 @@ static void tcp_destroy(grpc_endpoint* ep) {
static void perform_reclamation(grpc_tcp* tcp)
ABSL_LOCKS_EXCLUDED(tcp->read_mu) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "TCP: benign reclamation to free memory";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "TCP: benign reclamation to free memory";
tcp->read_mu.Lock();
if (tcp->incoming_buffer != nullptr) {
grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
@ -910,9 +895,7 @@ static void update_rcvlowat(grpc_tcp* tcp)
static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
GRPC_LATENT_SEE_INNER_SCOPE("tcp_do_read");
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " do_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " do_read";
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@ -1479,9 +1462,7 @@ static bool process_errors(grpc_tcp* tcp) {
static void tcp_handle_error(void* arg /* grpc_tcp */,
grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " got_error: " << error;
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " got_error: " << error;
if (!error.ok() ||
static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
@ -1809,9 +1790,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
: tcp_flush(tcp, &error);
if (!flush_result) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "write: delayed";
}
GRPC_TRACE_LOG(tcp, INFO) << "write: delayed";
notify_on_write(tcp);
// tcp_flush does not populate error if it has returned false.
DCHECK(error.ok());
@ -1880,9 +1859,7 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
TCP_REF(tcp, "write");
tcp->write_cb = cb;
tcp->current_zerocopy_send = zerocopy_send_record;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "write: delayed";
}
GRPC_TRACE_LOG(tcp, INFO) << "write: delayed";
notify_on_write(tcp);
} else {
GRPC_TRACE_LOG(tcp, INFO) << "write: " << grpc_core::StatusToString(error);

@ -459,9 +459,8 @@ static void on_read(void* arg, grpc_error_handle err) {
LOG(ERROR) << "Invalid address: " << addr_uri.status();
goto error;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "SERVER_CONNECT: incoming connection: " << *addr_uri;
}
GRPC_TRACE_LOG(tcp, INFO)
<< "SERVER_CONNECT: incoming connection: " << *addr_uri;
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);

@ -176,9 +176,7 @@ static void on_read(void* tcpp, grpc_error_handle error) {
grpc_winsocket* socket = tcp->socket;
grpc_winsocket_callback_info* info = &socket->read_info;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " on_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " on_read";
if (error.ok()) {
if (info->wsa_error != 0 && !tcp->shutting_down) {
@ -208,9 +206,7 @@ static void on_read(void* tcpp, grpc_error_handle error) {
}
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " unref read_slice";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " unref read_slice";
grpc_slice_buffer_reset_and_unref(tcp->read_slices);
error = grpc_error_set_int(
tcp->shutting_down ? GRPC_ERROR_CREATE("TCP stream shutting down")
@ -239,9 +235,7 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
WSABUF buffers[MAX_WSABUF_COUNT];
size_t i;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " win_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " win_read";
if (tcp->shutting_down) {
grpc_core::ExecCtx::Run(
@ -310,9 +304,7 @@ static void on_write(void* tcpp, grpc_error_handle error) {
grpc_winsocket_callback_info* info = &handle->write_info;
grpc_closure* cb;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " on_write";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " on_write";
gpr_mu_lock(&tcp->mu);
cb = tcp->write_cb;

@ -195,11 +195,10 @@ static bool wait_until(grpc_core::Timestamp next) {
gpr_cv_wait(&g_cv_wait, &g_mu, next.as_timespec(GPR_CLOCK_MONOTONIC));
if (GRPC_TRACE_FLAG_ENABLED(timer_check)) {
LOG(INFO) << "wait ended: was_timed:"
<< (my_timed_waiter_generation == g_timed_waiter_generation)
<< " kicked:" << g_kicked;
}
GRPC_TRACE_LOG(timer_check, INFO)
<< "wait ended: was_timed:"
<< (my_timed_waiter_generation == g_timed_waiter_generation)
<< " kicked:" << g_kicked;
// if this was the timed waiter, then we need to check timers, and flag
// that there's now no timed waiter... we'll look for a replacement if
// there's work to do after checking timers (code above)

@ -172,16 +172,13 @@ class ForEach {
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION Poll<Result> PollReaderNext() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext";
}
GRPC_TRACE_LOG(promise_primitives, INFO) << DebugTag() << " PollReaderNext";
auto r = reader_next_();
if (auto* p = r.value_if_ready()) {
switch (NextValueTraits<ReaderResult>::Type(*p)) {
case NextValueType::kValue: {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext: got value";
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << " PollReaderNext: got value";
Destruct(&reader_next_);
auto action = action_factory_.Make(
std::move(NextValueTraits<ReaderResult>::MutableValue(*p)));
@ -190,15 +187,13 @@ class ForEach {
return PollAction();
}
case NextValueType::kEndOfStream: {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext: got end of stream";
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << " PollReaderNext: got end of stream";
return Done<Result>::Make(false);
}
case NextValueType::kError: {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext: got error";
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << " PollReaderNext: got error";
return Done<Result>::Make(true);
}
}
@ -207,9 +202,7 @@ class ForEach {
}
Poll<Result> PollAction() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollAction";
}
GRPC_TRACE_LOG(promise_primitives, INFO) << DebugTag() << " PollAction";
auto r = in_action_.promise();
if (auto* p = r.value_if_ready()) {
if (IsStatusOk(*p)) {

@ -45,9 +45,8 @@ class InterActivityLatch {
auto Wait() {
return [this]() -> Poll<T> {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (is_set_) {
return std::move(value_);
} else {
@ -60,9 +59,8 @@ class InterActivityLatch {
// Set the latch.
void Set(T value) {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
is_set_ = true;
value_ = std::move(value);
waiters_.WakeupAsync();
@ -102,9 +100,8 @@ class InterActivityLatch<void> {
auto Wait() {
return [this]() -> Poll<Empty> {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (is_set_) {
return Empty{};
} else {
@ -117,9 +114,8 @@ class InterActivityLatch<void> {
// Set the latch.
void Set() {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
is_set_ = true;
waiters_.WakeupAsync();
}

@ -67,9 +67,8 @@ class Latch {
has_had_waiters_ = true;
#endif
return [this]() -> Poll<T> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Wait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Wait " << StateString();
if (has_value_) {
return std::move(value_);
} else {
@ -85,9 +84,8 @@ class Latch {
has_had_waiters_ = true;
#endif
return [this]() -> Poll<T> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "WaitAndCopy " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "WaitAndCopy " << StateString();
if (has_value_) {
return value_;
} else {
@ -98,9 +96,8 @@ class Latch {
// Set the value of the latch. Can only be called once.
void Set(T value) {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
DCHECK(!has_value_);
value_ = std::move(value);
has_value_ = true;
@ -161,9 +158,8 @@ class Latch<void> {
has_had_waiters_ = true;
#endif
return [this]() -> Poll<Empty> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (is_set_) {
return Empty{};
} else {
@ -174,9 +170,8 @@ class Latch<void> {
// Set the latch. Can only be called once.
void Set() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
DCHECK(!is_set_);
is_set_ = true;
waiter_.Wake();
@ -223,9 +218,8 @@ class ExternallyObservableLatch<void> {
// Produce a promise to wait for this latch.
auto Wait() {
return [this]() -> Poll<Empty> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (IsSet()) {
return Empty{};
} else {
@ -236,9 +230,8 @@ class ExternallyObservableLatch<void> {
// Set the latch.
void Set() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
is_set_.store(true, std::memory_order_relaxed);
waiter_.Wake();
}
@ -246,9 +239,8 @@ class ExternallyObservableLatch<void> {
bool IsSet() const { return is_set_.load(std::memory_order_relaxed); }
void Reset() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Reset " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Reset " << StateString();
is_set_.store(false, std::memory_order_relaxed);
}

@ -355,9 +355,8 @@ void GrpcMemoryAllocatorImpl::MaybeDonateBack() {
if (free_bytes_.compare_exchange_weak(free, new_free,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "[" << this << "] Early return " << ret << " bytes";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "[" << this << "] Early return " << ret << " bytes";
CHECK(taken_bytes_.fetch_sub(ret, std::memory_order_relaxed) >= ret);
memory_quota_->Return(ret);
return;
@ -548,9 +547,7 @@ void BasicMemoryQuota::Return(size_t amount) {
}
void BasicMemoryQuota::AddNewAllocator(GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Adding allocator " << allocator;
}
GRPC_TRACE_LOG(resource_quota, INFO) << "Adding allocator " << allocator;
AllocatorBucket::Shard& shard = small_allocators_.SelectShard(allocator);
@ -561,9 +558,7 @@ void BasicMemoryQuota::AddNewAllocator(GrpcMemoryAllocatorImpl* allocator) {
}
void BasicMemoryQuota::RemoveAllocator(GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Removing allocator " << allocator;
}
GRPC_TRACE_LOG(resource_quota, INFO) << "Removing allocator " << allocator;
AllocatorBucket::Shard& small_shard =
small_allocators_.SelectShard(allocator);
@ -608,9 +603,8 @@ void BasicMemoryQuota::MaybeMoveAllocator(GrpcMemoryAllocatorImpl* allocator,
void BasicMemoryQuota::MaybeMoveAllocatorBigToSmall(
GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Moving allocator " << allocator << " to small";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "Moving allocator " << allocator << " to small";
AllocatorBucket::Shard& old_shard = big_allocators_.SelectShard(allocator);
@ -629,9 +623,8 @@ void BasicMemoryQuota::MaybeMoveAllocatorBigToSmall(
void BasicMemoryQuota::MaybeMoveAllocatorSmallToBig(
GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Moving allocator " << allocator << " to big";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "Moving allocator " << allocator << " to big";
AllocatorBucket::Shard& old_shard = small_allocators_.SelectShard(allocator);

@ -167,11 +167,10 @@ absl::Status FileWatcherAuthorizationPolicyProvider::ForceUpdate() {
if (cb_ != nullptr) {
cb_(contents_changed, absl::OkStatus());
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_authz_api)) {
LOG(INFO) << "authorization policy reload status: successfully loaded new "
"policy\n"
<< file_contents_;
}
GRPC_TRACE_LOG(grpc_authz_api, INFO)
<< "authorization policy reload status: successfully loaded new "
"policy\n"
<< file_contents_;
return absl::OkStatus();
}

@ -511,11 +511,10 @@ grpc_call_credentials* grpc_google_refresh_token_credentials_create(
const char* json_refresh_token, void* reserved) {
grpc_auth_refresh_token token =
grpc_auth_refresh_token_create_from_string(json_refresh_token);
if (GRPC_TRACE_FLAG_ENABLED(api)) {
LOG(INFO) << "grpc_refresh_token_credentials_create(json_refresh_token="
<< create_loggable_refresh_token(&token)
<< ", reserved=" << reserved << ")";
}
GRPC_TRACE_LOG(api, INFO)
<< "grpc_refresh_token_credentials_create(json_refresh_token="
<< create_loggable_refresh_token(&token) << ", reserved=" << reserved
<< ")";
CHECK_EQ(reserved, nullptr);
return grpc_refresh_token_credentials_create_from_auth_refresh_token(token)
.release();

@ -47,18 +47,16 @@ Timestamp BdpEstimator::CompletePing() {
1e-9 * static_cast<double>(dt_ts.tv_nsec);
double bw = dt > 0 ? (static_cast<double>(accumulator_) / dt) : 0;
Duration start_inter_ping_delay = inter_ping_delay_;
if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) {
LOG(INFO) << "bdp[" << name_ << "]:complete acc=" << accumulator_
<< " est=" << estimate_ << " dt=" << dt << " bw=" << bw / 125000.0
<< "Mbs bw_est=" << bw_est_ / 125000.0 << "Mbs";
}
GRPC_TRACE_LOG(bdp_estimator, INFO)
<< "bdp[" << name_ << "]:complete acc=" << accumulator_
<< " est=" << estimate_ << " dt=" << dt << " bw=" << bw / 125000.0
<< "Mbs bw_est=" << bw_est_ / 125000.0 << "Mbs";
CHECK(ping_state_ == PingState::STARTED);
if (accumulator_ > 2 * estimate_ / 3 && bw > bw_est_) {
estimate_ = std::max(accumulator_, estimate_ * 2);
bw_est_ = bw;
if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) {
LOG(INFO) << "bdp[" << name_ << "]: estimate increased to " << estimate_;
}
GRPC_TRACE_LOG(bdp_estimator, INFO)
<< "bdp[" << name_ << "]: estimate increased to " << estimate_;
inter_ping_delay_ /= 2; // if the ping estimate changes,
// exponentially get faster at probing
} else if (inter_ping_delay_ < Duration::Seconds(10)) {

@ -200,11 +200,9 @@ void CallFilters::CancelDueToFailedPipeOperation(SourceLocation but_where) {
void CallFilters::PushServerTrailingMetadata(ServerMetadataHandle md) {
CHECK(md != nullptr);
if (GRPC_TRACE_FLAG_ENABLED(call)) {
LOG(INFO) << GetContext<Activity>()->DebugTag()
<< " PushServerTrailingMetadata[" << this
<< "]: " << md->DebugString() << " into " << DebugString();
}
GRPC_TRACE_LOG(call, INFO)
<< GetContext<Activity>()->DebugTag() << " PushServerTrailingMetadata["
<< this << "]: " << md->DebugString() << " into " << DebugString();
CHECK(md != nullptr);
if (call_state_.PushServerTrailingMetadata(
md->get(GrpcCallWasCancelled()).value_or(false))) {

@ -119,10 +119,3 @@ void gpr_log_verbosity_init(void) {
}
#endif // GRPC_VERBOSITY_MACRO
}
void gpr_set_log_function([[maybe_unused]] gpr_log_func deprecated_setting) {
LOG(ERROR)
<< "This function is deprecated. This function will be deleted in the "
"next gRPC release. You may create a new absl LogSink with similar "
"functionality. gRFC: https://github.com/grpc/proposal/pull/425 ";
}
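For callers migrating off gpr_set_log_function, a minimal absl::LogSink sketch; the class name and forwarding target here are illustrative, and the gRFC linked above is the authoritative migration guide:

#include "absl/log/log_entry.h"
#include "absl/log/log_sink.h"
#include "absl/log/log_sink_registry.h"

// Hypothetical bridge sink: forwards each log entry to wherever the old
// gpr_log_func callback used to send it.
class LegacyBridgeSink : public absl::LogSink {
 public:
  void Send(const absl::LogEntry& entry) override {
    // entry.text_message() carries the formatted message body.
  }
};

// Usage sketch: absl::AddLogSink(&sink) at startup and
// absl::RemoveLogSink(&sink) before the sink is destroyed.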

@ -1564,8 +1564,9 @@ XdsClient::XdsClient(
}
CHECK(bootstrap_ != nullptr);
if (bootstrap_->node() != nullptr) {
LOG(INFO) << "[xds_client " << this
<< "] xDS node ID: " << bootstrap_->node()->id();
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this
<< "] xDS node ID: " << bootstrap_->node()->id();
}
}

@ -256,7 +256,6 @@ gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import;
gpr_log_type gpr_log_import;
gpr_should_log_type gpr_should_log_import;
gpr_log_verbosity_init_type gpr_log_verbosity_init_import;
gpr_set_log_function_type gpr_set_log_function_import;
gpr_format_message_type gpr_format_message_import;
gpr_strdup_type gpr_strdup_import;
gpr_asprintf_type gpr_asprintf_import;
@ -542,7 +541,6 @@ void grpc_rb_load_imports(HMODULE library) {
gpr_log_import = (gpr_log_type) GetProcAddress(library, "gpr_log");
gpr_should_log_import = (gpr_should_log_type) GetProcAddress(library, "gpr_should_log");
gpr_log_verbosity_init_import = (gpr_log_verbosity_init_type) GetProcAddress(library, "gpr_log_verbosity_init");
gpr_set_log_function_import = (gpr_set_log_function_type) GetProcAddress(library, "gpr_set_log_function");
gpr_format_message_import = (gpr_format_message_type) GetProcAddress(library, "gpr_format_message");
gpr_strdup_import = (gpr_strdup_type) GetProcAddress(library, "gpr_strdup");
gpr_asprintf_import = (gpr_asprintf_type) GetProcAddress(library, "gpr_asprintf");

@ -744,9 +744,6 @@ extern gpr_should_log_type gpr_should_log_import;
typedef void(*gpr_log_verbosity_init_type)(void);
extern gpr_log_verbosity_init_type gpr_log_verbosity_init_import;
#define gpr_log_verbosity_init gpr_log_verbosity_init_import
typedef void(*gpr_set_log_function_type)(gpr_log_func deprecated_setting);
extern gpr_set_log_function_type gpr_set_log_function_import;
#define gpr_set_log_function gpr_set_log_function_import
typedef char*(*gpr_format_message_type)(int messageid);
extern gpr_format_message_type gpr_format_message_import;
#define gpr_format_message gpr_format_message_import

@ -44,8 +44,8 @@ module GRPC
include Core::CallOps
extend Forwardable
attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert
def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=,
:trailing_metadata, :status
def_delegators :@call, :cancel, :cancel_with_status, :metadata,
:write_flag, :write_flag=, :trailing_metadata, :status
# client_invoke begins a client invocation.
#
@ -620,6 +620,8 @@ module GRPC
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
def start_call(metadata = {})
# TODO(apolcyn): we should cancel and clean up the call in case this
# send initial MD op fails.
merge_metadata_to_send(metadata) && send_initial_metadata
end
@ -665,9 +667,10 @@ module GRPC
# Operation limits access to an ActiveCall's methods for use as
# an Operation on the client.
Operation = view_class(:cancel, :cancelled?, :deadline, :execute,
:metadata, :status, :start_call, :wait, :write_flag,
:write_flag=, :trailing_metadata)
# TODO(apolcyn): expose peer getter
Operation = view_class(:cancel, :cancel_with_status, :cancelled?, :deadline,
:execute, :metadata, :status, :start_call, :wait,
:write_flag, :write_flag=, :trailing_metadata)
# InterceptableView further limits access to an ActiveCall's methods
# for use in interceptors on the client, exposing only the deadline

@ -90,88 +90,101 @@ describe GRPC::Core::Call do
describe '#status' do
it 'can save the status and read it back' do
call = make_test_call
sts = Struct::Status.new(OK, 'OK')
expect { call.status = sts }.not_to raise_error
expect(call.status).to eq(sts)
make_test_call do |call|
sts = Struct::Status.new(OK, 'OK')
expect { call.status = sts }.not_to raise_error
expect(call.status).to eq(sts)
end
end
it 'must be set to a status' do
call = make_test_call
bad_sts = Object.new
expect { call.status = bad_sts }.to raise_error(TypeError)
make_test_call do |call|
bad_sts = Object.new
expect { call.status = bad_sts }.to raise_error(TypeError)
end
end
it 'can be set to nil' do
call = make_test_call
expect { call.status = nil }.not_to raise_error
make_test_call do |call|
expect { call.status = nil }.not_to raise_error
end
end
end
describe '#metadata' do
it 'can save the metadata hash and read it back' do
call = make_test_call
md = { 'k1' => 'v1', 'k2' => 'v2' }
expect { call.metadata = md }.not_to raise_error
expect(call.metadata).to be(md)
make_test_call do |call|
md = { 'k1' => 'v1', 'k2' => 'v2' }
expect { call.metadata = md }.not_to raise_error
expect(call.metadata).to be(md)
end
end
it 'must be set with a hash' do
call = make_test_call
bad_md = Object.new
expect { call.metadata = bad_md }.to raise_error(TypeError)
make_test_call do |call|
bad_md = Object.new
expect { call.metadata = bad_md }.to raise_error(TypeError)
end
end
it 'can be set to nil' do
call = make_test_call
expect { call.metadata = nil }.not_to raise_error
make_test_call do |call|
expect { call.metadata = nil }.not_to raise_error
end
end
end
describe '#set_credentials!' do
it 'can set a valid CallCredentials object' do
call = make_test_call
auth_proc = proc { { 'plugin_key' => 'plugin_value' } }
creds = GRPC::Core::CallCredentials.new auth_proc
expect { call.set_credentials! creds }.not_to raise_error
make_test_call do |call|
auth_proc = proc { { 'plugin_key' => 'plugin_value' } }
creds = GRPC::Core::CallCredentials.new auth_proc
expect { call.set_credentials! creds }.not_to raise_error
end
end
end
describe '#cancel' do
it 'completes ok' do
call = make_test_call
expect { call.cancel }.not_to raise_error
make_test_call do |call|
expect { call.cancel }.not_to raise_error
end
end
it 'completes ok when the call is closed' do
call = make_test_call
call.close
expect { call.cancel }.not_to raise_error
make_test_call do |call|
call.close
expect { call.cancel }.not_to raise_error
end
end
end
describe '#cancel_with_status' do
it 'completes ok' do
call = make_test_call
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
expect do
call.cancel_with_status(0, nil)
end.to raise_error(TypeError)
make_test_call do |call|
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
expect do
call.cancel_with_status(0, nil)
end.to raise_error(TypeError)
end
end
it 'completes ok when the call is closed' do
call = make_test_call
call.close
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
make_test_call do |call|
call.close
expect do
call.cancel_with_status(0, 'test status')
end.not_to raise_error
end
end
end
def make_test_call
@ch.create_call(nil, nil, 'phony_method', nil, deadline)
call = @ch.create_call(nil, nil, 'phony_method', nil, deadline)
yield call
call.close
end
def deadline

@ -118,7 +118,8 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5
blk = proc do
ch.create_call(nil, nil, 'phony_method', nil, deadline)
call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
call.close
end
expect(&blk).to_not raise_error
end
@ -132,8 +133,9 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5
blk = proc do
ch.create_call(nil, nil, 'phony_method', nil, deadline)
call = ch.create_call(nil, nil, 'phony_method', nil, deadline)
STDERR.puts "#{Time.now}: created call"
call.close
end
expect(&blk).to raise_error(RuntimeError)
STDERR.puts "#{Time.now}: finished: raises an error if called on a closed channel"

@ -16,36 +16,8 @@ require 'spec_helper'
include GRPC::Core
shared_context 'setup: tags' do
let(:sent_message) { 'sent message' }
let(:reply_text) { 'the reply' }
def deadline
Time.now + 5
end
def server_allows_client_to_proceed(metadata = {})
recvd_rpc = @server.request_call
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
ops = { CallOps::SEND_INITIAL_METADATA => metadata }
server_batch = server_call.run_batch(ops)
expect(server_batch.send_metadata).to be true
server_call
end
def new_client_call
@ch.create_call(nil, nil, '/method', nil, deadline)
end
def ok_status
Struct::Status.new(StatusCodes::OK, 'OK')
end
end
shared_examples 'basic GRPC message delivery is OK' do
include GRPC::Core
include_context 'setup: tags'
context 'the test channel' do
it 'should have a target' do
@ -53,272 +25,45 @@ shared_examples 'basic GRPC message delivery is OK' do
end
end
context 'a client call' do
it 'should have a peer' do
expect(new_client_call.peer).to be_a(String)
end
end
it 'calls have peer info' do
call = new_client_call
expect(call.peer).to be_a(String)
end
it 'servers receive requests from clients and can respond' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'responses written by servers are received by the client' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(sent_message)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
# finish the call
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq(reply_text)
expect(final_client_batch.status.code).to eq(0)
end
it 'compressed messages can be sent and received' do
call = new_client_call
server_call = nil
long_request_str = '0' * 2000
long_response_str = '1' * 2000
md = { 'grpc-internal-encoding-request' => 'gzip' }
server_thread = Thread.new do
server_call = server_allows_client_to_proceed(md)
it 'unary calls work' do
run_services_on_server(@server, services: [EchoService]) do
call = @stub.an_rpc(EchoMsg.new, return_op: true)
expect(call.execute).to be_a(EchoMsg)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_MESSAGE => long_request_str,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq(long_request_str)
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_MESSAGE => long_response_str,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
expect(server_batch.send_message).to be true
expect(server_batch.send_status).to be true
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.message).to eq long_response_str
expect(final_client_batch.status.code).to eq(0)
end
it 'servers can ignore a client write and send a status' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
it 'unary calls work when compression is enabled' do
run_services_on_server(@server, services: [EchoService]) do
long_request_str = '0' * 2000
md = { 'grpc-internal-encoding-request' => 'gzip' }
call = @stub.an_rpc(EchoMsg.new(msg: long_request_str),
return_op: true,
metadata: md)
response = call.execute
expect(response).to be_a(EchoMsg)
expect(response.msg).to eq(long_request_str)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
the_status = Struct::Status.new(StatusCodes::OK, 'OK')
server_thread.join
server_ops = {
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq nil
expect(server_batch.send_status).to be true
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'completes calls by sending status to client and server' do
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_message).to be true
# confirm the server can read the inbound message and respond
the_status = Struct::Status.new(StatusCodes::OK, 'OK', {})
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.message).to eq sent_message
server_ops = {
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_status).to be true
expect(server_batch.send_message).to be true
# confirm the client can receive the server response and status.
client_ops = {
CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.send_close).to be true
expect(final_client_batch.message).to eq reply_text
expect(final_client_batch.status).to eq the_status
# confirm the server can receive the client close.
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
final_server_batch = server_call.run_batch(server_ops)
expect(final_server_batch.send_close).to be true
end
def client_cancel_test(cancel_proc, expected_code,
expected_details)
call = new_client_call
server_call = nil
server_thread = Thread.new do
server_call = server_allows_client_to_proceed
call = @stub.an_rpc(EchoMsg.new, return_op: true)
run_services_on_server(@server, services: [EchoService]) do
# start the call, but don't send a message yet
call.start_call
# cancel the call
cancel_proc.call(call)
# check the client's status
failed = false
begin
call.execute
rescue GRPC::BadStatus => e
failed = true
expect(e.code).to be expected_code
expect(e.details).to eq expected_details
end
expect(failed).to be(true)
end
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
CallOps::RECV_INITIAL_METADATA => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.metadata).to eq({})
cancel_proc.call(call)
server_thread.join
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
server_batch = server_call.run_batch(server_ops)
expect(server_batch.send_close).to be true
client_ops = {
CallOps::RECV_STATUS_ON_CLIENT => {}
}
client_batch = call.run_batch(client_ops)
expect(client_batch.status.code).to be expected_code
expect(client_batch.status.details).to eq expected_details
end
it 'clients can cancel a call on the server' do
@ -344,8 +89,6 @@ shared_examples 'basic GRPC message delivery is OK' do
end
shared_examples 'GRPC metadata delivery works OK' do
include_context 'setup: tags'
describe 'from client => server' do
before(:example) do
n = 7 # arbitrary number of metadata
@ -364,53 +107,31 @@ shared_examples 'GRPC metadata delivery works OK' do
it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md|
call = new_client_call
client_ops = {
CallOps::SEND_INITIAL_METADATA => md
}
blk = proc do
call.run_batch(client_ops)
# NOTE: no need to run a server in this test b/c the failure
# happens while validating metadata to send.
failed = false
begin
@stub.an_rpc(EchoMsg.new, metadata: md)
rescue TypeError => e
failed = true
expect(e.message).to eq('grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
end
expect(&blk).to raise_error
expect(failed).to be(true)
end
end
it 'sends all the metadata pairs when keys and values are valid' do
@valid_metadata.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@valid_metadata.each_with_index do |md, i|
expect(@stub.an_rpc(EchoMsg.new, metadata: md)).to be_a(EchoMsg)
# confirm the server can receive the client metadata
# finish the call
expect(service.received_md.length).to eq(i + 1)
md.each do |k, v|
expect(service.received_md[i][k.to_s]).to eq(v)
end
end
call = new_client_call
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
# finish the call
final_server_batch = recvd_rpc.call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
end
end
@ -432,120 +153,61 @@ shared_examples 'GRPC metadata delivery works OK' do
end
it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil
}
call.run_batch(client_ops)
# server gets the invocation
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_ops = {
CallOps::SEND_INITIAL_METADATA => md
}
blk = proc do
recvd_rpc.call.run_batch(server_ops)
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@bad_keys.each do |md|
proceed = Queue.new
server_exception = nil
service.on_call_started = proc do |call|
call.send_initial_metadata(md)
rescue TypeError => e
server_exception = e
ensure
proceed.push(1)
end
client_exception = nil
client_call = @stub.an_rpc(EchoMsg.new, return_op: true)
thr = Thread.new do
client_call.execute
rescue GRPC::BadStatus => e
client_exception = e
end
proceed.pop
# TODO(apolcyn): we shouldn't need this cancel here. It's
# only currently needed b/c the server does not seem to properly
# terminate the RPC if it fails to send initial metadata. That
# should be fixed, in which case this cancellation can be removed.
client_call.cancel
thr.join
expect(client_exception.nil?).to be(false)
expect(server_exception.nil?).to be(false)
expect(server_exception.message).to eq(
'grpc_rb_md_ary_fill_hash_cb: bad type for key parameter')
end
expect(&blk).to raise_error
# cancel the call so the server can shut down immediately
call.cancel
end
end
it 'sends an empty hash if no metadata is added' do
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
run_services_on_server(@server, services: [EchoService]) do
call = @stub.an_rpc(EchoMsg.new, return_op: true)
expect(call.execute).to be_a(EchoMsg)
expect(call.metadata).to eq({})
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
# receive close and send status to finish the call
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
# receive status to finish the call
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'sends all the pairs when keys and values are valid' do
@valid_metadata.each do |md|
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
service = EchoService.new
run_services_on_server(@server, services: [service]) do
@valid_metadata.each do |md|
service.on_call_started = proc do |call|
call.send_initial_metadata(md)
end
call = @stub.an_rpc(EchoMsg.new, return_op: true)
call.execute
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(call.metadata).to eq(replace_symbols)
end
call = new_client_call
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
client_batch = call.run_batch(client_ops)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => md,
CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
srv_batch = server_call.run_batch(server_ops)
expect(srv_batch.send_close).to be true
expect(srv_batch.send_metadata).to be true
expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
final_client_batch = call.run_batch(client_ops)
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(final_client_batch.metadata).to eq(replace_symbols)
expect(final_client_batch.status.code).to eq(0)
end
end
end
@ -554,16 +216,11 @@ end
describe 'the http client/server' do
before(:example) do
server_host = '0.0.0.0:0'
@server = new_core_server_for_testing(nil)
@server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, :this_port_is_insecure)
@server.start
@ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure)
end
after(:example) do
@ch.close
@server.shutdown_and_notify(deadline)
@server.close
@stub = EchoStub.new(
"0.0.0.0:#{server_port}", nil, channel_override: @ch)
end
it_behaves_like 'basic GRPC message delivery is OK' do
@ -574,8 +231,6 @@ describe 'the http client/server' do
end
describe 'the secure http client/server' do
include_context 'setup: tags'
def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata')
files = ['ca.pem', 'server1.key', 'server1.pem']
@ -587,17 +242,14 @@ describe 'the secure http client/server' do
server_host = '0.0.0.0:0'
server_creds = GRPC::Core::ServerCredentials.new(
nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
@server = new_core_server_for_testing(nil)
@server = new_rpc_server_for_testing
server_port = @server.add_http2_port(server_host, server_creds)
@server.start
args = { Channel::SSL_TARGET => 'foo.test.google.fr' }
@ch = Channel.new("0.0.0.0:#{server_port}", args,
GRPC::Core::ChannelCredentials.new(certs[0], nil, nil))
end
after(:example) do
@server.shutdown_and_notify(deadline)
@server.close
@ch = Channel.new(
"0.0.0.0:#{server_port}", args,
GRPC::Core::ChannelCredentials.new(certs[0], nil, nil))
@stub = EchoStub.new(
"0.0.0.0:#{server_port}", nil, channel_override: @ch)
end
it_behaves_like 'basic GRPC message delivery is OK' do
@ -606,59 +258,25 @@ describe 'the secure http client/server' do
it_behaves_like 'GRPC metadata delivery works OK' do
end
def credentials_update_test(creds_update_md)
auth_proc = proc { creds_update_md }
it 'modifies metadata with CallCredentials' do
# create call creds
auth_proc = proc { { 'k1' => 'v1' } }
call_creds = GRPC::Core::CallCredentials.new(auth_proc)
initial_md_key = 'k2'
initial_md_val = 'v2'
initial_md = { initial_md_key => initial_md_val }
expected_md = creds_update_md.clone
fail 'bad test param' unless expected_md[initial_md_key].nil?
expected_md[initial_md_key] = initial_md_val
recvd_rpc = nil
rcv_thread = Thread.new do
recvd_rpc = @server.request_call
# create arbitrary custom metadata
custom_md = { 'k2' => 'v2' }
# perform an RPC
echo_service = EchoService.new
run_services_on_server(@server, services: [echo_service]) do
expect(@stub.an_rpc(EchoMsg.new,
credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
end
# call creds metadata should be merged with custom MD
expect(echo_service.received_md.length).to eq(1)
expected_md = { 'k1' => 'v1', 'k2' => 'v2' }
expected_md.each do |k, v|
expect(echo_service.received_md[0][k]).to eq(v)
end
call = new_client_call
call.set_credentials! call_creds
client_batch = call.run_batch(
CallOps::SEND_INITIAL_METADATA => initial_md,
CallOps::SEND_CLOSE_FROM_CLIENT => nil)
expect(client_batch.send_metadata).to be true
expect(client_batch.send_close).to be true
# confirm the server can receive the client metadata
rcv_thread.join
expect(recvd_rpc).to_not eq nil
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[expected_md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
credentials_update_test_finish_call(call, recvd_rpc.call)
end
def credentials_update_test_finish_call(client_call, server_call)
final_server_batch = server_call.run_batch(
CallOps::RECV_CLOSE_ON_SERVER => nil,
CallOps::SEND_INITIAL_METADATA => nil,
CallOps::SEND_STATUS_FROM_SERVER => ok_status)
expect(final_server_batch.send_close).to be(true)
expect(final_server_batch.send_metadata).to be(true)
expect(final_server_batch.send_status).to be(true)
final_client_batch = client_call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(final_client_batch.metadata).to eq({})
expect(final_client_batch.status.code).to eq(0)
end
it 'modifies metadata with CallCredentials' do
credentials_update_test('k1' => 'updated-v1')
end
it 'modifies large metadata with CallCredentials' do
@ -666,11 +284,34 @@ describe 'the secure http client/server' do
'00000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111',
)
md = {
k3: val_array,
k4: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey5: 'v1'
# create call creds
auth_proc = proc do
{
k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
}
end
call_creds = GRPC::Core::CallCredentials.new(auth_proc)
# create arbitrary custom metadata
custom_md = { k1: 'v1' }
# perform an RPC
echo_service = EchoService.new
run_services_on_server(@server, services: [echo_service]) do
expect(@stub.an_rpc(EchoMsg.new,
credentials: call_creds,
metadata: custom_md)).to be_a(EchoMsg)
end
# call creds metadata should be merged with custom MD
expect(echo_service.received_md.length).to eq(1)
expected_md = {
k1: 'v1',
k2: val_array,
k3: '0000000000000000000000000000000000000000000000000000000000',
keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey4: 'v4'
}
credentials_update_test(md)
expected_md.each do |k, v|
expect(echo_service.received_md[0][k.to_s]).to eq(v)
end
end
end

@ -55,17 +55,20 @@ describe GRPC::ActiveCall do
end
@ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil,
:this_channel_is_insecure)
@call = make_test_call
end
after(:each) do
@server.shutdown_and_notify(deadline)
@server.close
@server_thread.join
# Don't rely on GC to unref the call, since that can prevent
# the channel connectivity state polling thread from shutting down.
@call.close
end
describe 'restricted view methods' do
before(:each) do
@call = make_test_call
ActiveCall.client_invoke(@call)
@client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
@ -117,9 +120,8 @@ describe GRPC::ActiveCall do
describe '#remote_send' do
it 'allows a client to send a payload to the server', test: true do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -137,15 +139,14 @@ describe GRPC::ActiveCall do
expect(server_call.remote_read).to eq(msg)
# finish the call
server_call.send_initial_metadata
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call)
@call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(@call, recvd_call)
end
it 'marshals the payload using the marshal func' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, marshal, @pass_through, deadline)
client_call = ActiveCall.new(@call, marshal, @pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -161,23 +162,22 @@ describe GRPC::ActiveCall do
metadata_received: true)
expect(server_call.remote_read).to eq('marshalled:' + msg)
# finish the call
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call)
@call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(@call, recvd_call)
end
TEST_WRITE_FLAGS = [WriteFlags::BUFFER_HINT, WriteFlags::NO_COMPRESS]
TEST_WRITE_FLAGS.each do |f|
it "successfully makes calls with write_flag set to #{f}" do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, marshal,
client_call = ActiveCall.new(@call, marshal,
@pass_through, deadline)
msg = 'message is a string'
client_call.write_flag = f
client_call.remote_send(msg)
# flush the message in case writes are set to buffered
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
# confirm that the message was marshalled
recvd_rpc = @received_rpcs_queue.pop
@ -199,9 +199,8 @@ describe GRPC::ActiveCall do
describe 'sending initial metadata', send_initial_metadata: true do
it "sends metadata before sending a message if it hasn't been sent yet" do
call = make_test_call
@client_call = ActiveCall.new(
call,
@call,
@pass_through,
@pass_through,
deadline,
@ -213,13 +212,13 @@ describe GRPC::ActiveCall do
message = 'phony message'
expect(call).to(
expect(@call).to(
receive(:run_batch)
.with(
hash_including(
CallOps::SEND_INITIAL_METADATA => metadata)).once)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_MESSAGE => message)).once)
@client_call.remote_send(message)
@ -228,14 +227,12 @@ describe GRPC::ActiveCall do
end
it "doesn't send metadata if it thinks it's already been sent" do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline)
expect(@client_call.metadata_sent).to eql(true)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA)).never)
@ -243,9 +240,7 @@ describe GRPC::ActiveCall do
end
it 'sends metadata if it is explicitly sent and ok to do so' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline,
@ -257,7 +252,7 @@ describe GRPC::ActiveCall do
@client_call.merge_metadata_to_send(metadata)
expect(@client_call.metadata_to_send).to eq(metadata)
expect(call).to(
expect(@call).to(
receive(:run_batch).with(hash_including(
CallOps::SEND_INITIAL_METADATA =>
metadata)).once)
@ -265,9 +260,7 @@ describe GRPC::ActiveCall do
end
it 'explicit sending does nothing if metadata has already been sent' do
call = make_test_call
@client_call = ActiveCall.new(call,
@client_call = ActiveCall.new(@call,
@pass_through,
@pass_through,
deadline)
@ -284,7 +277,6 @@ describe GRPC::ActiveCall do
describe '#merge_metadata_to_send', merge_metadata_to_send: true do
it 'adds to existing metadata when there is existing metadata to send' do
call = make_test_call
starting_metadata = {
k1: 'key1_val',
k2: 'key2_val',
@ -292,7 +284,7 @@ describe GRPC::ActiveCall do
}
@client_call = ActiveCall.new(
call,
@call,
@pass_through, @pass_through,
deadline,
started: false,
@ -318,9 +310,8 @@ describe GRPC::ActiveCall do
end
it 'fails when initial metadata has already been sent' do
call = make_test_call
@client_call = ActiveCall.new(
call,
@call,
@pass_through,
@pass_through,
deadline,
@ -338,9 +329,8 @@ describe GRPC::ActiveCall do
describe '#client_invoke' do
it 'sends metadata to the server when present' do
call = make_test_call
metadata = { k1: 'v1', k2: 'v2' }
ActiveCall.client_invoke(call, metadata)
ActiveCall.client_invoke(@call, metadata)
recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call
expect(recvd_call).to_not be_nil
@ -349,15 +339,14 @@ describe GRPC::ActiveCall do
expect(recvd_rpc.metadata['k2']).to eq('v2')
# finish the call
recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => {})
call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(call, recvd_call)
@call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
send_and_receive_close_and_status(@call, recvd_call)
end
end
describe '#send_status', send_status: true do
it 'works when no metadata or messages have been sent yet' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
recvd_rpc = @received_rpcs_queue.pop
server_call = ActiveCall.new(
@ -375,9 +364,8 @@ describe GRPC::ActiveCall do
describe '#remote_read', remote_read: true do
it 'reads the response sent by a server' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -385,13 +373,12 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('server_response')
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'saves no metadata when the server adds no metadata' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -401,13 +388,12 @@ describe GRPC::ActiveCall do
client_call.remote_read
expect(client_call.metadata).to eq({})
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'saves metadata added by the server' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -418,12 +404,11 @@ describe GRPC::ActiveCall do
expected = { 'k1' => 'v1', 'k2' => 'v2' }
expect(client_call.metadata).to eq(expected)
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'gets a status from the server when nothing else is sent' do
client_call = make_test_call
ActiveCall.client_invoke(client_call)
ActiveCall.client_invoke(@call)
recvd_rpc = @received_rpcs_queue.pop
recvd_call = recvd_rpc.call
@ -438,22 +423,21 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'OK')
# Check that we can receive initial metadata and a status
client_call.run_batch(
@call.run_batch(
CallOps::RECV_INITIAL_METADATA => nil)
batch_result = client_call.run_batch(
batch_result = @call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(OK)
end
it 'gets a nil msg before a status when an OK status is sent' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
server_call.send_status(OK, 'OK')
@ -463,10 +447,9 @@ describe GRPC::ActiveCall do
end
it 'unmarshals the response using the unmarshal func' do
call = make_test_call
ActiveCall.client_invoke(call)
ActiveCall.client_invoke(@call)
unmarshal = proc { |x| 'unmarshalled:' + x }
client_call = ActiveCall.new(call, @pass_through,
client_call = ActiveCall.new(@call, @pass_through,
unmarshal, deadline)
# confirm the client receives the unmarshalled message
@ -476,14 +459,13 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('unmarshalled:server_response')
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
end
describe '#each_remote_read' do
it 'creates an Enumerator' do
call = make_test_call
client_call = ActiveCall.new(call, @pass_through,
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
expect(client_call.each_remote_read).to be_a(Enumerator)
# finish the call
@ -491,9 +473,8 @@ describe GRPC::ActiveCall do
end
it 'the returned enumerator can read n responses' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
reply = 'server_response'
@ -506,18 +487,17 @@ describe GRPC::ActiveCall do
expect(e.next).to eq(reply)
end
send_and_receive_close_and_status(
call, inner_call_of_active_call(server_call))
@call, inner_call_of_active_call(server_call))
end
it 'returns an enumerator that stops after an OK Status' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
reply = 'server_response'
client_call.remote_send(msg)
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
e = client_call.each_remote_read
n = 3 # arbitrary value > 1
@ -532,14 +512,13 @@ describe GRPC::ActiveCall do
describe '#closing the call from the client' do
it 'finishes ok if the server sends a status response' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
@ -549,9 +528,8 @@ describe GRPC::ActiveCall do
end
it 'finishes ok if the server sends an early status response' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -560,15 +538,14 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
@call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error
expect { client_call.receive_and_check_status }.to_not raise_error
end
it 'finishes ok if SEND_CLOSE and RECV_STATUS has been sent' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
ActiveCall.client_invoke(@call)
client_call = ActiveCall.new(@call, @pass_through,
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
@ -577,7 +554,7 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
expect do
call.run_batch(
@call.run_batch(
CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil)
end.to_not raise_error
@ -631,6 +608,7 @@ describe GRPC::ActiveCall do
batch_result = @client_call.run_batch(
CallOps::RECV_STATUS_ON_CLIENT => nil)
expect(batch_result.status.code).to eq(@server_status)
@client_call.close
end
it 'sends the initial metadata implicitly if not already sent' do

@ -41,14 +41,17 @@ class EchoService
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
rpc :a_client_streaming_rpc_unimplemented, stream(EchoMsg), EchoMsg
attr_reader :received_md
attr_accessor :on_call_started
def initialize(**kw)
@trailing_metadata = kw
@received_md = []
@on_call_started = nil
end
def an_rpc(req, call)
GRPC.logger.info('echo service received a request')
on_call_started&.call(call)
call.output_metadata.update(@trailing_metadata)
@received_md << call.metadata unless call.metadata.nil?
req

@ -206,3 +206,60 @@ grpc_proto_fuzzer(
"//src/core:chaotic_good_server",
],
)
grpc_cc_library(
name = "connector_fuzzer",
srcs = ["connector_fuzzer.cc"],
hdrs = ["connector_fuzzer.h"],
external_deps = ["absl/log:check"],
deps = [
"fuzzer_input_proto",
"fuzzing_common",
"network_input",
"//:gpr",
"//:grpc",
"//src/core:channel_args",
"//test/core/event_engine/fuzzing_event_engine",
"//test/core/test_util:fuzz_config_vars",
"//test/core/test_util:grpc_test_util",
"//test/core/test_util:grpc_test_util_base",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2",
srcs = ["connector_fuzzer_chttp2.cc"],
corpus = "connector_fuzzer_chttp2_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)
grpc_proto_fuzzer(
name = "connector_fuzzer_chttp2_fakesec",
srcs = ["connector_fuzzer_chttp2_fakesec.cc"],
corpus = "connector_fuzzer_chttp2_fakesec_corpus",
end2end_fuzzer = True,
language = "C++",
proto = None,
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":connector_fuzzer",
"//:grpc",
],
)

@ -0,0 +1,189 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
#include "src/core/lib/address_utils/parse_address.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/env.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/timer_manager.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
#include "test/core/end2end/fuzzers/network_input.h"
#include "test/core/test_util/fuzz_config_vars.h"
#include "test/core/test_util/test_config.h"
bool squelch = true;
bool leak_check = true;
using ::grpc_event_engine::experimental::ChannelArgsEndpointConfig;
using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::FuzzingEventEngine;
using ::grpc_event_engine::experimental::GetDefaultEventEngine;
using ::grpc_event_engine::experimental::MockEndpointController;
using ::grpc_event_engine::experimental::SetEventEngineFactory;
using ::grpc_event_engine::experimental::URIToResolvedAddress;
namespace grpc_core {
namespace {
class ConnectorFuzzer {
public:
ConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector)
: make_security_connector_(make_security_connector),
engine_([actions = msg.event_engine_actions()]() {
SetEventEngineFactory([actions]() -> std::unique_ptr<EventEngine> {
return std::make_unique<FuzzingEventEngine>(
FuzzingEventEngine::Options(), actions);
});
return std::dynamic_pointer_cast<FuzzingEventEngine>(
GetDefaultEventEngine());
}()),
mock_endpoint_controller_(MockEndpointController::Create(engine_)),
connector_(make_connector()) {
CHECK(engine_);
for (const auto& input : msg.network_input()) {
network_inputs_.push(input);
}
grpc_timer_manager_set_start_threaded(false);
grpc_init();
ExecCtx exec_ctx;
Executor::SetThreadingAll(false);
listener_ =
engine_
->CreateListener(
[this](std::unique_ptr<EventEngine::Endpoint> endpoint,
MemoryAllocator) {
if (network_inputs_.empty()) return;
ScheduleWrites(network_inputs_.front(), std::move(endpoint),
engine_.get());
network_inputs_.pop();
},
[](absl::Status) {}, ChannelArgsEndpointConfig(ChannelArgs{}),
std::make_unique<MemoryQuota>("foo"))
.value();
if (msg.has_shutdown_connector() &&
msg.shutdown_connector().delay_ms() > 0) {
auto shutdown_connector = msg.shutdown_connector();
const auto delay = Duration::Milliseconds(shutdown_connector.delay_ms());
engine_->RunAfterExactly(delay, [this, shutdown_connector = std::move(
shutdown_connector)]() {
if (connector_ == nullptr) return;
connector_->Shutdown(absl::Status(
static_cast<absl::StatusCode>(shutdown_connector.shutdown_status()),
shutdown_connector.shutdown_message()));
});
}
// Abbreviated runtime for interpreting API actions, since we simply don't
// support many here.
uint64_t when_ms = 0;
for (const auto& action : msg.api_actions()) {
switch (action.type_case()) {
default:
break;
case api_fuzzer::Action::kSleepMs:
when_ms += action.sleep_ms();
break;
case api_fuzzer::Action::kResizeResourceQuota:
engine_->RunAfterExactly(
Duration::Milliseconds(when_ms),
[this, new_size = action.resize_resource_quota()]() {
resource_quota_->memory_quota()->SetSize(new_size);
});
when_ms += 1;
break;
}
}
}
~ConnectorFuzzer() {
listener_.reset();
connector_.reset();
mock_endpoint_controller_.reset();
engine_->TickUntilIdle();
grpc_shutdown_blocking();
engine_->UnsetGlobalHooks();
}
void Run() {
grpc_resolved_address addr;
CHECK(grpc_parse_uri(URI::Parse("ipv4:127.0.0.1:1234").value(), &addr));
CHECK_OK(
listener_->Bind(URIToResolvedAddress("ipv4:127.0.0.1:1234").value()));
CHECK_OK(listener_->Start());
OrphanablePtr<grpc_endpoint> endpoint(
mock_endpoint_controller_->TakeCEndpoint());
SubchannelConnector::Result result;
bool done = false;
auto channel_args = ChannelArgs{}.SetObject<EventEngine>(engine_).SetObject(
resource_quota_);
auto security_connector = make_security_connector_();
if (security_connector != nullptr) {
channel_args = channel_args.SetObject(std::move(security_connector));
}
connector_->Connect(
SubchannelConnector::Args{&addr, nullptr,
Timestamp::Now() + Duration::Seconds(20),
channel_args},
&result, NewClosure([&done, &result](grpc_error_handle status) {
done = true;
if (status.ok()) result.transport->Orphan();
}));
while (!done) {
engine_->Tick();
grpc_timer_manager_tick();
}
}
private:
RefCountedPtr<ResourceQuota> resource_quota_ =
MakeRefCounted<ResourceQuota>("fuzzer");
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector_;
std::shared_ptr<FuzzingEventEngine> engine_;
std::queue<fuzzer_input::NetworkInput> network_inputs_;
std::shared_ptr<MockEndpointController> mock_endpoint_controller_;
std::unique_ptr<EventEngine::Listener> listener_;
OrphanablePtr<SubchannelConnector> connector_;
};
} // namespace
void RunConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector) {
if (squelch && !GetEnv("GRPC_TRACE_FUZZER").has_value()) {
grpc_disable_all_absl_logs();
}
static const int once = []() {
ForceEnableExperiment("event_engine_client", true);
ForceEnableExperiment("event_engine_listener", true);
return 42;
}();
CHECK_EQ(once, 42); // avoid unused variable warning
ApplyFuzzConfigVars(msg.config_vars());
TestOnlyReloadExperimentsFromConfigVariables();
ConnectorFuzzer(msg, make_security_connector, make_connector).Run();
}
} // namespace grpc_core
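The engine_ initializer in the constructor above packs a subtle pattern: install a factory so that the process-wide default EventEngine is a FuzzingEventEngine, then immediately fetch and downcast that default. A condensed restatement, assuming the Actions proto type from fuzzing_event_engine.proto:

// Same logic as the engine_ member initializer, as a free function.
std::shared_ptr<FuzzingEventEngine> MakeFuzzingEngine(
    const fuzzing_event_engine::Actions& actions) {
  SetEventEngineFactory([actions]() -> std::unique_ptr<EventEngine> {
    return std::make_unique<FuzzingEventEngine>(FuzzingEventEngine::Options(),
                                                actions);
  });
  return std::dynamic_pointer_cast<FuzzingEventEngine>(
      GetDefaultEventEngine());
}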

@ -0,0 +1,34 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#define GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H
#include "absl/functional/function_ref.h"
#include "src/core/client_channel/connector.h"
#include "src/core/lib/security/security_connector/security_connector.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
namespace grpc_core {
void RunConnectorFuzzer(
const fuzzer_input::Msg& msg,
absl::FunctionRef<RefCountedPtr<grpc_channel_security_connector>()>
make_security_connector,
absl::FunctionRef<OrphanablePtr<SubchannelConnector>()> make_connector);
}  // namespace grpc_core
#endif // GRPC_TEST_CORE_END2END_FUZZERS_CONNECTOR_FUZZER_H

@ -0,0 +1,30 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunConnectorFuzzer(
msg,
[]() {
return grpc_core::RefCountedPtr<grpc_channel_security_connector>();
},
[]() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -0,0 +1,36 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/credentials.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/security/security_connector/fake/fake_security_connector.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/connector_fuzzer.h"
DEFINE_PROTO_FUZZER(const fuzzer_input::Msg& msg) {
grpc_core::RunConnectorFuzzer(
msg,
[]() {
return grpc_fake_channel_security_connector_create(
grpc_core::RefCountedPtr<grpc_channel_credentials>(
grpc_fake_transport_security_credentials_create()),
nullptr, "foobar", grpc_core::ChannelArgs{});
},
[]() { return grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(); });
}

@ -172,6 +172,20 @@ message ChaoticGoodFrame {
message ChaoticGoodSettings {}
message FakeTransportFrame {
enum MessageString {
CLIENT_INIT = 0;
SERVER_INIT = 1;
CLIENT_FINISHED = 2;
SERVER_FINISHED = 3;
}
oneof payload {
bytes raw_bytes = 1;
MessageString message_string = 2;
}
}
message InputSegment {
int32 delay_ms = 1;
oneof payload {
@ -187,6 +201,7 @@ message InputSegment {
H2ClientPrefix client_prefix = 11;
uint32 repeated_zeros = 12;
ChaoticGoodFrame chaotic_good = 13;
FakeTransportFrame fake_transport_frame = 14;
}
}
@ -204,10 +219,18 @@ message NetworkInput {
}
}
// Only for the connector fuzzer: when to drop the connector.
message ShutdownConnector {
int32 delay_ms = 1;
int32 shutdown_status = 2;
string shutdown_message = 3;
}
message Msg {
repeated NetworkInput network_input = 1;
repeated api_fuzzer.Action api_actions = 2;
fuzzing_event_engine.Actions event_engine_actions = 3;
grpc.testing.FuzzConfigVars config_vars = 4;
grpc.testing.FuzzingChannelArgs channel_args = 5;
ShutdownConnector shutdown_connector = 6;
}
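A hedged sketch of how the new ShutdownConnector knob might be populated from C++ via the standard protobuf-generated setters (field names follow the message above; the concrete values are arbitrary illustrations):
  fuzzer_input::Msg msg;
  auto* shutdown = msg.mutable_shutdown_connector();
  shutdown->set_delay_ms(50);                  // drop the connector after 50ms of virtual time
  shutdown->set_shutdown_status(14);           // arbitrary status code, for illustration only
  shutdown->set_shutdown_message("fuzzer-injected shutdown");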

@ -267,6 +267,13 @@ SliceBuffer ChaoticGoodFrame(const fuzzer_input::ChaoticGoodFrame& frame) {
return out;
}
void store32_little_endian(uint32_t value, unsigned char* buf) {
buf[3] = static_cast<unsigned char>((value >> 24) & 0xFF);
buf[2] = static_cast<unsigned char>((value >> 16) & 0xFF);
buf[1] = static_cast<unsigned char>((value >> 8) & 0xFF);
buf[0] = static_cast<unsigned char>((value) & 0xFF);
}
grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
switch (segment.payload_case()) {
case fuzzer_input::InputSegment::kRawBytes:
@ -333,6 +340,38 @@ grpc_slice SliceFromSegment(const fuzzer_input::InputSegment& segment) {
.JoinIntoSlice()
.TakeCSlice();
} break;
case fuzzer_input::InputSegment::kFakeTransportFrame: {
auto generate = [](absl::string_view payload) {
uint32_t length = payload.length();
std::vector<unsigned char> bytes;
bytes.resize(4);
store32_little_endian(length + 4, bytes.data());
for (auto c : payload) {
bytes.push_back(static_cast<unsigned char>(c));
}
return grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(bytes.data()), bytes.size());
};
switch (segment.fake_transport_frame().payload_case()) {
case fuzzer_input::FakeTransportFrame::kRawBytes:
return generate(segment.fake_transport_frame().raw_bytes());
case fuzzer_input::FakeTransportFrame::kMessageString:
switch (segment.fake_transport_frame().message_string()) {
default:
return generate("UNKNOWN");
case fuzzer_input::FakeTransportFrame::CLIENT_INIT:
return generate("CLIENT_INIT");
case fuzzer_input::FakeTransportFrame::SERVER_INIT:
return generate("SERVER_INIT");
case fuzzer_input::FakeTransportFrame::CLIENT_FINISHED:
return generate("CLIENT_FINISHED");
case fuzzer_input::FakeTransportFrame::SERVER_FINISHED:
return generate("SERVER_FINISHED");
}
case fuzzer_input::FakeTransportFrame::PAYLOAD_NOT_SET:
return generate("");
}
}
case fuzzer_input::InputSegment::PAYLOAD_NOT_SET:
break;
}
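The fake-transport framing generated above is a 4-byte little-endian length prefix that counts the prefix itself plus the payload (hence `length + 4`). A worked example for CLIENT_INIT, whose payload "CLIENT_INIT" is 11 bytes, so the prefix encodes 11 + 4 = 15:
  0f 00 00 00  43 4c 49 45 4e 54 5f 49 4e 49 54
  (length=15)  ("CLIENT_INIT" in ASCII)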
@ -545,4 +584,15 @@ Duration ScheduleConnection(
return delay;
}
void ScheduleWrites(
const fuzzer_input::NetworkInput& network_input,
std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
endpoint,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine) {
auto schedule = MakeSchedule(network_input);
auto ep = std::shared_ptr<EventEngine::Endpoint>(std::move(endpoint));
ReadForever(ep);
ScheduleWritesForReads(ep, event_engine, std::move(schedule));
}
} // namespace grpc_core

@ -30,6 +30,12 @@ Duration ScheduleReads(
mock_endpoint_controller,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
void ScheduleWrites(
const fuzzer_input::NetworkInput& network_input,
std::unique_ptr<grpc_event_engine::experimental::EventEngine::Endpoint>
endpoint,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine);
Duration ScheduleConnection(
const fuzzer_input::NetworkInput& network_input,
grpc_event_engine::experimental::FuzzingEventEngine* event_engine,

@ -18,6 +18,7 @@
#include <stdlib.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <limits>
#include <vector>
@ -32,6 +33,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/dump_args.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/telemetry/stats.h"
@ -189,7 +191,15 @@ void FuzzingEventEngine::TickUntilIdle() {
while (true) {
{
grpc_core::MutexLock lock(&*mu_);
if (tasks_by_id_.empty()) return;
LOG_EVERY_N_SEC(INFO, 5)
<< "TickUntilIdle: "
<< GRPC_DUMP_ARGS(tasks_by_id_.size(), outstanding_reads_.load(),
outstanding_writes_.load());
if (tasks_by_id_.empty() &&
outstanding_writes_.load(std::memory_order_relaxed) == 0 &&
outstanding_reads_.load(std::memory_order_relaxed) == 0) {
return;
}
}
Tick();
}
@ -299,6 +309,9 @@ absl::Status FuzzingEventEngine::FuzzingListener::Start() {
bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
CHECK(!closed[index]);
const int peer_index = 1 - index;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: entry "
<< GRPC_DUMP_ARGS(data->Length());
if (data->Length() == 0) return true;
size_t write_len = std::numeric_limits<size_t>::max();
// Check the write_sizes queue for fuzzer imposed restrictions on this write
@ -315,12 +328,16 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
// byte.
if (write_len == 0) write_len = 1;
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: " << write_len << " bytes";
<< "WRITE[" << this << ":" << index << "]: " << write_len << " bytes; "
<< GRPC_DUMP_ARGS(pending_read[peer_index].has_value());
// Expand the pending buffer.
size_t prev_len = pending[index].size();
pending[index].resize(prev_len + write_len);
// Move bytes from the to-write data into the pending buffer.
data->MoveFirstNBytesIntoBuffer(write_len, pending[index].data() + prev_len);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "WRITE[" << this << ":" << index << "]: post-move "
<< GRPC_DUMP_ARGS(data->Length());
// If there was a pending read, then we can fulfill it.
if (pending_read[peer_index].has_value()) {
pending_read[peer_index]->buffer->Append(
@ -328,7 +345,11 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
pending[index].clear();
g_fuzzing_event_engine->RunLocked(
RunType::kWrite,
[cb = std::move(pending_read[peer_index]->on_read)]() mutable {
[cb = std::move(pending_read[peer_index]->on_read), this, peer_index,
buffer = pending_read[peer_index]->buffer]() mutable {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "FINISH_READ[" << this << ":" << peer_index
<< "]: " << GRPC_DUMP_ARGS(buffer->Length());
cb(absl::OkStatus());
});
pending_read[peer_index].reset();
@ -339,6 +360,10 @@ bool FuzzingEventEngine::EndpointMiddle::Write(SliceBuffer* data, int index) {
bool FuzzingEventEngine::FuzzingEndpoint::Write(
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
const WriteArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_WRITE[" << middle_.get() << ":" << my_index()
<< "]: " << data->Length() << " bytes";
IoToken write_token(&g_fuzzing_event_engine->outstanding_writes_);
grpc_core::global_stats().IncrementSyscallWrite();
grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]);
@ -346,24 +371,38 @@ bool FuzzingEventEngine::FuzzingEndpoint::Write(
// If the write succeeds immediately, then we return true.
if (middle_->Write(data, my_index())) return true;
middle_->writing[my_index()] = true;
ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data);
ScheduleDelayedWrite(middle_, my_index(), std::move(on_writable), data,
std::move(write_token));
return false;
}
void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data) {
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
IoToken write_token) {
g_fuzzing_event_engine->RunLocked(
RunType::kWrite, [middle = std::move(middle), index, data,
on_writable = std::move(on_writable)]() mutable {
RunType::kWrite,
[write_token = std::move(write_token), middle = std::move(middle), index,
data, on_writable = std::move(on_writable)]() mutable {
grpc_core::ReleasableMutexLock lock(&*mu_);
CHECK(middle->writing[index]);
if (middle->closed[index]) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED[" << middle.get() << ":" << index << "]";
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[on_writable = std::move(on_writable)]() mutable {
on_writable(absl::InternalError("Endpoint closed"));
});
if (middle->pending_read[1 - index].has_value()) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[cb = std::move(
middle->pending_read[1 - index]->on_read)]() mutable {
cb(absl::InternalError("Endpoint closed"));
});
middle->pending_read[1 - index].reset();
}
return;
}
if (middle->Write(data, index)) {
@ -373,14 +412,23 @@ void FuzzingEventEngine::FuzzingEndpoint::ScheduleDelayedWrite(
return;
}
ScheduleDelayedWrite(std::move(middle), index, std::move(on_writable),
data);
data, std::move(write_token));
});
}
FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
grpc_core::MutexLock lock(&*mu_);
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSE[" << middle_.get() << ":" << my_index() << "]: "
<< GRPC_DUMP_ARGS(
middle_->closed[my_index()], middle_->closed[peer_index()],
middle_->pending_read[my_index()].has_value(),
middle_->pending_read[peer_index()].has_value(),
middle_->writing[my_index()], middle_->writing[peer_index()]);
middle_->closed[my_index()] = true;
if (middle_->pending_read[my_index()].has_value()) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "CLOSED_READING[" << middle_.get() << ":" << my_index() << "]";
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
[cb = std::move(middle_->pending_read[my_index()]->on_read)]() mutable {
@ -388,7 +436,7 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
});
middle_->pending_read[my_index()].reset();
}
if (!middle_->writing[peer_index()] &&
if (!middle_->writing[my_index()] &&
middle_->pending_read[peer_index()].has_value()) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter,
@ -403,20 +451,25 @@ FuzzingEventEngine::FuzzingEndpoint::~FuzzingEndpoint() {
bool FuzzingEventEngine::FuzzingEndpoint::Read(
absl::AnyInvocable<void(absl::Status)> on_read, SliceBuffer* buffer,
const ReadArgs*) {
GRPC_TRACE_LOG(fuzzing_ee_writes, INFO)
<< "START_READ[" << middle_.get() << ":" << my_index() << "]";
buffer->Clear();
IoToken read_token(&g_fuzzing_event_engine->outstanding_reads_);
grpc_core::MutexLock lock(&*mu_);
CHECK(!middle_->closed[my_index()]);
if (middle_->pending[peer_index()].empty()) {
// If the endpoint is closed, fail asynchronously.
if (middle_->closed[peer_index()]) {
g_fuzzing_event_engine->RunLocked(
RunType::kRunAfter, [on_read = std::move(on_read)]() mutable {
RunType::kRunAfter,
[read_token, on_read = std::move(on_read)]() mutable {
on_read(absl::InternalError("Endpoint closed"));
});
return false;
}
// If the endpoint has no pending data, then we need to wait for a write.
middle_->pending_read[my_index()] = PendingRead{std::move(on_read), buffer};
middle_->pending_read[my_index()] =
PendingRead{std::move(read_token), std::move(on_read), buffer};
return false;
} else {
// If the endpoint has pending data, then we can fulfill the read

@ -17,6 +17,7 @@
#include <stddef.h>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <map>
@ -124,6 +125,36 @@ class FuzzingEventEngine : public EventEngine {
}
private:
class IoToken {
public:
IoToken() : refs_(nullptr) {}
explicit IoToken(std::atomic<size_t>* refs) : refs_(refs) {
refs_->fetch_add(1, std::memory_order_relaxed);
}
~IoToken() {
if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
}
IoToken(const IoToken& other) : refs_(other.refs_) {
if (refs_ != nullptr) refs_->fetch_add(1, std::memory_order_relaxed);
}
IoToken& operator=(const IoToken& other) {
IoToken copy(other);
Swap(copy);
return *this;
}
IoToken(IoToken&& other) noexcept
: refs_(std::exchange(other.refs_, nullptr)) {}
IoToken& operator=(IoToken&& other) noexcept {
if (refs_ != nullptr) refs_->fetch_sub(1, std::memory_order_relaxed);
refs_ = std::exchange(other.refs_, nullptr);
return *this;
}
void Swap(IoToken& other) { std::swap(refs_, other.refs_); }
private:
std::atomic<size_t>* refs_;
};
enum class RunType {
kWrite,
kRunAfter,
@ -183,6 +214,8 @@ class FuzzingEventEngine : public EventEngine {
// One read that's outstanding.
struct PendingRead {
// The associated io token
IoToken io_token;
// Callback to invoke when the read completes.
absl::AnyInvocable<void(absl::Status)> on_read;
// The buffer to read into.
@ -243,8 +276,8 @@ class FuzzingEventEngine : public EventEngine {
// endpoint shutdown, it's believed this is a legal implementation.
static void ScheduleDelayedWrite(
std::shared_ptr<EndpointMiddle> middle, int index,
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
absl::AnyInvocable<void(absl::Status)> on_writable, SliceBuffer* data,
IoToken write_token) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const std::shared_ptr<EndpointMiddle> middle_;
const int index_;
};
@ -299,6 +332,8 @@ class FuzzingEventEngine : public EventEngine {
std::queue<std::queue<size_t>> write_sizes_for_future_connections_
ABSL_GUARDED_BY(mu_);
grpc_pick_port_functions previous_pick_port_functions_;
std::atomic<size_t> outstanding_writes_{0};
std::atomic<size_t> outstanding_reads_{0};
grpc_core::Mutex run_after_duration_callback_mu_;
absl::AnyInvocable<void(Duration)> run_after_duration_callback_
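The IoToken above is a small RAII handle on one of the outstanding-IO counters: construction and copy increment the counter, move transfers ownership without touching it, and destruction decrements it; this is what lets TickUntilIdle treat in-flight reads and writes as pending work. A minimal usage sketch (illustrative only; in the actual header IoToken is a private member of FuzzingEventEngine):
  std::atomic<size_t> outstanding{0};
  {
    IoToken first(&outstanding);       // outstanding == 1
    IoToken copy = first;              // copy adds a reference: outstanding == 2
    IoToken moved = std::move(copy);   // move transfers the reference: still 2
  }                                    // all live tokens destroyed: outstanding == 0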

@ -2,21 +2,24 @@
This directory contains scripts that facilitate building and running gRPC interoperability tests for combinations of language/runtimes (known as the matrix).
The setup builds gRPC docker images for each language/runtime and upload it to Google Container Registry (GCR). These images, encapsulating gRPC stack
The setup builds gRPC docker images for each language/runtime and uploads them to Artifact Registry (AR). These images, encapsulating the gRPC stack
from specific releases/tags, are used to test version compatibility between gRPC release versions.
## Step-by-step instructions for adding a GCR image for a new release for compatibility test
We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server. When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test. The steps are:
## Step-by-step instructions for adding an AR docker image for a new release for compatibility testing
We have a continuous nightly test setup to test gRPC backward compatibility between old clients and the latest server.
When a gRPC developer creates a new gRPC release, they are also responsible for adding the just-released gRPC client to the nightly test.
The steps are:
- Add (or update) an entry in the `./client_matrix.py` file to reference the github tag for the release.
- Build new client docker image(s). For example, for C and wrapper languages release `v1.9.9`, do
- `tools/interop_matrix/create_matrix_images.py --git_checkout --release=v1.9.9 --upload_images --language cxx python ruby php`
- Verify that the new docker image was built successfully and uploaded to GCR. For example,
- `gcloud container images list --repository gcr.io/grpc-testing` lists available images.
- `gcloud container images list-tags gcr.io/grpc-testing/grpc_interop_java` should show an image entry with tag `v1.9.9`.
- images can also be viewed in https://pantheon.corp.google.com/gcr/images/grpc-testing?project=grpc-testing
- Verify that the new docker image was built successfully and uploaded to AR. For example,
- `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public` lists available images.
- `gcloud artifacts docker images list us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java --include-tags` should show an image entry with tag `v1.9.9`.
- images can also be viewed in https://pantheon.corp.google.com/artifacts/docker/grpc-testing/us/testing-images-public
- Verify that the just-created docker client image passes the backward compatibility test (it should). For example,
- `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_java:v1.9.9` followed by
- `docker_image=gcr.io/grpc-testing/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master`
- `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9` followed by
- `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master`
- Commit the change and create a PR to upstream/master.
- Trigger an adhoc run of interop matrix tests: https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod:grpc%2Fcore%2Fexperimental%2Flinux%2Fgrpc_interop_matrix_adhoc
- Once tests pass, request a PR review.
@ -24,30 +27,34 @@ We have continuous nightly test setup to test gRPC backward compatibility betwee
For more details on each step, refer to sections below.
## Instructions for adding new language/runtimes
- Create a new `Dockerfile.template` and `build_interop.sh.template` for the language/runtime under `template/tools/dockerfile/`.
- Run `tools/buildgen/generate_projects.sh` to create corresponding files under `tools/dockerfile/`.
- Add language/runtimes to `client_matrix.py` following existing language/runtimes examples.
- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to GCR.
- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to AR.
## Instructions for creating new test cases
- Create test cases by running `LANG=<lang> [RELEASE=<release>] ./create_testcases.sh`. For example,
- `LANG=go ./create_testcases.sh` will generate `./testcases/go__master`, which is also a functional bash script.
- `LANG=go KEEP_IMAGE=1 ./create_testcases.sh` will generate `./testcases/go__master` and keep the local docker image so it can be invoked simply via `./testcases/go__master`. Note: remove local docker images manually afterwards with `docker rmi <image_id>`.
- Stage and commit the generated test case file `./testcases/<lang>__<release>`.
## Instructions for running test cases against GCR images
## Instructions for running test cases against AR docker images
- Run `tools/interop_matrix/run_interop_matrix_tests.py`. Useful options:
- `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the GCR images with the tag have been created using `create_matrix_images.py` above.
- `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the AR images with the tag have been created using `create_matrix_images.py` above.
- `--language` specifies a language. Defaults to `--language=all`.
For example, to test all languages for all gRPC releases across all runtimes, do `tools/interop_matrix/run_interop_matrix_tests.py --release=all`.
- The output for all the test cases is recorded in a junit style xml file (defaults to 'report.xml').
## Instructions for running test cases against a GCR image manually
- Download docker image from GCR. For example: `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0`.
## Instructions for running test cases against an AR image manually
- Download a docker image from AR. For example: `docker pull us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0`.
- Run test cases by specifying `docker_image` variable inline with the test case script created above.
For example:
- `docker_image=gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in GCR.
- `docker_image=us-docker.pkg.dev/grpc-testing/testing-images-public/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in AR.
Note:
- File paths starting with `tools/` or `template/` are relative to the grpc repo root dir. File paths starting with `./` are relative to the current directory (`tools/interop_matrix`).
- Creating and referencing images in GCR require read and write permission to Google Container Registry path gcr.io/grpc-testing.
- Creating and referencing images in AR requires read and write permission to the AR path us-docker.pkg.dev/grpc-testing.
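- As a hedged pointer (the exact setup can vary by environment), docker typically needs to be configured to authenticate to Artifact Registry before pulling or pushing these images, e.g. via `gcloud auth configure-docker us-docker.pkg.dev`.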

@ -53,9 +53,9 @@ _BUILD_INFO = "/var/local/build_info"
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
"--docker_path",
default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images",
)
argp.add_argument(
@ -175,7 +175,7 @@ def build_image_jobspec(runtime, env, gcr_tag, stack_base):
stack_base: the local gRPC repo path.
"""
basename = "grpc_interop_%s" % runtime
tag = "%s/%s:%s" % (args.gcr_path, basename, gcr_tag)
tag = "%s/%s:%s" % (args.docker_path, basename, gcr_tag)
build_env = {"INTEROP_IMAGE": tag, "BASE_NAME": basename}
build_env.update(env)
image_builder_path = _IMAGE_BUILDER
@ -407,8 +407,8 @@ for lang in languages:
for image in docker_images:
if args.upload_images:
jobset.message("START", "Uploading %s" % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(":") != -1
# docker image name must be in the format <docker_path>/<image>:<gcr_tag>
assert image.startswith(args.docker_path) and image.find(":") != -1
# Add a tag to exclude the image from the GCP Vulnerability Scanner.
(image_name, tag_name) = image.rsplit(":", 1)
alternate_image = (

@ -56,9 +56,9 @@ _RELEASES = sorted(
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
"--docker_path",
default="us-docker.pkg.dev/grpc-testing/testing-images-public",
help="Path of docker images",
)
argp.add_argument(
"--release",
@ -348,7 +348,9 @@ languages = args.language if args.language != ["all"] else _LANGUAGES
total_num_failures = 0
_xml_report_tree = report_utils.new_junit_xml_tree()
for lang in languages:
docker_images = _get_test_images_for_lang(lang, args.release, args.gcr_path)
docker_images = _get_test_images_for_lang(
lang, args.release, args.docker_path
)
for runtime in sorted(docker_images.keys()):
total_num_failures += _run_tests_for_lang(
lang, runtime, docker_images[runtime], _xml_report_tree

@ -20,11 +20,8 @@ import %workspace%/tools/remote_build/include/rbe_base_config.bazelrc
# configure backend for remote execution
build --remote_executor=grpcs://remotebuildexecution.googleapis.com
build --spawn_strategy=remote
build --strategy=Javac=remote
build --strategy=Closure=remote
build --genrule_strategy=remote
build --remote_timeout=7200 # very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
# Very large value to avoid problems like https://github.com/grpc/grpc/issues/20777
build --remote_timeout=7200
# In the remote execution environment, each test gets its own docker container
# and the port server won't be available.
