[grpc][Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log (#36939)

[grpc][Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log
In this CL we are migrating from gRPC's own gpr logging mechanism to the absl logging mechanism. The intention is to deprecate gpr_log in the future.

We have the following mapping; a minimal before/after sketch follows the list:

1. gpr_log(GPR_INFO,...) -> LOG(INFO)
2. gpr_log(GPR_ERROR,...) -> LOG(ERROR)
3. gpr_log(GPR_DEBUG,...) -> VLOG(2)
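
For illustration, a minimal before/after sketch of this mapping, using hypothetical call sites (names such as LogExamples, addr, and fd are not from this change):

    #include <grpc/support/log.h>
    #include "absl/log/log.h"

    // Hypothetical call sites illustrating the mapping only.
    void LogExamples(const char* addr, int fd) {
      // Before: gpr severity macros with printf-style formatting.
      gpr_log(GPR_INFO, "connected to %s on fd %d", addr, fd);
      gpr_log(GPR_ERROR, "connect to %s failed", addr);
      gpr_log(GPR_DEBUG, "polling fd %d", fd);

      // After: absl streaming macros; GPR_DEBUG becomes VLOG(2).
      LOG(INFO) << "connected to " << addr << " on fd " << fd;
      LOG(ERROR) << "connect to " << addr << " failed";
      VLOG(2) << "polling fd " << fd;
    }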

Reviewers need to check:

1. That the above mapping is correct.
2. That the content of each log message is unchanged.
gpr_log format strings could not take string_view or std::string directly, while absl LOG accepts both, so some string_view and std::string conversions are eliminated at call sites. This is expected.
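
For example, a sketch of how such a conversion disappears (ReportBadValue and value are hypothetical; the real call sites are in the diff below):

    #include <string>

    #include <grpc/support/log.h>
    #include "absl/log/log.h"
    #include "absl/strings/string_view.h"

    // Hypothetical helper showing the eliminated conversion.
    void ReportBadValue(absl::string_view value) {
      // Before: %s requires a NUL-terminated C string, so a temporary
      // std::string was built just for logging.
      gpr_log(GPR_ERROR, "Illegal keepalive throttling value %s",
              std::string(value).c_str());
      // After: absl LOG streams string_view (and std::string) directly.
      LOG(ERROR) << "Illegal keepalive throttling value " << value;
    }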

Closes #36939

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/36939 from tanvi-jagtap:src_core_client_channel 2dc26fe18a
PiperOrigin-RevId: 643966218
pull/36941/head^2
Tanvi Jagtap 9 months ago committed by Copybara-Service
parent b4fa67a452
commit 205783fc4f
  1. src/core/client_channel/client_channel.cc (32)
  2. src/core/client_channel/client_channel_filter.cc (67)
  3. src/core/client_channel/load_balanced_call_destination.cc (8)
  4. src/core/client_channel/retry_filter_legacy_call_data.cc (349)
  5. src/core/client_channel/subchannel.cc (43)
  6. tools/run_tests/sanity/banned_functions.py (5)

@ -28,6 +28,7 @@
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
@ -43,7 +44,6 @@
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/json.h>
#include <grpc/support/log.h>
#include <grpc/support/metrics.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
@ -283,10 +283,10 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
}
}
} else {
gpr_log(GPR_ERROR,
"client_channel=%p: Illegal keepalive throttling value %s",
subchannel_wrapper_->client_channel_.get(),
std::string(keepalive_throttling.value()).c_str());
LOG(ERROR) << "client_channel="
<< subchannel_wrapper_->client_channel_.get()
<< ": Illegal keepalive throttling value "
<< std::string(keepalive_throttling.value());
}
}
// Propagate status only in state TF.
@ -453,10 +453,10 @@ class ClientChannel::ClientChannelControlHelper
const char* extra = client_channel_->disconnect_error_.ok()
? ""
: " (ignoring -- channel shutting down)";
gpr_log(GPR_INFO,
"client_channel=%p: update: state=%s status=(%s) picker=%p%s",
client_channel_.get(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get(), extra);
LOG(INFO) << "client_channel=" << client_channel_.get()
<< ": update: state=" << ConnectivityStateName(state)
<< " status=(" << status << ") picker=" << picker.get()
<< extra;
}
// Do update only if not shutting down.
if (client_channel_->disconnect_error_.ok()) {
@ -907,15 +907,13 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
.LoadBalancingPolicyExists(*policy_name, &requires_config) ||
requires_config)) {
if (requires_config) {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args must not "
"require a config. Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args must not "
"require a config. Using pick_first instead.";
} else {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args does not exist. "
"Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args does not exist. "
"Using pick_first instead.";
}
policy_name = "pick_first";
}

@ -31,6 +31,7 @@
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
@ -46,7 +47,6 @@
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/json.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
@ -705,9 +705,9 @@ class ClientChannelFilter::SubchannelWrapper final
}
}
} else {
gpr_log(GPR_ERROR, "chand=%p: Illegal keepalive throttling value %s",
parent_->chand_,
std::string(keepalive_throttling.value()).c_str());
LOG(ERROR) << "chand=" << parent_->chand_
<< ": Illegal keepalive throttling value "
<< std::string(keepalive_throttling.value());
}
}
// Propagate status only in state TF.
@ -1210,15 +1210,13 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
.LoadBalancingPolicyExists(*policy_name, &requires_config) ||
requires_config)) {
if (requires_config) {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args must not "
"require a config. Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args must not "
"require a config. Using pick_first instead.";
} else {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args does not exist. "
"Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args does not exist. "
"Using pick_first instead.";
}
policy_name = "pick_first";
}
@ -2016,8 +2014,9 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
auto* chand = static_cast<ClientChannelFilter*>(elem->channel_data);
if (GRPC_TRACE_FLAG_ENABLED(client_channel_call) &&
!GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: batch started from above: %s", chand,
calld, grpc_transport_stream_op_batch_string(batch, false).c_str());
LOG(INFO) << "chand=" << chand << " calld=" << calld
<< ": batch started from above: "
<< grpc_transport_stream_op_batch_string(batch, false);
}
// Intercept recv_trailing_metadata to commit the call, in case we wind up
// failing the call before we get down to the retry or LB call layer.
@ -2159,9 +2158,8 @@ void ClientChannelFilter::FilterBasedCallData::PendingBatchesFail(
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
chand(), this, num_batches, StatusToString(error).c_str());
LOG(INFO) << "chand=" << chand() << " calld=" << this << ": failing "
<< num_batches << " pending batches: " << StatusToString(error);
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@ -2202,10 +2200,9 @@ void ClientChannelFilter::FilterBasedCallData::PendingBatchesResume() {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR
" pending batches on dynamic_call=%p",
chand(), this, num_batches, dynamic_call_.get());
LOG(INFO) << "chand=" << chand() << " calld=" << this << ": starting "
<< num_batches
<< " pending batches on dynamic_call=" << dynamic_call_.get();
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@ -2378,10 +2375,8 @@ class ClientChannelFilter::LoadBalancedCall::Metadata final
}
batch_->Append(key, Slice::FromStaticString(value),
[key](absl::string_view error, const Slice& value) {
gpr_log(GPR_ERROR, "%s",
absl::StrCat(error, " key:", key,
" value:", value.as_string_view())
.c_str());
LOG(ERROR) << error << " key:" << key
<< " value:" << value.as_string_view();
});
}
@ -2847,9 +2842,8 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesFail(
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: failing %" PRIuPTR " pending batches: %s",
chand(), this, num_batches, StatusToString(error).c_str());
LOG(INFO) << "chand=" << chand() << " lb_call=" << this << ": failing "
<< num_batches << " pending batches: " << StatusToString(error);
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@ -2889,10 +2883,9 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesResume() {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: starting %" PRIuPTR
" pending batches on subchannel_call=%p",
chand(), this, num_batches, subchannel_call_.get());
LOG(INFO) << "chand=" << chand() << " lb_call=" << this << ": starting "
<< num_batches << " pending batches on subchannel_call="
<< subchannel_call_.get();
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@ -2915,12 +2908,10 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_lb_call) ||
GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: batch started from above: %s, "
"call_attempt_tracer()=%p",
chand(), this,
grpc_transport_stream_op_batch_string(batch, false).c_str(),
call_attempt_tracer());
LOG(INFO) << "chand=" << chand() << " lb_call=" << this
<< ": batch started from above: "
<< grpc_transport_stream_op_batch_string(batch, false)
<< ", call_attempt_tracer()=" << call_attempt_tracer();
}
// Handle call tracing.
if (call_attempt_tracer() != nullptr) {

@ -14,6 +14,8 @@
#include "src/core/client_channel/load_balanced_call_destination.h"
#include "absl/log/log.h"
#include "src/core/client_channel/client_channel.h"
#include "src/core/client_channel/client_channel_internal.h"
#include "src/core/client_channel/subchannel.h"
@ -43,10 +45,8 @@ class LbMetadata : public LoadBalancingPolicy::MetadataInterface {
}
batch_->Append(key, Slice::FromStaticString(value),
[key](absl::string_view error, const Slice& value) {
gpr_log(GPR_ERROR, "%s",
absl::StrCat(error, " key:", key,
" value:", value.as_string_view())
.c_str());
LOG(ERROR) << error << " key:" << key
<< " value:" << value.as_string_view();
});
}

@ -22,11 +22,10 @@
#include <new>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include <grpc/support/log.h>
#include "src/core/client_channel/client_channel_internal.h"
#include "src/core/client_channel/retry_service_config.h"
#include "src/core/client_channel/retry_throttle.h"
@ -141,9 +140,9 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
},
is_transparent_retry);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: created attempt, lb_call=%p",
calld->chand_, calld, this, lb_call_.get());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this
<< ": created attempt, lb_call=" << lb_call_.get();
}
// If per_attempt_recv_timeout is set, start a timer.
if (calld->retry_policy_ != nullptr &&
@ -151,10 +150,9 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
const Duration per_attempt_recv_timeout =
*calld->retry_policy_->per_attempt_recv_timeout();
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: per-attempt timeout in %" PRId64
" ms",
calld->chand_, calld, this, per_attempt_recv_timeout.millis());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this << ": per-attempt timeout in "
<< per_attempt_recv_timeout.millis() << " ms";
}
// Schedule retry after computed delay.
GRPC_CALL_STACK_REF(calld->owning_call_, "OnPerAttemptRecvTimer");
@ -170,8 +168,8 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
RetryFilter::LegacyCallData::CallAttempt::~CallAttempt() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying call attempt",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": destroying call attempt";
}
}
@ -237,10 +235,10 @@ void RetryFilter::LegacyCallData::CallAttempt::MaybeSwitchToFastPath() {
if (recv_trailing_metadata_internal_batch_ != nullptr) return;
// Switch to fast path.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: retry state no longer needed; "
"moving LB call to parent and unreffing the call attempt",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": retry state no longer needed; "
"moving LB call to parent and unreffing the call attempt";
}
calld_->committed_call_ = std::move(lb_call_);
calld_->call_attempt_.reset(DEBUG_LOCATION, "MaybeSwitchToFastPath");
@ -256,10 +254,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
if (calld_->seen_send_initial_metadata_ && !started_send_initial_metadata_ &&
!calld_->pending_send_initial_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_initial_metadata op",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": replaying previously completed "
"send_initial_metadata op";
}
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
replay_batch_data->AddRetriableSendInitialMetadataOp();
@ -270,10 +268,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
started_send_message_count_ == completed_send_message_count_ &&
!calld_->pending_send_message_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_message op",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": replaying previously completed "
"send_message op";
}
if (replay_batch_data == nullptr) {
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
@ -289,10 +287,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
!started_send_trailing_metadata_ &&
!calld_->pending_send_trailing_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_trailing_metadata op",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": replaying previously completed "
"send_trailing_metadata op";
}
if (replay_batch_data == nullptr) {
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
@ -320,9 +318,9 @@ void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
grpc_transport_stream_op_batch* batch, const char* reason,
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: adding batch (%s): %s",
calld_->chand_, calld_, this, reason,
grpc_transport_stream_op_batch_string(batch, false).c_str());
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": adding batch (" << reason
<< "): " << grpc_transport_stream_op_batch_string(batch, false);
}
batch->handler_private.extra_arg = lb_call_.get();
GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
@ -333,10 +331,10 @@ void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
void RetryFilter::LegacyCallData::CallAttempt::
AddBatchForInternalRecvTrailingMetadata(CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: call failed but "
"recv_trailing_metadata not started; starting it internally",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": call failed but recv_trailing_metadata not started; "
"starting it internally";
}
// Create batch_data with 2 refs, since this batch will be unreffed twice:
// once for the recv_trailing_metadata_ready callback when the batch
@ -523,9 +521,8 @@ void RetryFilter::LegacyCallData::CallAttempt::AddRetriableBatches(
void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: constructing retriable batches",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": constructing retriable batches";
}
// Construct list of closures to execute, one for each pending batch.
CallCombinerClosureList closures;
@ -533,10 +530,9 @@ void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
// Note: This will yield the call combiner.
// Start batches on LB call.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: starting %" PRIuPTR
" retriable batches on lb_call=%p",
calld_->chand_, calld_, this, closures.size(), lb_call_.get());
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": starting " << closures.size()
<< " retriable batches on lb_call=" << lb_call_.get();
}
closures.RunClosures(calld_->call_combiner_);
}
@ -561,19 +557,18 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
calld_->retry_throttle_data_->RecordSuccess();
}
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: call succeeded",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": call succeeded";
}
return false;
}
// Status is not OK. Check whether the status is retryable.
if (!calld_->retry_policy_->retryable_status_codes().Contains(*status)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: status %s not configured as "
"retryable",
calld_->chand_, calld_, this,
grpc_status_code_to_string(*status));
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": status "
<< grpc_status_code_to_string(*status)
<< " not configured as retryable";
}
return false;
}
@ -588,17 +583,16 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
if (calld_->retry_throttle_data_ != nullptr &&
!calld_->retry_throttle_data_->RecordFailure()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: retries throttled",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": retries throttled";
}
return false;
}
// Check whether the call is committed.
if (calld_->retry_committed_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: retries already committed",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": retries already committed";
}
return false;
}
@ -607,9 +601,9 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
if (calld_->num_attempts_completed_ >=
calld_->retry_policy_->max_attempts()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(
GPR_INFO, "chand=%p calld=%p attempt=%p: exceeded %d retry attempts",
calld_->chand_, calld_, this, calld_->retry_policy_->max_attempts());
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": exceeded "
<< calld_->retry_policy_->max_attempts() << " retry attempts";
}
return false;
}
@ -617,19 +611,16 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
if (server_pushback.has_value()) {
if (*server_pushback < Duration::Zero()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: not retrying due to server "
"push-back",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": not retrying due to server push-back";
}
return false;
} else {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: server push-back: retry in %" PRIu64
" ms",
calld_->chand_, calld_, this, server_pushback->millis());
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": server push-back: retry in "
<< server_pushback->millis() << " ms";
}
}
}
@ -675,11 +666,12 @@ void RetryFilter::LegacyCallData::CallAttempt::OnPerAttemptRecvTimerLocked(
auto* call_attempt = static_cast<CallAttempt*>(arg);
auto* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: perAttemptRecvTimeout timer fired: "
"error=%s, per_attempt_recv_timer_handle_.has_value()=%d",
calld->chand_, calld, call_attempt, StatusToString(error).c_str(),
call_attempt->per_attempt_recv_timer_handle_.has_value());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": perAttemptRecvTimeout timer fired: error="
<< StatusToString(error)
<< ", per_attempt_recv_timer_handle_.has_value()="
<< call_attempt->per_attempt_recv_timer_handle_.has_value();
}
CallCombinerClosureList closures;
call_attempt->per_attempt_recv_timer_handle_.reset();
@ -714,10 +706,9 @@ void RetryFilter::LegacyCallData::CallAttempt::
MaybeCancelPerAttemptRecvTimer() {
if (per_attempt_recv_timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: cancelling "
"perAttemptRecvTimeout timer",
calld_->chand_, calld_, this);
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": cancelling perAttemptRecvTimeout timer";
}
if (calld_->chand_->event_engine()->Cancel(
*per_attempt_recv_timer_handle_)) {
@ -738,9 +729,9 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
refcount),
call_attempt_(attempt.release()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: creating batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_, call_attempt_,
this);
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_
<< " attempt=" << call_attempt_ << ": creating batch " << this;
}
// We hold a ref to the call stack for every batch sent on a call attempt.
// This is because some batches on the call attempt may not complete
@ -759,9 +750,9 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
RetryFilter::LegacyCallData::CallAttempt::BatchData::~BatchData() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_, call_attempt_,
this);
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_
<< " attempt=" << call_attempt_ << ": destroying batch " << this;
}
CallAttempt* call_attempt = std::exchange(call_attempt_, nullptr);
grpc_call_stack* owning_call = call_attempt->calld_->owning_call_;
@ -832,11 +823,11 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_initial_metadata_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
StatusToString(error).c_str());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got recv_initial_metadata_ready, error="
<< StatusToString(error);
}
call_attempt->completed_recv_initial_metadata_ = true;
// If this attempt has been abandoned, then we're not going to use the
@ -859,10 +850,9 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
(call_attempt->trailing_metadata_available_ || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring "
"recv_initial_metadata_ready (Trailers-Only)",
calld->chand_, calld, call_attempt);
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_initial_metadata_ready (Trailers-Only)";
}
call_attempt->recv_initial_metadata_ready_deferred_batch_ =
std::move(batch_data);
@ -931,11 +921,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_message_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
StatusToString(error).c_str());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got recv_message_ready, error=" << StatusToString(error);
}
++call_attempt->completed_recv_message_count_;
// If this attempt has been abandoned, then we're not going to use the
@ -961,10 +950,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
(!call_attempt->recv_message_.has_value() || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring recv_message_ready "
"(nullptr message and recv_trailing_metadata pending)",
calld->chand_, calld, call_attempt);
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_message_ready (nullptr message and "
"recv_trailing_metadata pending)";
}
call_attempt->recv_message_ready_deferred_batch_ = std::move(batch_data);
call_attempt->recv_message_error_ = error;
@ -1125,11 +1114,11 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_trailing_metadata_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
StatusToString(error).c_str());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got recv_trailing_metadata_ready, error="
<< StatusToString(error);
}
call_attempt->completed_recv_trailing_metadata_ = true;
// If this attempt has been abandoned, then we're not going to use the
@ -1152,17 +1141,15 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
GetCallStatus(calld->deadline_, md_batch, error, &status, &server_pushback,
&is_lb_drop, &stream_network_state);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: call finished, status=%s "
"server_pushback=%s is_lb_drop=%d stream_network_state=%s",
calld->chand_, calld, call_attempt,
grpc_status_code_to_string(status),
server_pushback.has_value() ? server_pushback->ToString().c_str()
: "N/A",
is_lb_drop,
stream_network_state.has_value()
? absl::StrCat(*stream_network_state).c_str()
: "N/A");
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << ": call finished, status="
<< grpc_status_code_to_string(status) << " server_pushback="
<< (server_pushback.has_value() ? server_pushback->ToString()
: "N/A")
<< " is_lb_drop=" << is_lb_drop << " stream_network_state="
<< (stream_network_state.has_value()
? absl::StrCat(*stream_network_state)
: "N/A");
}
// Check if we should retry.
if (!is_lb_drop) { // Never retry on LB drops.
@ -1273,10 +1260,9 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
}
if (have_pending_send_ops) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: starting next batch for pending "
"send op(s)",
calld->chand_, calld, call_attempt_);
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_
<< ": starting next batch for pending send op(s)";
}
call_attempt_->AddRetriableBatches(closures);
}
@ -1288,13 +1274,13 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete, error=%s, batch=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
StatusToString(error).c_str(),
grpc_transport_stream_op_batch_string(&batch_data->batch_, false)
.c_str());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got on_complete, error=" << StatusToString(error)
<< ", batch="
<< grpc_transport_stream_op_batch_string(&batch_data->batch_,
false);
}
// If this attempt has been abandoned, then we're not going to propagate
// the completion of this batch, so do nothing.
@ -1310,8 +1296,8 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
if (GPR_UNLIKELY(!calld->retry_committed_ && !error.ok() &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: deferring on_complete",
calld->chand_, calld, call_attempt);
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << ": deferring on_complete";
}
call_attempt->on_complete_deferred_batches_.emplace_back(
std::move(batch_data), error);
@ -1364,19 +1350,18 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnCompleteForCancelOp(
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete for cancel_stream batch, error=%s, batch=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
StatusToString(error).c_str(),
grpc_transport_stream_op_batch_string(&batch_data->batch_, false)
.c_str());
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got on_complete for cancel_stream batch, error="
<< StatusToString(error) << ", batch="
<< grpc_transport_stream_op_batch_string(&batch_data->batch_,
false);
}
GRPC_CALL_COMBINER_STOP(
calld->call_combiner_,
"on_complete for internally generated cancel_stream op");
}
//
// retriable batch construction
//
@ -1408,12 +1393,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
AddRetriableSendMessageOp() {
auto* calld = call_attempt_->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: starting calld->send_messages[%" PRIuPTR
"]",
calld->chand_, calld, call_attempt_,
call_attempt_->started_send_message_count_);
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_
<< ": starting calld->send_messages["
<< call_attempt_->started_send_message_count_ << "]";
}
CachedSendMessage cache =
calld->send_messages_[call_attempt_->started_send_message_count_];
@ -1497,8 +1480,8 @@ grpc_error_handle RetryFilter::LegacyCallData::Init(
auto* chand = static_cast<RetryFilter*>(elem->channel_data);
new (elem->call_data) RetryFilter::LegacyCallData(chand, *args);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: created call", chand,
elem->call_data);
LOG(INFO) << "chand=" << chand << " calld=" << elem->call_data
<< ": created call";
}
return absl::OkStatus();
}
@ -1579,9 +1562,9 @@ RetryFilter::LegacyCallData::~LegacyCallData() {
void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
grpc_transport_stream_op_batch* batch) {
if (GRPC_TRACE_FLAG_ENABLED(retry) && !GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: batch started from surface: %s",
chand_, this,
grpc_transport_stream_op_batch_string(batch, false).c_str());
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": batch started from surface: "
<< grpc_transport_stream_op_batch_string(batch, false);
}
// If we have an LB call, delegate to the LB call.
if (committed_call_ != nullptr) {
@ -1602,8 +1585,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
// Save cancel_error in case subsequent batches are started.
cancelled_from_surface_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelled from surface: %s", chand_,
this, StatusToString(cancelled_from_surface_).c_str());
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelled from surface: "
<< StatusToString(cancelled_from_surface_);
}
// Fail any pending batches.
PendingBatchesFail(cancelled_from_surface_);
@ -1625,8 +1609,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
// Cancel retry timer if needed.
if (retry_timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling retry timer", chand_,
this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelling retry timer";
}
if (chand_->event_engine()->Cancel(*retry_timer_handle_)) {
GRPC_CALL_STACK_UNREF(owning_call_, "OnRetryTimer");
@ -1671,10 +1655,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
(retry_policy_ == nullptr ||
!retry_policy_->per_attempt_recv_timeout().has_value())) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retry committed before first attempt; "
"creating LB call",
chand_, this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": retry committed before first attempt; "
<< "creating LB call";
}
PendingBatchClear(pending);
auto* service_config_call_data =
@ -1690,8 +1673,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
// The attempt will automatically start any necessary replays or
// pending batches.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: creating call attempt", chand_,
this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": creating call attempt";
}
retry_codepath_started_ = true;
CreateCallAttempt(/*is_transparent_retry=*/false);
@ -1699,8 +1682,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
}
// Send batches to call attempt.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on attempt=%p", chand_,
this, call_attempt_.get());
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": starting batch on attempt=" << call_attempt_.get();
}
call_attempt_->StartRetriableBatches();
}
@ -1757,8 +1740,8 @@ void RetryFilter::LegacyCallData::MaybeCacheSendOpsForBatch(
void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_initial_metadata",
chand_, this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_initial_metadata";
}
send_initial_metadata_.Clear();
}
@ -1766,9 +1749,8 @@ void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
if (send_messages_[idx].slices != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying send_messages[%" PRIuPTR "]",
chand_, this, idx);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_messages[" << idx << "]";
}
Destruct(std::exchange(send_messages_[idx].slices, nullptr));
}
@ -1776,8 +1758,8 @@ void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
void RetryFilter::LegacyCallData::FreeCachedSendTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_trailing_metadata",
chand_, this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_trailing_metadata";
}
send_trailing_metadata_.Clear();
}
@ -1815,9 +1797,8 @@ RetryFilter::LegacyCallData::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR,
chand_, this, idx);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": adding pending batch at index " << idx;
}
PendingBatch* pending = &pending_batches_[idx];
CHECK_EQ(pending->batch, nullptr);
@ -1846,9 +1827,8 @@ RetryFilter::LegacyCallData::PendingBatchesAdd(
if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
chand_->per_rpc_retry_buffer_size())) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: exceeded retry buffer size, committing",
chand_, this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": exceeded retry buffer size, committing";
}
RetryCommit(call_attempt_.get());
}
@ -1883,8 +1863,8 @@ void RetryFilter::LegacyCallData::MaybeClearPendingBatch(
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand_,
this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": clearing pending batch";
}
PendingBatchClear(pending);
}
@ -1910,9 +1890,8 @@ void RetryFilter::LegacyCallData::PendingBatchesFail(grpc_error_handle error) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i].batch != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
chand_, this, num_batches, StatusToString(error).c_str());
LOG(INFO) << "chand=" << chand_ << " calld=" << this << ": failing "
<< num_batches << " pending batches: " << StatusToString(error);
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@ -1940,9 +1919,8 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr && predicate(batch)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: %s pending batch at index %" PRIuPTR,
chand_, this, log_message, i);
LOG(INFO) << "chand=" << chand_ << " calld=" << this << ": "
<< log_message << " pending batch at index " << i;
}
return pending;
}
@ -1958,7 +1936,8 @@ void RetryFilter::LegacyCallData::RetryCommit(CallAttempt* call_attempt) {
if (retry_committed_) return;
retry_committed_ = true;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand_, this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": committing retries";
}
if (call_attempt != nullptr) {
// If the call attempt's LB call has been committed, invoke the
@ -1992,9 +1971,9 @@ void RetryFilter::LegacyCallData::StartRetryTimer(
next_attempt_timeout = retry_backoff_.NextAttemptTime() - Timestamp::Now();
}
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand_,
this, next_attempt_timeout.millis());
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": retrying failed call in " << next_attempt_timeout.millis()
<< " ms";
}
// Schedule retry after computed delay.
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
@ -2025,8 +2004,8 @@ void RetryFilter::LegacyCallData::OnRetryTimerLocked(
void RetryFilter::LegacyCallData::AddClosureToStartTransparentRetry(
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: scheduling transparent retry", chand_,
this);
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": scheduling transparent retry";
}
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
GRPC_CLOSURE_INIT(&retry_closure_, StartTransparentRetry, this, nullptr);

@ -37,7 +37,6 @@
#include <grpc/impl/channel_arg_names.h>
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/log.h>
#include "src/core/channelz/channel_trace.h"
#include "src/core/channelz/channelz.h"
@ -418,10 +417,10 @@ class Subchannel::ConnectedSubchannelStateWatcher final
if (new_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
new_state == GRPC_CHANNEL_SHUTDOWN) {
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
gpr_log(GPR_INFO,
"subchannel %p %s: Connected subchannel %p reports %s: %s", c,
c->key_.ToString().c_str(), c->connected_subchannel_.get(),
ConnectivityStateName(new_state), status.ToString().c_str());
LOG(INFO) << "subchannel " << c << " " << c->key_.ToString()
<< ": Connected subchannel "
<< c->connected_subchannel_.get() << " reports "
<< ConnectivityStateName(new_state) << ": " << status;
}
c->connected_subchannel_.reset();
if (c->channelz_node() != nullptr) {
@ -603,8 +602,8 @@ void Subchannel::ThrottleKeepaliveTime(int new_keepalive_time) {
if (new_keepalive_time > keepalive_time_) {
keepalive_time_ = new_keepalive_time;
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
gpr_log(GPR_INFO, "subchannel %p %s: throttling keepalive time to %d",
this, key_.ToString().c_str(), new_keepalive_time);
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": throttling keepalive time to " << new_keepalive_time;
}
args_ = args_.Set(GRPC_ARG_KEEPALIVE_TIME_MS, new_keepalive_time);
}
@ -758,8 +757,8 @@ void Subchannel::OnRetryTimer() {
void Subchannel::OnRetryTimerLocked() {
if (shutdown_) return;
gpr_log(GPR_INFO, "subchannel %p %s: backoff delay elapsed, reporting IDLE",
this, key_.ToString().c_str());
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": backoff delay elapsed, reporting IDLE";
SetConnectivityStateLocked(GRPC_CHANNEL_IDLE, absl::OkStatus());
}
@ -803,11 +802,10 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
if (connecting_result_.transport == nullptr || !PublishTransportLocked()) {
const Duration time_until_next_attempt =
next_attempt_time_ - Timestamp::Now();
gpr_log(GPR_INFO,
"subchannel %p %s: connect failed (%s), backing off for %" PRId64
" ms",
this, key_.ToString().c_str(), StatusToString(error).c_str(),
time_until_next_attempt.millis());
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": connect failed (" << StatusToString(error)
<< "), backing off for " << time_until_next_attempt.millis()
<< " ms";
SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_to_absl_status(error));
retry_timer_handle_ = event_engine_->RunAfter(
@ -844,9 +842,8 @@ bool Subchannel::PublishTransportLocked() {
absl::StatusOr<RefCountedPtr<grpc_channel_stack>> stack = builder.Build();
if (!stack.ok()) {
connecting_result_.Reset();
gpr_log(GPR_ERROR,
"subchannel %p %s: error initializing subchannel stack: %s", this,
key_.ToString().c_str(), stack.status().ToString().c_str());
LOG(ERROR) << "subchannel " << this << " " << key_.ToString()
<< ": error initializing subchannel stack: " << stack.status();
return false;
}
connected_subchannel_ = MakeRefCounted<LegacyConnectedSubchannel>(
@ -865,10 +862,9 @@ bool Subchannel::PublishTransportLocked() {
auto call_destination = builder.Build(transport_destination);
if (!call_destination.ok()) {
connecting_result_.Reset();
gpr_log(GPR_ERROR,
"subchannel %p %s: error initializing subchannel stack: %s", this,
key_.ToString().c_str(),
call_destination.status().ToString().c_str());
LOG(ERROR) << "subchannel " << this << " " << key_.ToString()
<< ": error initializing subchannel stack: "
<< call_destination.status();
return false;
}
connected_subchannel_ = MakeRefCounted<NewConnectedSubchannel>(
@ -878,8 +874,9 @@ bool Subchannel::PublishTransportLocked() {
connecting_result_.Reset();
// Publish.
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
gpr_log(GPR_INFO, "subchannel %p %s: new connected subchannel at %p", this,
key_.ToString().c_str(), connected_subchannel_.get());
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": new connected subchannel at "
<< connected_subchannel_.get();
}
if (channelz_node_ != nullptr) {
channelz_node_->SetChildSocket(std::move(socket_node));

@ -50,11 +50,6 @@ BANNED_EXCEPT = {
],
"gpr_log(": [
"./include/grpc/support/log.h",
"./src/core/client_channel/client_channel.cc",
"./src/core/client_channel/client_channel_filter.cc",
"./src/core/client_channel/load_balanced_call_destination.cc",
"./src/core/client_channel/retry_filter_legacy_call_data.cc",
"./src/core/client_channel/subchannel.cc",
"./src/core/ext/filters/backend_metrics/backend_metric_filter.cc",
"./src/core/ext/filters/channel_idle/legacy_channel_idle_filter.cc",
"./src/core/ext/filters/fault_injection/fault_injection_filter.cc",
