[Gpr_To_Absl_Logging] Using GRPC_TRACE_LOG instead of GRPC_TRACE_FLAG_ENABLED (#37351)

[Gpr_To_Absl_Logging] Using GRPC_TRACE_LOG instead of GRPC_TRACE_FLAG_ENABLED

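For context, the point of the rewrite is that `GRPC_TRACE_LOG` folds the trace-flag check into the logging statement itself, so each guarded block collapses into one streaming expression. The snippet below is only a sketch of that shape built on Abseil's `LOG_IF`, using stand-in names (`_SKETCH` suffixes, a fake trace flag); it is not gRPC's actual implementation.

```
// Illustrative sketch only -- not gRPC's real definitions.
#include "absl/log/log.h"

// Stand-in for gRPC's per-tracer flag check (the real macro consults a
// registered TraceFlag object).
#define GRPC_TRACE_FLAG_ENABLED_SKETCH(tracer) (tracer##_trace_enabled)

// The shape of the new macro: fold the flag check into a conditional LOG,
// so call sites can stream into it directly.
#define GRPC_TRACE_LOG_SKETCH(tracer, level) \
  LOG_IF(level, GRPC_TRACE_FLAG_ENABLED_SKETCH(tracer))

static bool retry_trace_enabled = true;  // stand-in trace flag

int main() {
  // Before: guarded block.
  if (GRPC_TRACE_FLAG_ENABLED_SKETCH(retry)) {
    LOG(INFO) << "attempt=" << 42 << ": destroying call attempt";
  }
  // After: single streaming statement; the message is evaluated only when
  // the flag is enabled, just like the guarded form.
  GRPC_TRACE_LOG_SKETCH(retry, INFO)
      << "attempt=" << 42 << ": destroying call attempt";
}
```
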
This change was made with a simple find-and-replace pass.

Find string:

```
if \(GRPC_TRACE_FLAG_ENABLED\((.*)\)\) {
(.*)LOG\(INFO\)(.*)
(.*);
(.*)}(.*)
```

Replace string:

`GRPC_TRACE_LOG($1, INFO) $3 $4;`
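
For reference, here is a small standalone sketch of how the same multi-line replacement could be scripted. The PR only says "find and replace", so the tool shown here, the `src` directory it walks, and the escaping of the literal braces are assumptions rather than what was actually run.

```
// Hypothetical helper (not part of this PR) that applies the rewrite across
// the tree with C++17's <regex> and <filesystem>.  Assumed to run from the
// repository root.
#include <filesystem>
#include <fstream>
#include <iostream>
#include <regex>
#include <sstream>
#include <string>

namespace fs = std::filesystem;

int main() {
  // The find pattern from above, with the literal braces escaped.  In
  // ECMAScript regexes '.' does not match '\n', so each capture group stays
  // on its own line, mirroring the editor-style search.
  const std::regex pattern(
      R"(if \(GRPC_TRACE_FLAG_ENABLED\((.*)\)\) \{\n)"
      R"((.*)LOG\(INFO\)(.*)\n)"
      R"((.*);\n)"
      R"((.*)\}(.*))");
  // The replace string from above ($1, $3, $4 keep their meaning).
  const std::string replacement = "GRPC_TRACE_LOG($1, INFO) $3 $4;";

  for (const auto& entry : fs::recursive_directory_iterator("src")) {
    if (!entry.is_regular_file()) continue;
    const auto ext = entry.path().extension();
    if (ext != ".cc" && ext != ".h") continue;
    std::ifstream in(entry.path());
    std::stringstream buffer;
    buffer << in.rdbuf();
    const std::string original = buffer.str();
    const std::string rewritten =
        std::regex_replace(original, pattern, replacement);
    if (rewritten != original) {
      std::ofstream(entry.path()) << rewritten;
      std::cout << "rewrote " << entry.path() << "\n";
    }
  }
}
```

Note that the pattern only matches guarded blocks whose `LOG(INFO)` statement spans exactly two lines, so call sites with longer bodies are untouched by this pass.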

Closes #37351

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/37351 from tanvi-jagtap:GRPC_TRACE_FLAG_ENABLED_03 6b9c0f2737
PiperOrigin-RevId: 659033759
Tanvi Jagtap, committed by Copybara-Service
parent ecc1ea512e
commit cdc0263b1f
Changed files (21):

1. src/core/client_channel/retry_filter_legacy_call_data.cc (122 changed lines)
2. src/core/client_channel/subchannel.cc (14 changed lines)
3. src/core/ext/filters/backend_metrics/backend_metric_filter.cc (7 changed lines)
4. src/core/ext/filters/fault_injection/fault_injection_filter.cc (7 changed lines)
5. src/core/ext/filters/http/message_compress/compression_filter.cc (7 changed lines)
6. src/core/ext/filters/http/server/http_server_filter.cc (6 changed lines)
7. src/core/ext/transport/chaotic_good/chaotic_good_transport.h (7 changed lines)
8. src/core/ext/transport/chaotic_good/client_transport.cc (6 changed lines)
9. src/core/ext/transport/chaotic_good/server_transport.cc (20 changed lines)
10. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (41 changed lines)
11. src/core/ext/transport/chttp2/transport/flow_control.cc (7 changed lines)
12. src/core/ext/transport/chttp2/transport/frame_ping.cc (7 changed lines)
13. src/core/ext/transport/chttp2/transport/frame_rst_stream.cc (7 changed lines)
14. src/core/ext/transport/chttp2/transport/stream_lists.cc (14 changed lines)
15. src/core/handshaker/handshaker.cc (13 changed lines)
16. src/core/resolver/xds/xds_dependency_manager.cc (113 changed lines)
17. src/core/resolver/xds/xds_resolver.cc (27 changed lines)
18. src/core/xds/grpc/xds_client_grpc.cc (13 changed lines)
19. src/core/xds/xds_client/xds_client.cc (42 changed lines)
20. src/cpp/ext/gcp/environment_autodetect.cc (7 changed lines)
21. src/cpp/server/backend_metric_recorder.cc (65 changed lines)

src/core/client_channel/retry_filter_legacy_call_data.cc
@ -167,10 +167,9 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
}
RetryFilter::LegacyCallData::CallAttempt::~CallAttempt() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": destroying call attempt";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": destroying call attempt";
}
void RetryFilter::LegacyCallData::CallAttempt::
@ -520,10 +519,9 @@ void RetryFilter::LegacyCallData::CallAttempt::AddRetriableBatches(
}
void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": constructing retriable batches";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": constructing retriable batches";
// Construct list of closures to execute, one for each pending batch.
CallCombinerClosureList closures;
AddRetriableBatches(&closures);
@ -556,10 +554,9 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
if (calld_->retry_throttle_data_ != nullptr) {
calld_->retry_throttle_data_->RecordSuccess();
}
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": call succeeded";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": call succeeded";
return false;
}
// Status is not OK. Check whether the status is retryable.
@ -582,18 +579,16 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
// checks, so that we don't fail to record failures due to other factors.
if (calld_->retry_throttle_data_ != nullptr &&
!calld_->retry_throttle_data_->RecordFailure()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": retries throttled";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": retries throttled";
return false;
}
// Check whether the call is committed.
if (calld_->retry_committed_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": retries already committed";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": retries already committed";
return false;
}
// Check whether we have retries remaining.
@ -1295,10 +1290,9 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
// recv_trailing_metadata comes back.
if (GPR_UNLIKELY(!calld->retry_committed_ && !error.ok() &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << ": deferring on_complete";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << ": deferring on_complete";
call_attempt->on_complete_deferred_batches_.emplace_back(
std::move(batch_data), error);
CallCombinerClosureList closures;
@ -1479,10 +1473,8 @@ grpc_error_handle RetryFilter::LegacyCallData::Init(
grpc_call_element* elem, const grpc_call_element_args* args) {
auto* chand = static_cast<RetryFilter*>(elem->channel_data);
new (elem->call_data) RetryFilter::LegacyCallData(chand, *args);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand << " calld=" << elem->call_data
<< ": created call";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand << " calld=" << elem->call_data << ": created call";
return absl::OkStatus();
}
@ -1608,10 +1600,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
}
// Cancel retry timer if needed.
if (retry_timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelling retry timer";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelling retry timer";
if (chand_->event_engine()->Cancel(*retry_timer_handle_)) {
GRPC_CALL_STACK_UNREF(owning_call_, "OnRetryTimer");
}
@ -1672,19 +1662,16 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
// Otherwise, create a call attempt.
// The attempt will automatically start any necessary replays or
// pending batches.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": creating call attempt";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this << ": creating call attempt";
retry_codepath_started_ = true;
CreateCallAttempt(/*is_transparent_retry=*/false);
return;
}
// Send batches to call attempt.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": starting batch on attempt=" << call_attempt_.get();
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this
<< ": starting batch on attempt=" << call_attempt_.get();
call_attempt_->StartRetriableBatches();
}
@ -1739,28 +1726,22 @@ void RetryFilter::LegacyCallData::MaybeCacheSendOpsForBatch(
}
void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_initial_metadata";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_initial_metadata";
send_initial_metadata_.Clear();
}
void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
if (send_messages_[idx].slices != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_messages[" << idx << "]";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_messages[" << idx << "]";
Destruct(std::exchange(send_messages_[idx].slices, nullptr));
}
}
void RetryFilter::LegacyCallData::FreeCachedSendTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_trailing_metadata";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": destroying send_trailing_metadata";
send_trailing_metadata_.Clear();
}
@ -1796,10 +1777,8 @@ RetryFilter::LegacyCallData::PendingBatch*
RetryFilter::LegacyCallData::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": adding pending batch at index " << idx;
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": adding pending batch at index " << idx;
PendingBatch* pending = &pending_batches_[idx];
CHECK_EQ(pending->batch, nullptr);
pending->batch = batch;
@ -1826,10 +1805,8 @@ RetryFilter::LegacyCallData::PendingBatchesAdd(
// ops have already been sent, and we commit to that attempt.
if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
chand_->per_rpc_retry_buffer_size())) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": exceeded retry buffer size, committing";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": exceeded retry buffer size, committing";
RetryCommit(call_attempt_.get());
}
return pending;
@ -1862,10 +1839,8 @@ void RetryFilter::LegacyCallData::MaybeClearPendingBatch(
(!batch->recv_trailing_metadata ||
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": clearing pending batch";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": clearing pending batch";
PendingBatchClear(pending);
}
}
@ -1918,10 +1893,9 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
PendingBatch* pending = &pending_batches_[i];
grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr && predicate(batch)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this << ": "
<< log_message << " pending batch at index " << i;
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this << ": " << log_message
<< " pending batch at index " << i;
return pending;
}
}
@ -1935,10 +1909,8 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
void RetryFilter::LegacyCallData::RetryCommit(CallAttempt* call_attempt) {
if (retry_committed_) return;
retry_committed_ = true;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": committing retries";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this << ": committing retries";
if (call_attempt != nullptr) {
// If the call attempt's LB call has been committed, invoke the
// call's on_commit callback.
@ -2003,10 +1975,8 @@ void RetryFilter::LegacyCallData::OnRetryTimerLocked(
void RetryFilter::LegacyCallData::AddClosureToStartTransparentRetry(
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": scheduling transparent retry";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": scheduling transparent retry";
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
GRPC_CLOSURE_INIT(&retry_closure_, StartTransparentRetry, this, nullptr);
closures->Add(&retry_closure_, absl::OkStatus(), "start transparent retry");

src/core/client_channel/subchannel.cc
@ -602,10 +602,9 @@ void Subchannel::ThrottleKeepaliveTime(int new_keepalive_time) {
// Only update the value if the new keepalive time is larger.
if (new_keepalive_time > keepalive_time_) {
keepalive_time_ = new_keepalive_time;
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": throttling keepalive time to " << new_keepalive_time;
}
GRPC_TRACE_LOG(subchannel, INFO)
<< "subchannel " << this << " " << key_.ToString()
<< ": throttling keepalive time to " << new_keepalive_time;
args_ = args_.Set(GRPC_ARG_KEEPALIVE_TIME_MS, new_keepalive_time);
}
}
@ -758,10 +757,9 @@ void Subchannel::OnRetryTimer() {
void Subchannel::OnRetryTimerLocked() {
if (shutdown_) return;
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": backoff delay elapsed, reporting IDLE";
}
GRPC_TRACE_LOG(subchannel, INFO)
<< "subchannel " << this << " " << key_.ToString()
<< ": backoff delay elapsed, reporting IDLE";
SetConnectivityStateLocked(GRPC_CHANNEL_IDLE, absl::OkStatus());
}

src/core/ext/filters/backend_metrics/backend_metric_filter.cc
@ -135,10 +135,9 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
}
absl::optional<std::string> serialized = MaybeSerializeBackendMetrics(ctx);
if (serialized.has_value() && !serialized->empty()) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
LOG(INFO) << "[" << this
<< "] Backend metrics serialized. size: " << serialized->size();
}
GRPC_TRACE_LOG(backend_metric_filter, INFO)
<< "[" << this
<< "] Backend metrics serialized. size: " << serialized->size();
md.Set(EndpointLoadMetricsBinMetadata(),
Slice::FromCopiedString(std::move(*serialized)));
} else if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {

src/core/ext/filters/fault_injection/fault_injection_filter.cc
@ -150,10 +150,9 @@ FaultInjectionFilter::FaultInjectionFilter(ChannelFilter::Args filter_args)
ArenaPromise<absl::Status> FaultInjectionFilter::Call::OnClientInitialMetadata(
ClientMetadata& md, FaultInjectionFilter* filter) {
auto decision = filter->MakeInjectionDecision(md);
if (GRPC_TRACE_FLAG_ENABLED(fault_injection_filter)) {
LOG(INFO) << "chand=" << this << ": Fault injection triggered "
<< decision.ToString();
}
GRPC_TRACE_LOG(fault_injection_filter, INFO)
<< "chand=" << this << ": Fault injection triggered "
<< decision.ToString();
auto delay = decision.DelayUntil();
return TrySeq(Sleep(delay), [decision = std::move(decision)]() {
return decision.MaybeAbort();

src/core/ext/filters/http/message_compress/compression_filter.cc
@ -111,10 +111,9 @@ ChannelCompression::ChannelCompression(const ChannelArgs& args)
MessageHandle ChannelCompression::CompressMessage(
MessageHandle message, grpc_compression_algorithm algorithm) const {
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
LOG(INFO) << "CompressMessage: len=" << message->payload()->Length()
<< " alg=" << algorithm << " flags=" << message->flags();
}
GRPC_TRACE_LOG(compression, INFO)
<< "CompressMessage: len=" << message->payload()->Length()
<< " alg=" << algorithm << " flags=" << message->flags();
auto* call_tracer = MaybeGetContext<CallTracerInterface>();
if (call_tracer != nullptr) {
call_tracer->RecordSendMessage(*message->payload());

src/core/ext/filters/http/server/http_server_filter.cc
@ -139,10 +139,8 @@ ServerMetadataHandle HttpServerFilter::Call::OnClientInitialMetadata(
}
void HttpServerFilter::Call::OnServerInitialMetadata(ServerMetadata& md) {
if (GRPC_TRACE_FLAG_ENABLED(call)) {
LOG(INFO) << GetContext<Activity>()->DebugTag()
<< "[http-server] Write metadata";
}
GRPC_TRACE_LOG(call, INFO)
<< GetContext<Activity>()->DebugTag() << "[http-server] Write metadata";
FilterOutgoingMetadata(&md);
md.Set(HttpStatusMetadata(), 200);
md.Set(ContentTypeMetadata(), ContentTypeMetadata::kApplicationGrpc);

src/core/ext/transport/chaotic_good/chaotic_good_transport.h
@ -125,10 +125,9 @@ class ChaoticGoodTransport : public RefCounted<ChaoticGoodTransport> {
FrameLimits limits) {
auto s = frame.Deserialize(&parser_, header, bitgen_, arena,
std::move(buffers), limits);
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: DeserializeFrame "
<< (s.ok() ? frame.ToString() : s.ToString());
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: DeserializeFrame "
<< (s.ok() ? frame.ToString() : s.ToString());
return s;
}

src/core/ext/transport/chaotic_good/client_transport.cc
@ -280,10 +280,8 @@ auto ChaoticGoodClientTransport::CallOutboundLoop(uint32_t stream_id,
// Wait for initial metadata then send it out.
call_handler.PullClientInitialMetadata(),
[send_fragment](ClientMetadataHandle md) mutable {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: Sending initial metadata: "
<< md->DebugString();
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: Sending initial metadata: " << md->DebugString();
ClientFragmentFrame frame;
frame.headers = std::move(md);
return send_fragment(std::move(frame));

src/core/ext/transport/chaotic_good/server_transport.cc
@ -75,10 +75,8 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
CallInitiator call_initiator, ClientFragmentFrame frame,
uint32_t stream_id) {
DCHECK(frame.headers == nullptr);
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: PushFragmentIntoCall: frame="
<< frame.ToString();
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: PushFragmentIntoCall: frame=" << frame.ToString();
return Seq(If(
frame.message.has_value(),
[&call_initiator, &frame]() mutable {
@ -185,10 +183,9 @@ auto ChaoticGoodServerTransport::SendCallInitialMetadataAndBody(
call_initiator.PullServerInitialMetadata(),
[stream_id, outgoing_frames, call_initiator,
this](absl::optional<ServerMetadataHandle> md) mutable {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: SendCallInitialMetadataAndBody: md="
<< (md.has_value() ? (*md)->DebugString() : "null");
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: SendCallInitialMetadataAndBody: md="
<< (md.has_value() ? (*md)->DebugString() : "null");
return If(
md.has_value(),
[&md, stream_id, &outgoing_frames, &call_initiator, this]() {
@ -340,10 +337,9 @@ auto ChaoticGoodServerTransport::OnTransportActivityDone(
absl::string_view activity) {
return [self = RefAsSubclass<ChaoticGoodServerTransport>(),
activity](absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: OnTransportActivityDone: activity="
<< activity << " status=" << status;
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: OnTransportActivityDone: activity=" << activity
<< " status=" << status;
self->AbortWithError();
};
}

src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@ -1651,10 +1651,9 @@ void grpc_chttp2_transport::PerformStreamOp(
}
}
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << "perform_stream_op[s=" << s << "; op=" << op
<< "]: " << grpc_transport_stream_op_batch_string(op, false);
}
GRPC_TRACE_LOG(http, INFO)
<< "perform_stream_op[s=" << s << "; op=" << op
<< "]: " << grpc_transport_stream_op_batch_string(op, false);
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
op->handler_private.extra_arg = gs;
@ -1998,10 +1997,8 @@ static void perform_transport_op_locked(void* stream_op,
}
void grpc_chttp2_transport::PerformOp(grpc_transport_op* op) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << "perform_transport_op[t=" << this
<< "]: " << grpc_transport_op_string(op);
}
GRPC_TRACE_LOG(http, INFO) << "perform_transport_op[t=" << this
<< "]: " << grpc_transport_op_string(op);
op->handler_private.extra_arg = this;
Ref().release()->combiner->Run(
GRPC_CLOSURE_INIT(&op->handler_private.closure,
@ -2816,10 +2813,9 @@ static void start_bdp_ping(grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
static void start_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Start BDP ping err=" << grpc_core::StatusToString(error);
}
GRPC_TRACE_LOG(http, INFO)
<< t->peer_string.as_string_view()
<< ": Start BDP ping err=" << grpc_core::StatusToString(error);
if (!error.ok() || !t->closed_with_error.ok()) {
return;
}
@ -2842,10 +2838,9 @@ static void finish_bdp_ping(grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
static void finish_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << t->peer_string.as_string_view()
<< ": Complete BDP ping err=" << grpc_core::StatusToString(error);
}
GRPC_TRACE_LOG(http, INFO)
<< t->peer_string.as_string_view()
<< ": Complete BDP ping err=" << grpc_core::StatusToString(error);
if (!error.ok() || !t->closed_with_error.ok()) {
return;
}
@ -3107,10 +3102,9 @@ static void benign_reclaimer_locked(
if (error.ok() && t->stream_map.empty()) {
// Channel with no active streams: send a goaway to try and make it
// disconnect cleanly
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - send goaway to free memory";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "HTTP2: " << t->peer_string.as_string_view()
<< " - send goaway to free memory";
send_goaway(t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
grpc_core::StatusIntProperty::kHttp2Error,
@ -3134,10 +3128,9 @@ static void destructive_reclaimer_locked(
if (error.ok() && !t->stream_map.empty()) {
// As stream_map is a hash map, this selects effectively a random stream.
grpc_chttp2_stream* s = t->stream_map.begin()->second;
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
<< " - abandon stream id " << s->id;
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "HTTP2: " << t->peer_string.as_string_view()
<< " - abandon stream id " << s->id;
grpc_chttp2_cancel_stream(
t.get(), s,
grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),

src/core/ext/transport/chttp2/transport/flow_control.cc
@ -233,10 +233,9 @@ void TransportFlowControl::UpdateSetting(
FlowControlAction& (FlowControlAction::*set)(FlowControlAction::Urgency,
uint32_t)) {
if (new_desired_value != *desired_value) {
if (GRPC_TRACE_FLAG_ENABLED(flowctl)) {
LOG(INFO) << "[flowctl] UPDATE SETTING " << name << " from "
<< *desired_value << " to " << new_desired_value;
}
GRPC_TRACE_LOG(flowctl, INFO)
<< "[flowctl] UPDATE SETTING " << name << " from " << *desired_value
<< " to " << new_desired_value;
// Reaching zero can only happen for initial window size, and if it occurs
// we really want to wake up writes and ensure all the queued stream
// window updates are flushed, since stream flow control operates

src/core/ext/transport/chttp2/transport/frame_ping.cc
@ -93,10 +93,9 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
if (p->byte == 8) {
CHECK(is_last);
if (p->is_ack) {
if (GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
<< "]: received ping ack " << p->opaque_8bytes;
}
GRPC_TRACE_LOG(http2_ping, INFO)
<< (t->is_client ? "CLIENT" : "SERVER") << "[" << t
<< "]: received ping ack " << p->opaque_8bytes;
grpc_chttp2_ack_ping(t, p->opaque_8bytes);
} else {
if (!t->is_client) {

src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
@ -113,10 +113,9 @@ grpc_error_handle grpc_chttp2_rst_stream_parser_parse(void* parser,
((static_cast<uint32_t>(p->reason_bytes[1])) << 16) |
((static_cast<uint32_t>(p->reason_bytes[2])) << 8) |
((static_cast<uint32_t>(p->reason_bytes[3])));
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << "[chttp2 transport=" << t << " stream=" << s
<< "] received RST_STREAM(reason=" << reason << ")";
}
GRPC_TRACE_LOG(http, INFO)
<< "[chttp2 transport=" << t << " stream=" << s
<< "] received RST_STREAM(reason=" << reason << ")";
grpc_error_handle error;
if (reason != GRPC_HTTP2_NO_ERROR || s->trailing_metadata_buffer.empty()) {
error = grpc_error_set_int(

src/core/ext/transport/chttp2/transport/stream_lists.cc
@ -90,10 +90,9 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
} else {
t->lists[id].tail = s->links[id].prev;
}
if (GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
LOG(INFO) << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
<< "]: remove from " << stream_list_id_string(id);
}
GRPC_TRACE_LOG(http2_stream_state, INFO)
<< t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
<< "]: remove from " << stream_list_id_string(id);
}
static bool stream_list_maybe_remove(grpc_chttp2_transport* t,
@ -122,10 +121,9 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
}
t->lists[id].tail = s;
s->included.set(id);
if (GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
LOG(INFO) << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
<< "]: add to " << stream_list_id_string(id);
}
GRPC_TRACE_LOG(http2_stream_state, INFO)
<< t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
<< "]: add to " << stream_list_id_string(id);
}
static bool stream_list_add(grpc_chttp2_transport* t, grpc_chttp2_stream* s,

src/core/handshaker/handshaker.cc
@ -139,17 +139,14 @@ void HandshakeManager::DoHandshake(
void HandshakeManager::Shutdown(absl::Status error) {
MutexLock lock(&mu_);
if (!is_shutdown_) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this
<< ": Shutdown() called: " << error;
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this << ": Shutdown() called: " << error;
is_shutdown_ = true;
// Shutdown the handshaker that's currently in progress, if any.
if (index_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this
<< ": shutting down handshaker at index " << index_ - 1;
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this
<< ": shutting down handshaker at index " << index_ - 1;
handshakers_[index_ - 1]->Shutdown(std::move(error));
}
}

src/core/resolver/xds/xds_dependency_manager.cc
@ -364,10 +364,9 @@ XdsDependencyManager::XdsDependencyManager(
listener_resource_name_(std::move(listener_resource_name)),
args_(std::move(args)),
interested_parties_(interested_parties) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] starting watch for listener " << listener_resource_name_;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] starting watch for listener "
<< listener_resource_name_;
auto listener_watcher = MakeRefCounted<ListenerWatcher>(Ref());
listener_watcher_ = listener_watcher.get();
XdsListenerResourceType::StartWatch(
@ -420,10 +419,8 @@ void XdsDependencyManager::ResetBackoff() {
void XdsDependencyManager::OnListenerUpdate(
std::shared_ptr<const XdsListenerResource> listener) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received Listener update";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] received Listener update";
if (xds_client_ == nullptr) return;
const auto* hcm = absl::get_if<XdsListenerResource::HttpConnectionManager>(
&listener->listener);
@ -597,10 +594,8 @@ void XdsDependencyManager::OnResourceDoesNotExist(std::string context) {
void XdsDependencyManager::OnClusterUpdate(
const std::string& name,
std::shared_ptr<const XdsClusterResource> cluster) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received Cluster update: " << name;
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
<< "] received Cluster update: " << name;
if (xds_client_ == nullptr) return;
auto it = cluster_watchers_.find(name);
if (it == cluster_watchers_.end()) return;
@ -610,10 +605,9 @@ void XdsDependencyManager::OnClusterUpdate(
void XdsDependencyManager::OnClusterError(const std::string& name,
absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received Cluster error: " << name << " " << status;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] received Cluster error: " << name << " " << status;
if (xds_client_ == nullptr) return;
auto it = cluster_watchers_.find(name);
if (it == cluster_watchers_.end()) return;
@ -625,10 +619,8 @@ void XdsDependencyManager::OnClusterError(const std::string& name,
}
void XdsDependencyManager::OnClusterDoesNotExist(const std::string& name) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] Cluster does not exist: " << name;
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
<< "] Cluster does not exist: " << name;
if (xds_client_ == nullptr) return;
auto it = cluster_watchers_.find(name);
if (it == cluster_watchers_.end()) return;
@ -640,10 +632,8 @@ void XdsDependencyManager::OnClusterDoesNotExist(const std::string& name) {
void XdsDependencyManager::OnEndpointUpdate(
const std::string& name,
std::shared_ptr<const XdsEndpointResource> endpoint) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received Endpoint update: " << name;
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
<< "] received Endpoint update: " << name;
if (xds_client_ == nullptr) return;
auto it = endpoint_watchers_.find(name);
if (it == endpoint_watchers_.end()) return;
@ -672,10 +662,9 @@ void XdsDependencyManager::OnEndpointUpdate(
void XdsDependencyManager::OnEndpointError(const std::string& name,
absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received Endpoint error: " << name << " " << status;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] received Endpoint error: " << name << " " << status;
if (xds_client_ == nullptr) return;
auto it = endpoint_watchers_.find(name);
if (it == endpoint_watchers_.end()) return;
@ -687,10 +676,8 @@ void XdsDependencyManager::OnEndpointError(const std::string& name,
}
void XdsDependencyManager::OnEndpointDoesNotExist(const std::string& name) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] Endpoint does not exist: " << name;
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
<< "] Endpoint does not exist: " << name;
if (xds_client_ == nullptr) return;
auto it = endpoint_watchers_.find(name);
if (it == endpoint_watchers_.end()) return;
@ -702,10 +689,8 @@ void XdsDependencyManager::OnEndpointDoesNotExist(const std::string& name) {
void XdsDependencyManager::OnDnsResult(const std::string& dns_name,
Resolver::Result result) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received DNS update: " << dns_name;
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
<< "] received DNS update: " << dns_name;
if (xds_client_ == nullptr) return;
auto it = dns_resolvers_.find(dns_name);
if (it == dns_resolvers_.end()) return;
@ -765,10 +750,9 @@ bool XdsDependencyManager::PopulateClusterConfigMap(
// Create a new watcher if needed.
if (state.watcher == nullptr) {
auto watcher = MakeRefCounted<ClusterWatcher>(Ref(), name);
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] starting watch for cluster " << name;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] starting watch for cluster "
<< name;
state.watcher = watcher.get();
XdsClusterResourceType::StartWatch(xds_client_.get(), name,
std::move(watcher));
@ -792,10 +776,9 @@ bool XdsDependencyManager::PopulateClusterConfigMap(
// Start EDS watch if needed.
auto& eds_state = endpoint_watchers_[eds_resource_name];
if (eds_state.watcher == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] starting watch for endpoint " << eds_resource_name;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] starting watch for endpoint " << eds_resource_name;
auto watcher =
MakeRefCounted<EndpointWatcher>(Ref(), eds_resource_name);
eds_state.watcher = watcher.get();
@ -820,10 +803,9 @@ bool XdsDependencyManager::PopulateClusterConfigMap(
// Start DNS resolver if needed.
auto& dns_state = dns_resolvers_[logical_dns.hostname];
if (dns_state.resolver == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] starting DNS resolver for " << logical_dns.hostname;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] starting DNS resolver for " << logical_dns.hostname;
auto* fake_resolver_response_generator = args_.GetPointer<
FakeResolverResponseGenerator>(
GRPC_ARG_XDS_LOGICAL_DNS_CLUSTER_FAKE_RESOLVER_RESPONSE_GENERATOR);
@ -987,10 +969,9 @@ void XdsDependencyManager::MaybeReportUpdate() {
++it;
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] cancelling watch for cluster " << cluster_name;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] cancelling watch for cluster "
<< cluster_name;
XdsClusterResourceType::CancelWatch(xds_client_.get(), cluster_name,
it->second.watcher,
/*delay_unsubscription=*/false);
@ -1005,10 +986,9 @@ void XdsDependencyManager::MaybeReportUpdate() {
++it;
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] cancelling watch for EDS resource " << eds_resource_name;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] cancelling watch for EDS resource " << eds_resource_name;
XdsEndpointResourceType::CancelWatch(xds_client_.get(), eds_resource_name,
it->second.watcher,
/*delay_unsubscription=*/false);
@ -1022,24 +1002,21 @@ void XdsDependencyManager::MaybeReportUpdate() {
++it;
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] shutting down DNS resolver for " << dns_name;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] shutting down DNS resolver for " << dns_name;
dns_resolvers_.erase(it++);
}
// If we have all the data we need, then send an update.
if (!have_all_resources) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] missing data -- NOT returning config";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] missing data -- NOT returning config";
return;
}
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] returning config: " << config->ToString();
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] returning config: " << config->ToString();
watcher_->OnUpdate(std::move(config));
}

src/core/resolver/xds/xds_resolver.cc
@ -545,10 +545,9 @@ XdsResolver::RouteConfigData::CreateMethodConfig(
absl::Status XdsResolver::RouteConfigData::AddRouteEntry(
XdsResolver* resolver, const XdsRouteConfigResource::Route& route,
const Duration& default_max_stream_duration) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << resolver << "] XdsConfigSelector " << this
<< ": route: " << route.ToString();
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << resolver << "] XdsConfigSelector " << this
<< ": route: " << route.ToString();
routes_.emplace_back(route);
auto* route_entry = &routes_.back();
auto maybe_add_cluster = [&](absl::string_view cluster_key,
@ -637,10 +636,8 @@ XdsResolver::XdsConfigSelector::XdsConfigSelector(
RefCountedPtr<RouteConfigData> route_config_data)
: resolver_(std::move(resolver)),
route_config_data_(std::move(route_config_data)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << resolver_.get()
<< "] creating XdsConfigSelector " << this;
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[xds_resolver " << resolver_.get()
<< "] creating XdsConfigSelector " << this;
// Populate filter list.
const auto& http_filter_registry =
static_cast<const GrpcXdsBootstrap&>(resolver_->xds_client_->bootstrap())
@ -660,10 +657,9 @@ XdsResolver::XdsConfigSelector::XdsConfigSelector(
}
XdsResolver::XdsConfigSelector::~XdsConfigSelector() {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << resolver_.get()
<< "] destroying XdsConfigSelector " << this;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << resolver_.get()
<< "] destroying XdsConfigSelector " << this;
route_config_data_.reset();
if (!IsWorkSerializerDispatchEnabled()) {
resolver_->MaybeRemoveUnusedClusters();
@ -962,10 +958,9 @@ void XdsResolver::StartLocked() {
lds_resource_name_ =
absl::StrReplaceAll(name_template, {{"%s", resource_name_fragment}});
}
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] Started with lds_resource_name "
<< lds_resource_name_;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] Started with lds_resource_name "
<< lds_resource_name_;
// Start watch for xDS config.
dependency_mgr_ = MakeOrphanable<XdsDependencyManager>(
xds_client_, work_serializer_,

src/core/xds/grpc/xds_client_grpc.cc
@ -210,10 +210,9 @@ absl::StatusOr<std::string> GetBootstrapContents(const char* fallback_config) {
// Next, try GRPC_XDS_BOOTSTRAP_CONFIG env var.
auto env_config = GetEnv("GRPC_XDS_BOOTSTRAP_CONFIG");
if (env_config.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "Got bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG "
"environment variable";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "Got bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG "
<< "environment variable";
return std::move(*env_config);
}
// Finally, try fallback config.
@ -270,10 +269,8 @@ absl::StatusOr<RefCountedPtr<GrpcXdsClient>> GrpcXdsClient::GetOrCreate(
key, std::move(*bootstrap), channel_args,
MakeOrphanable<GrpcXdsTransportFactory>(channel_args));
g_xds_client_map->emplace(xds_client->key(), xds_client.get());
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client.get()
<< "] Created xDS client for key " << key;
}
GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << xds_client.get()
<< "] Created xDS client for key " << key;
return xds_client;
}

src/core/xds/xds_client/xds_client.cc
@ -457,10 +457,9 @@ XdsClient::XdsChannel::XdsChannel(WeakRefCountedPtr<XdsClient> xds_client,
: nullptr),
xds_client_(std::move(xds_client)),
server_(server) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get() << "] creating channel "
<< this << " for server " << server.server_uri();
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] creating channel " << this
<< " for server " << server.server_uri();
absl::Status status;
transport_ = xds_client_->transport_factory_->Create(
server,
@ -474,10 +473,9 @@ XdsClient::XdsChannel::XdsChannel(WeakRefCountedPtr<XdsClient> xds_client,
}
XdsClient::XdsChannel::~XdsChannel() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] destroying xds channel "
<< this << " for server " << server_.server_uri();
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] destroying xds channel " << this
<< " for server " << server_.server_uri();
xds_client_.reset(DEBUG_LOCATION, "XdsChannel");
}
@ -486,10 +484,9 @@ XdsClient::XdsChannel::~XdsChannel() {
// called from DualRefCounted::Unref, which cannot have a lock annotation for
// a lock in this subclass.
void XdsClient::XdsChannel::Orphaned() ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] orphaning xds channel "
<< this << " for server " << server_.server_uri();
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] orphaning xds channel " << this
<< " for server " << server_.server_uri();
shutting_down_ = true;
transport_.reset();
// At this time, all strong refs are removed, remove from channel map to
@ -583,10 +580,9 @@ bool XdsClient::XdsChannel::MaybeFallbackLocked(
}
if (authority_state.xds_channels.back()->status().ok()) return true;
}
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get() << "] authority "
<< authority << ": No fallback server";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] authority " << authority
<< ": No fallback server";
return false;
}
@ -1702,10 +1698,9 @@ void XdsClient::WatchResource(const XdsResourceType* type,
// If we already have a cached value for the resource, notify the new
// watcher immediately.
if (resource_state.resource != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this
<< "] returning cached listener data for " << name;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] returning cached listener data for "
<< name;
work_serializer_.Schedule(
[watcher, value = resource_state.resource]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
@ -1715,10 +1710,9 @@ void XdsClient::WatchResource(const XdsResourceType* type,
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::DOES_NOT_EXIST) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this
<< "] reporting cached does-not-exist for " << name;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this
<< "] reporting cached does-not-exist for " << name;
work_serializer_.Schedule(
[watcher]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnResourceDoesNotExist(ReadDelayHandle::NoWait());

src/cpp/ext/gcp/environment_autodetect.cc
@ -268,10 +268,9 @@ class EnvironmentAutoDetectHelper
// If fetching from the MetadataServer failed and we were
// assuming a GCE environment, fallback to "global".
else if (assuming_gce_) {
if (GRPC_TRACE_FLAG_ENABLED(environment_autodetect)) {
LOG(INFO) << "Environment Autodetect: Falling back to "
"global resource type";
}
GRPC_TRACE_LOG(environment_autodetect, INFO)
<< "Environment Autodetect: Falling back to "
<< "global resource type";
assuming_gce_ = false;
resource_.resource_type = "global";
}

src/cpp/server/backend_metric_recorder.cc
@ -98,10 +98,8 @@ void ServerMetricRecorder::SetMemoryUtilization(double value) {
void ServerMetricRecorder::SetApplicationUtilization(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this
<< "] Application utilization rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization rejected: " << value;
return;
}
UpdateBackendMetricDataState([value](BackendMetricData* data) {
@ -142,16 +140,14 @@ void ServerMetricRecorder::SetEps(double value) {
void ServerMetricRecorder::SetNamedUtilization(string_ref name, double value) {
if (!IsUtilizationValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Named utilization rejected: " << value
<< " name: " << std::string(name.data(), name.size());
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Named utilization rejected: " << value
<< " name: " << std::string(name.data(), name.size());
return;
}
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Named utilization set: " << value
<< " name: " << std::string(name.data(), name.size());
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Named utilization set: " << value
<< " name: " << std::string(name.data(), name.size());
UpdateBackendMetricDataState([name, value](BackendMetricData* data) {
data->utilization[absl::string_view(name.data(), name.size())] = value;
});
@ -159,10 +155,9 @@ void ServerMetricRecorder::SetNamedUtilization(string_ref name, double value) {
void ServerMetricRecorder::SetAllNamedUtilization(
std::map<string_ref, double> named_utilization) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] All named utilization updated. size: "
<< named_utilization.size();
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this
<< "] All named utilization updated. size: " << named_utilization.size();
UpdateBackendMetricDataState(
[utilization = std::move(named_utilization)](BackendMetricData* data) {
data->utilization.clear();
@ -212,10 +207,9 @@ void ServerMetricRecorder::ClearEps() {
}
void ServerMetricRecorder::ClearNamedUtilization(string_ref name) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Named utilization cleared. name: "
<< std::string(name.data(), name.size());
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Named utilization cleared. name: "
<< std::string(name.data(), name.size());
UpdateBackendMetricDataState([name](BackendMetricData* data) {
data->utilization.erase(absl::string_view(name.data(), name.size()));
});
@ -281,10 +275,8 @@ BackendMetricState::RecordMemoryUtilizationMetric(double value) {
experimental::CallMetricRecorder&
BackendMetricState::RecordApplicationUtilizationMetric(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this
<< "] Application utilization value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization value rejected: " << value;
return *this;
}
application_utilization_.store(value, std::memory_order_relaxed);
@ -327,19 +319,16 @@ experimental::CallMetricRecorder& BackendMetricState::RecordEpsMetric(
experimental::CallMetricRecorder& BackendMetricState::RecordUtilizationMetric(
string_ref name, double value) {
if (!IsUtilizationValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Utilization value rejected: "
<< std::string(name.data(), name.length()) << " " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Utilization value rejected: "
<< std::string(name.data(), name.length()) << " " << value;
return *this;
}
internal::MutexLock lock(&mu_);
absl::string_view name_sv(name.data(), name.length());
utilization_[name_sv] = value;
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Utilization recorded: " << name_sv << " "
<< value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Utilization recorded: " << name_sv << " " << value;
return *this;
}
@ -348,10 +337,8 @@ experimental::CallMetricRecorder& BackendMetricState::RecordRequestCostMetric(
internal::MutexLock lock(&mu_);
absl::string_view name_sv(name.data(), name.length());
request_cost_[name_sv] = value;
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Request cost recorded: " << name_sv << " "
<< value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Request cost recorded: " << name_sv << " " << value;
return *this;
}
@ -360,10 +347,8 @@ experimental::CallMetricRecorder& BackendMetricState::RecordNamedMetric(
internal::MutexLock lock(&mu_);
absl::string_view name_sv(name.data(), name.length());
named_metrics_[name_sv] = value;
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Named metric recorded: " << name_sv << " "
<< value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Named metric recorded: " << name_sv << " " << value;
return *this;
}
