[Gpr_To_Absl_Logging]

pull/37351/head
tanvi-jagtap 4 months ago
parent f5be412bb1
commit 6b9c0f2737
Changed files (19), with per-file changed-line counts:
 1. src/core/client_channel/retry_filter_legacy_call_data.cc (79)
 2. src/core/client_channel/subchannel.cc (8)
 3. src/core/ext/filters/backend_metrics/backend_metric_filter.cc (5)
 4. src/core/ext/filters/fault_injection/fault_injection_filter.cc (4)
 5. src/core/ext/filters/http/message_compress/compression_filter.cc (4)
 6. src/core/ext/filters/http/server/http_server_filter.cc (4)
 7. src/core/ext/transport/chaotic_good/chaotic_good_transport.h (4)
 8. src/core/ext/transport/chaotic_good/client_transport.cc (3)
 9. src/core/ext/transport/chaotic_good/server_transport.cc (11)
10. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (26)
11. src/core/ext/transport/chttp2/transport/frame_ping.cc (4)
12. src/core/ext/transport/chttp2/transport/frame_rst_stream.cc (4)
13. src/core/ext/transport/chttp2/transport/stream_lists.cc (8)
14. src/core/handshaker/handshaker.cc (9)
15. src/core/resolver/xds/xds_dependency_manager.cc (72)
16. src/core/resolver/xds/xds_resolver.cc (20)
17. src/core/xds/grpc/xds_client_grpc.cc (4)
18. src/core/xds/xds_client/xds_client.cc (32)
19. src/cpp/server/backend_metric_recorder.cc (39)

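Every hunk in this commit applies the same mechanical cleanup: a `<<` left dangling at the end of a wrapped GRPC_TRACE_LOG line is moved to the head of the continuation line, which is clang-format's convention for chained stream insertions. A minimal self-contained sketch of the before/after shape (TRACE_LOG_STUB is a hypothetical stand-in so this compiles outside the gRPC tree; the real GRPC_TRACE_LOG is gated on a runtime trace flag):

#include <iostream>

// Hypothetical stand-in for GRPC_TRACE_LOG(tracer, severity).
#define TRACE_LOG_STUB() std::cout

int main() {
  const void* chand = nullptr;
  const void* calld = nullptr;
  // Before this commit: the operator dangles at the end of the wrapped line.
  TRACE_LOG_STUB() << "chand=" << chand << " calld=" << calld <<
      " attempt=" << 1 << ": destroying call attempt\n";
  // After this commit: each continuation line leads with <<, so a reader
  // scanning the left margin sees immediately that the statement continues.
  TRACE_LOG_STUB() << "chand=" << chand << " calld=" << calld
                   << " attempt=" << 1 << ": destroying call attempt\n";
}

Both statements behave identically; only the layout changes, which is why every hunk below shows matching removed and added lines.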
src/core/client_channel/retry_filter_legacy_call_data.cc
@@ -168,8 +168,8 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
RetryFilter::LegacyCallData::CallAttempt::~CallAttempt() {
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << calld_->chand_ << " calld=" << calld_ <<
- " attempt=" << this << ": destroying call attempt";
+ << "chand=" << calld_->chand_ << " calld=" << calld_
+ << " attempt=" << this << ": destroying call attempt";
}
void RetryFilter::LegacyCallData::CallAttempt::
@@ -520,8 +520,8 @@ void RetryFilter::LegacyCallData::CallAttempt::AddRetriableBatches(
void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << calld_->chand_ << " calld=" << calld_ <<
- " attempt=" << this << ": constructing retriable batches";
+ << "chand=" << calld_->chand_ << " calld=" << calld_
+ << " attempt=" << this << ": constructing retriable batches";
// Construct list of closures to execute, one for each pending batch.
CallCombinerClosureList closures;
AddRetriableBatches(&closures);
@@ -555,8 +555,8 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
calld_->retry_throttle_data_->RecordSuccess();
}
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << calld_->chand_ << " calld=" << calld_ <<
- " attempt=" << this << ": call succeeded";
+ << "chand=" << calld_->chand_ << " calld=" << calld_
+ << " attempt=" << this << ": call succeeded";
return false;
}
// Status is not OK. Check whether the status is retryable.
@@ -580,15 +580,15 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
if (calld_->retry_throttle_data_ != nullptr &&
!calld_->retry_throttle_data_->RecordFailure()) {
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << calld_->chand_ << " calld=" << calld_ <<
- " attempt=" << this << ": retries throttled";
+ << "chand=" << calld_->chand_ << " calld=" << calld_
+ << " attempt=" << this << ": retries throttled";
return false;
}
// Check whether the call is committed.
if (calld_->retry_committed_) {
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << calld_->chand_ << " calld=" << calld_ <<
- " attempt=" << this << ": retries already committed";
+ << "chand=" << calld_->chand_ << " calld=" << calld_
+ << " attempt=" << this << ": retries already committed";
return false;
}
// Check whether we have retries remaining.
@@ -1291,8 +1291,8 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
if (GPR_UNLIKELY(!calld->retry_committed_ && !error.ok() &&
!call_attempt->completed_recv_trailing_metadata_)) {
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << calld->chand_ << " calld=" << calld <<
- " attempt=" << call_attempt << ": deferring on_complete";
+ << "chand=" << calld->chand_ << " calld=" << calld
+ << " attempt=" << call_attempt << ": deferring on_complete";
call_attempt->on_complete_deferred_batches_.emplace_back(
std::move(batch_data), error);
CallCombinerClosureList closures;
@@ -1474,8 +1474,7 @@ grpc_error_handle RetryFilter::LegacyCallData::Init(
auto* chand = static_cast<RetryFilter*>(elem->channel_data);
new (elem->call_data) RetryFilter::LegacyCallData(chand, *args);
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << chand << " calld=" << elem->call_data <<
- ": created call";
+ << "chand=" << chand << " calld=" << elem->call_data << ": created call";
return absl::OkStatus();
}
@@ -1601,9 +1600,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
}
// Cancel retry timer if needed.
if (retry_timer_handle_.has_value()) {
- GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << chand_ << " calld=" << this <<
- ": cancelling retry timer";
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": cancelling retry timer";
if (chand_->event_engine()->Cancel(*retry_timer_handle_)) {
GRPC_CALL_STACK_UNREF(owning_call_, "OnRetryTimer");
}
@@ -1665,15 +1663,15 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
// The attempt will automatically start any necessary replays or
// pending batches.
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << chand_ << " calld=" << this <<
- ": creating call attempt";
+ << "chand=" << chand_ << " calld=" << this << ": creating call attempt";
retry_codepath_started_ = true;
CreateCallAttempt(/*is_transparent_retry=*/false);
return;
}
// Send batches to call attempt.
- GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this <<
- ": starting batch on attempt=" << call_attempt_.get();
+ GRPC_TRACE_LOG(retry, INFO)
+ << "chand=" << chand_ << " calld=" << this
+ << ": starting batch on attempt=" << call_attempt_.get();
call_attempt_->StartRetriableBatches();
}
@@ -1728,23 +1726,22 @@ void RetryFilter::LegacyCallData::MaybeCacheSendOpsForBatch(
}
void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
- GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this <<
- ": destroying send_initial_metadata";
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": destroying send_initial_metadata";
send_initial_metadata_.Clear();
}
void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
if (send_messages_[idx].slices != nullptr) {
- GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << chand_ << " calld=" << this <<
- ": destroying send_messages[" << idx << "]";
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": destroying send_messages[" << idx << "]";
Destruct(std::exchange(send_messages_[idx].slices, nullptr));
}
}
void RetryFilter::LegacyCallData::FreeCachedSendTrailingMetadata() {
- GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this <<
- ": destroying send_trailing_metadata";
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": destroying send_trailing_metadata";
send_trailing_metadata_.Clear();
}
@@ -1780,8 +1777,8 @@ RetryFilter::LegacyCallData::PendingBatch*
RetryFilter::LegacyCallData::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
- GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this <<
- ": adding pending batch at index " << idx;
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": adding pending batch at index " << idx;
PendingBatch* pending = &pending_batches_[idx];
CHECK_EQ(pending->batch, nullptr);
pending->batch = batch;
@@ -1808,9 +1805,8 @@ RetryFilter::LegacyCallData::PendingBatchesAdd(
// ops have already been sent, and we commit to that attempt.
if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
chand_->per_rpc_retry_buffer_size())) {
- GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << chand_ << " calld=" << this <<
- ": exceeded retry buffer size, committing";
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": exceeded retry buffer size, committing";
RetryCommit(call_attempt_.get());
}
return pending;
@@ -1843,9 +1839,8 @@ void RetryFilter::LegacyCallData::MaybeClearPendingBatch(
(!batch->recv_trailing_metadata ||
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
nullptr)) {
- GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << chand_ << " calld=" << this <<
- ": clearing pending batch";
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": clearing pending batch";
PendingBatchClear(pending);
}
}
@@ -1899,8 +1894,8 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr && predicate(batch)) {
GRPC_TRACE_LOG(retry, INFO)
- << "chand=" << chand_ << " calld=" << this << ": " <<
- log_message << " pending batch at index " << i;
+ << "chand=" << chand_ << " calld=" << this << ": " << log_message
+ << " pending batch at index " << i;
return pending;
}
}
@@ -1914,8 +1909,8 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
void RetryFilter::LegacyCallData::RetryCommit(CallAttempt* call_attempt) {
if (retry_committed_) return;
retry_committed_ = true;
- GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this <<
- ": committing retries";
+ GRPC_TRACE_LOG(retry, INFO)
+ << "chand=" << chand_ << " calld=" << this << ": committing retries";
if (call_attempt != nullptr) {
// If the call attempt's LB call has been committed, invoke the
// call's on_commit callback.
@@ -1980,8 +1975,8 @@ void RetryFilter::LegacyCallData::OnRetryTimerLocked(
void RetryFilter::LegacyCallData::AddClosureToStartTransparentRetry(
CallCombinerClosureList* closures) {
- GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this <<
- ": scheduling transparent retry";
+ GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
+ << ": scheduling transparent retry";
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
GRPC_CLOSURE_INIT(&retry_closure_, StartTransparentRetry, this, nullptr);
closures->Add(&retry_closure_, absl::OkStatus(), "start transparent retry");

src/core/client_channel/subchannel.cc
@@ -603,8 +603,8 @@ void Subchannel::ThrottleKeepaliveTime(int new_keepalive_time) {
if (new_keepalive_time > keepalive_time_) {
keepalive_time_ = new_keepalive_time;
GRPC_TRACE_LOG(subchannel, INFO)
- << "subchannel " << this << " " << key_.ToString() <<
- ": throttling keepalive time to " << new_keepalive_time;
+ << "subchannel " << this << " " << key_.ToString()
+ << ": throttling keepalive time to " << new_keepalive_time;
args_ = args_.Set(GRPC_ARG_KEEPALIVE_TIME_MS, new_keepalive_time);
}
}
@@ -758,8 +758,8 @@ void Subchannel::OnRetryTimer() {
void Subchannel::OnRetryTimerLocked() {
if (shutdown_) return;
GRPC_TRACE_LOG(subchannel, INFO)
- << "subchannel " << this << " " << key_.ToString() <<
- ": backoff delay elapsed, reporting IDLE";
+ << "subchannel " << this << " " << key_.ToString()
+ << ": backoff delay elapsed, reporting IDLE";
SetConnectivityStateLocked(GRPC_CHANNEL_IDLE, absl::OkStatus());
}

src/core/ext/filters/backend_metrics/backend_metric_filter.cc
@@ -135,8 +135,9 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
}
absl::optional<std::string> serialized = MaybeSerializeBackendMetrics(ctx);
if (serialized.has_value() && !serialized->empty()) {
- GRPC_TRACE_LOG(backend_metric_filter, INFO) << "[" << this <<
- "] Backend metrics serialized. size: " << serialized->size();
+ GRPC_TRACE_LOG(backend_metric_filter, INFO)
+ << "[" << this
+ << "] Backend metrics serialized. size: " << serialized->size();
md.Set(EndpointLoadMetricsBinMetadata(),
Slice::FromCopiedString(std::move(*serialized)));
} else if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {

src/core/ext/filters/fault_injection/fault_injection_filter.cc
@@ -151,8 +151,8 @@ ArenaPromise<absl::Status> FaultInjectionFilter::Call::OnClientInitialMetadata(
ClientMetadata& md, FaultInjectionFilter* filter) {
auto decision = filter->MakeInjectionDecision(md);
GRPC_TRACE_LOG(fault_injection_filter, INFO)
- << "chand=" << this << ": Fault injection triggered " <<
- decision.ToString();
+ << "chand=" << this << ": Fault injection triggered "
+ << decision.ToString();
auto delay = decision.DelayUntil();
return TrySeq(Sleep(delay), [decision = std::move(decision)]() {
return decision.MaybeAbort();

src/core/ext/filters/http/message_compress/compression_filter.cc
@@ -112,8 +112,8 @@ ChannelCompression::ChannelCompression(const ChannelArgs& args)
MessageHandle ChannelCompression::CompressMessage(
MessageHandle message, grpc_compression_algorithm algorithm) const {
GRPC_TRACE_LOG(compression, INFO)
- << "CompressMessage: len=" << message->payload()->Length() <<
- " alg=" << algorithm << " flags=" << message->flags();
+ << "CompressMessage: len=" << message->payload()->Length()
+ << " alg=" << algorithm << " flags=" << message->flags();
auto* call_tracer = MaybeGetContext<CallTracerInterface>();
if (call_tracer != nullptr) {
call_tracer->RecordSendMessage(*message->payload());

src/core/ext/filters/http/server/http_server_filter.cc
@@ -139,8 +139,8 @@ ServerMetadataHandle HttpServerFilter::Call::OnClientInitialMetadata(
}
void HttpServerFilter::Call::OnServerInitialMetadata(ServerMetadata& md) {
- GRPC_TRACE_LOG(call, INFO) << GetContext<Activity>()->DebugTag() <<
- "[http-server] Write metadata";
+ GRPC_TRACE_LOG(call, INFO)
+ << GetContext<Activity>()->DebugTag() << "[http-server] Write metadata";
FilterOutgoingMetadata(&md);
md.Set(HttpStatusMetadata(), 200);
md.Set(ContentTypeMetadata(), ContentTypeMetadata::kApplicationGrpc);

src/core/ext/transport/chaotic_good/chaotic_good_transport.h
@@ -126,8 +126,8 @@ class ChaoticGoodTransport : public RefCounted<ChaoticGoodTransport> {
auto s = frame.Deserialize(&parser_, header, bitgen_, arena,
std::move(buffers), limits);
GRPC_TRACE_LOG(chaotic_good, INFO)
- << "CHAOTIC_GOOD: DeserializeFrame " <<
- (s.ok() ? frame.ToString() : s.ToString());
+ << "CHAOTIC_GOOD: DeserializeFrame "
+ << (s.ok() ? frame.ToString() : s.ToString());
return s;
}

src/core/ext/transport/chaotic_good/client_transport.cc
@@ -281,8 +281,7 @@ auto ChaoticGoodClientTransport::CallOutboundLoop(uint32_t stream_id,
call_handler.PullClientInitialMetadata(),
[send_fragment](ClientMetadataHandle md) mutable {
GRPC_TRACE_LOG(chaotic_good, INFO)
- << "CHAOTIC_GOOD: Sending initial metadata: " <<
- md->DebugString();
+ << "CHAOTIC_GOOD: Sending initial metadata: " << md->DebugString();
ClientFragmentFrame frame;
frame.headers = std::move(md);
return send_fragment(std::move(frame));

src/core/ext/transport/chaotic_good/server_transport.cc
@@ -76,8 +76,7 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
uint32_t stream_id) {
DCHECK(frame.headers == nullptr);
GRPC_TRACE_LOG(chaotic_good, INFO)
- << "CHAOTIC_GOOD: PushFragmentIntoCall: frame=" <<
- frame.ToString();
+ << "CHAOTIC_GOOD: PushFragmentIntoCall: frame=" << frame.ToString();
return Seq(If(
frame.message.has_value(),
[&call_initiator, &frame]() mutable {
@@ -185,8 +184,8 @@ auto ChaoticGoodServerTransport::SendCallInitialMetadataAndBody(
[stream_id, outgoing_frames, call_initiator,
this](absl::optional<ServerMetadataHandle> md) mutable {
GRPC_TRACE_LOG(chaotic_good, INFO)
- << "CHAOTIC_GOOD: SendCallInitialMetadataAndBody: md=" <<
- (md.has_value() ? (*md)->DebugString() : "null");
+ << "CHAOTIC_GOOD: SendCallInitialMetadataAndBody: md="
+ << (md.has_value() ? (*md)->DebugString() : "null");
return If(
md.has_value(),
[&md, stream_id, &outgoing_frames, &call_initiator, this]() {
@@ -339,8 +338,8 @@ auto ChaoticGoodServerTransport::OnTransportActivityDone(
return [self = RefAsSubclass<ChaoticGoodServerTransport>(),
activity](absl::Status status) {
GRPC_TRACE_LOG(chaotic_good, INFO)
- << "CHAOTIC_GOOD: OnTransportActivityDone: activity=" <<
- activity << " status=" << status;
+ << "CHAOTIC_GOOD: OnTransportActivityDone: activity=" << activity
+ << " status=" << status;
self->AbortWithError();
};
}

src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -1654,8 +1654,8 @@ void grpc_chttp2_transport::PerformStreamOp(
}
GRPC_TRACE_LOG(http, INFO)
- << "perform_stream_op[s=" << s << "; op=" << op <<
- "]: " << grpc_transport_stream_op_batch_string(op, false);
+ << "perform_stream_op[s=" << s << "; op=" << op
+ << "]: " << grpc_transport_stream_op_batch_string(op, false);
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
op->handler_private.extra_arg = gs;
@@ -1999,8 +1999,8 @@ static void perform_transport_op_locked(void* stream_op,
}
void grpc_chttp2_transport::PerformOp(grpc_transport_op* op) {
- GRPC_TRACE_LOG(http, INFO) << "perform_transport_op[t=" << this <<
- "]: " << grpc_transport_op_string(op);
+ GRPC_TRACE_LOG(http, INFO) << "perform_transport_op[t=" << this
+ << "]: " << grpc_transport_op_string(op);
op->handler_private.extra_arg = this;
Ref().release()->combiner->Run(
GRPC_CLOSURE_INIT(&op->handler_private.closure,
@@ -2815,8 +2815,9 @@ static void start_bdp_ping(grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
static void start_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
- GRPC_TRACE_LOG(http, INFO) << t->peer_string.as_string_view() <<
- ": Start BDP ping err=" << grpc_core::StatusToString(error);
+ GRPC_TRACE_LOG(http, INFO)
+ << t->peer_string.as_string_view()
+ << ": Start BDP ping err=" << grpc_core::StatusToString(error);
if (!error.ok() || !t->closed_with_error.ok()) {
return;
}
@@ -2839,8 +2840,9 @@ static void finish_bdp_ping(grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
static void finish_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
- GRPC_TRACE_LOG(http, INFO) << t->peer_string.as_string_view() <<
- ": Complete BDP ping err=" << grpc_core::StatusToString(error);
+ GRPC_TRACE_LOG(http, INFO)
+ << t->peer_string.as_string_view()
+ << ": Complete BDP ping err=" << grpc_core::StatusToString(error);
if (!error.ok() || !t->closed_with_error.ok()) {
return;
}
@@ -3103,8 +3105,8 @@ static void benign_reclaimer_locked(
// Channel with no active streams: send a goaway to try and make it
// disconnect cleanly
GRPC_TRACE_LOG(resource_quota, INFO)
- << "HTTP2: " << t->peer_string.as_string_view() <<
- " - send goaway to free memory";
+ << "HTTP2: " << t->peer_string.as_string_view()
+ << " - send goaway to free memory";
send_goaway(t.get(),
grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
grpc_core::StatusIntProperty::kHttp2Error,
@@ -3129,8 +3131,8 @@ static void destructive_reclaimer_locked(
// As stream_map is a hash map, this selects effectively a random stream.
grpc_chttp2_stream* s = t->stream_map.begin()->second;
GRPC_TRACE_LOG(resource_quota, INFO)
- << "HTTP2: " << t->peer_string.as_string_view() <<
- " - abandon stream id " << s->id;
+ << "HTTP2: " << t->peer_string.as_string_view()
+ << " - abandon stream id " << s->id;
grpc_chttp2_cancel_stream(
t.get(), s,
grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),

src/core/ext/transport/chttp2/transport/frame_ping.cc
@@ -94,8 +94,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
CHECK(is_last);
if (p->is_ack) {
GRPC_TRACE_LOG(http2_ping, INFO)
- << (t->is_client ? "CLIENT" : "SERVER") << "[" << t <<
- "]: received ping ack " << p->opaque_8bytes;
+ << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
+ << "]: received ping ack " << p->opaque_8bytes;
grpc_chttp2_ack_ping(t, p->opaque_8bytes);
} else {
if (!t->is_client) {

src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
@@ -114,8 +114,8 @@ grpc_error_handle grpc_chttp2_rst_stream_parser_parse(void* parser,
((static_cast<uint32_t>(p->reason_bytes[2])) << 8) |
((static_cast<uint32_t>(p->reason_bytes[3])));
GRPC_TRACE_LOG(http, INFO)
- << "[chttp2 transport=" << t << " stream=" << s <<
- "] received RST_STREAM(reason=" << reason << ")";
+ << "[chttp2 transport=" << t << " stream=" << s
+ << "] received RST_STREAM(reason=" << reason << ")";
grpc_error_handle error;
if (reason != GRPC_HTTP2_NO_ERROR || s->trailing_metadata_buffer.empty()) {
error = grpc_error_set_int(

src/core/ext/transport/chttp2/transport/stream_lists.cc
@@ -91,8 +91,8 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
t->lists[id].tail = s->links[id].prev;
}
GRPC_TRACE_LOG(http2_stream_state, INFO)
- << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr") <<
- "]: remove from " << stream_list_id_string(id);
+ << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
+ << "]: remove from " << stream_list_id_string(id);
}
static bool stream_list_maybe_remove(grpc_chttp2_transport* t,
@@ -122,8 +122,8 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
t->lists[id].tail = s;
s->included.set(id);
GRPC_TRACE_LOG(http2_stream_state, INFO)
- << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr") <<
- "]: add to " << stream_list_id_string(id);
+ << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
+ << "]: add to " << stream_list_id_string(id);
}
static bool stream_list_add(grpc_chttp2_transport* t, grpc_chttp2_stream* s,

src/core/handshaker/handshaker.cc
@@ -139,13 +139,14 @@ void HandshakeManager::DoHandshake(
void HandshakeManager::Shutdown(absl::Status error) {
MutexLock lock(&mu_);
if (!is_shutdown_) {
- GRPC_TRACE_LOG(handshaker, INFO) << "handshake_manager " << this <<
- ": Shutdown() called: " << error;
+ GRPC_TRACE_LOG(handshaker, INFO)
+ << "handshake_manager " << this << ": Shutdown() called: " << error;
is_shutdown_ = true;
// Shutdown the handshaker that's currently in progress, if any.
if (index_ > 0) {
- GRPC_TRACE_LOG(handshaker, INFO) << "handshake_manager " << this <<
- ": shutting down handshaker at index " << index_ - 1;
+ GRPC_TRACE_LOG(handshaker, INFO)
+ << "handshake_manager " << this
+ << ": shutting down handshaker at index " << index_ - 1;
handshakers_[index_ - 1]->Shutdown(std::move(error));
}
}

src/core/resolver/xds/xds_dependency_manager.cc
@@ -364,8 +364,9 @@ XdsDependencyManager::XdsDependencyManager(
listener_resource_name_(std::move(listener_resource_name)),
args_(std::move(args)),
interested_parties_(interested_parties) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] starting watch for listener " << listener_resource_name_;
+ GRPC_TRACE_LOG(xds_resolver, INFO)
+ << "[XdsDependencyManager " << this << "] starting watch for listener "
+ << listener_resource_name_;
auto listener_watcher = MakeRefCounted<ListenerWatcher>(Ref());
listener_watcher_ = listener_watcher.get();
XdsListenerResourceType::StartWatch(
@@ -418,8 +419,8 @@ void XdsDependencyManager::ResetBackoff() {
void XdsDependencyManager::OnListenerUpdate(
std::shared_ptr<const XdsListenerResource> listener) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] received Listener update";
+ GRPC_TRACE_LOG(xds_resolver, INFO)
+ << "[XdsDependencyManager " << this << "] received Listener update";
if (xds_client_ == nullptr) return;
const auto* hcm = absl::get_if<XdsListenerResource::HttpConnectionManager>(
&listener->listener);
@@ -593,8 +594,8 @@ void XdsDependencyManager::OnResourceDoesNotExist(std::string context) {
void XdsDependencyManager::OnClusterUpdate(
const std::string& name,
std::shared_ptr<const XdsClusterResource> cluster) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] received Cluster update: " << name;
+ GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
+ << "] received Cluster update: " << name;
if (xds_client_ == nullptr) return;
auto it = cluster_watchers_.find(name);
if (it == cluster_watchers_.end()) return;
@@ -604,8 +605,9 @@ void XdsDependencyManager::OnClusterUpdate(
void XdsDependencyManager::OnClusterError(const std::string& name,
absl::Status status) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] received Cluster error: " << name << " " << status;
+ GRPC_TRACE_LOG(xds_resolver, INFO)
+ << "[XdsDependencyManager " << this
+ << "] received Cluster error: " << name << " " << status;
if (xds_client_ == nullptr) return;
auto it = cluster_watchers_.find(name);
if (it == cluster_watchers_.end()) return;
@@ -617,8 +619,8 @@ void XdsDependencyManager::OnClusterError(const std::string& name,
}
void XdsDependencyManager::OnClusterDoesNotExist(const std::string& name) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] Cluster does not exist: " << name;
+ GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
+ << "] Cluster does not exist: " << name;
if (xds_client_ == nullptr) return;
auto it = cluster_watchers_.find(name);
if (it == cluster_watchers_.end()) return;
@@ -630,8 +632,8 @@ void XdsDependencyManager::OnClusterDoesNotExist(const std::string& name) {
void XdsDependencyManager::OnEndpointUpdate(
const std::string& name,
std::shared_ptr<const XdsEndpointResource> endpoint) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] received Endpoint update: " << name;
+ GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
+ << "] received Endpoint update: " << name;
if (xds_client_ == nullptr) return;
auto it = endpoint_watchers_.find(name);
if (it == endpoint_watchers_.end()) return;
@@ -660,8 +662,9 @@ void XdsDependencyManager::OnEndpointUpdate(
void XdsDependencyManager::OnEndpointError(const std::string& name,
absl::Status status) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] received Endpoint error: " << name << " " << status;
+ GRPC_TRACE_LOG(xds_resolver, INFO)
+ << "[XdsDependencyManager " << this
+ << "] received Endpoint error: " << name << " " << status;
if (xds_client_ == nullptr) return;
auto it = endpoint_watchers_.find(name);
if (it == endpoint_watchers_.end()) return;
@@ -673,8 +676,8 @@ void XdsDependencyManager::OnEndpointError(const std::string& name,
}
void XdsDependencyManager::OnEndpointDoesNotExist(const std::string& name) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] Endpoint does not exist: " << name;
+ GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
+ << "] Endpoint does not exist: " << name;
if (xds_client_ == nullptr) return;
auto it = endpoint_watchers_.find(name);
if (it == endpoint_watchers_.end()) return;
@@ -686,8 +689,8 @@ void XdsDependencyManager::OnEndpointDoesNotExist(const std::string& name) {
void XdsDependencyManager::OnDnsResult(const std::string& dns_name,
Resolver::Result result) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] received DNS update: " << dns_name;
+ GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
+ << "] received DNS update: " << dns_name;
if (xds_client_ == nullptr) return;
auto it = dns_resolvers_.find(dns_name);
if (it == dns_resolvers_.end()) return;
@@ -748,8 +751,8 @@ bool XdsDependencyManager::PopulateClusterConfigMap(
if (state.watcher == nullptr) {
auto watcher = MakeRefCounted<ClusterWatcher>(Ref(), name);
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[XdsDependencyManager " << this <<
- "] starting watch for cluster " << name;
+ << "[XdsDependencyManager " << this << "] starting watch for cluster "
+ << name;
state.watcher = watcher.get();
XdsClusterResourceType::StartWatch(xds_client_.get(), name,
std::move(watcher));
@@ -774,8 +777,8 @@ bool XdsDependencyManager::PopulateClusterConfigMap(
auto& eds_state = endpoint_watchers_[eds_resource_name];
if (eds_state.watcher == nullptr) {
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[XdsDependencyManager " << this <<
- "] starting watch for endpoint " << eds_resource_name;
+ << "[XdsDependencyManager " << this
+ << "] starting watch for endpoint " << eds_resource_name;
auto watcher =
MakeRefCounted<EndpointWatcher>(Ref(), eds_resource_name);
eds_state.watcher = watcher.get();
@@ -801,8 +804,8 @@ bool XdsDependencyManager::PopulateClusterConfigMap(
auto& dns_state = dns_resolvers_[logical_dns.hostname];
if (dns_state.resolver == nullptr) {
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[XdsDependencyManager " << this <<
- "] starting DNS resolver for " << logical_dns.hostname;
+ << "[XdsDependencyManager " << this
+ << "] starting DNS resolver for " << logical_dns.hostname;
auto* fake_resolver_response_generator = args_.GetPointer<
FakeResolverResponseGenerator>(
GRPC_ARG_XDS_LOGICAL_DNS_CLUSTER_FAKE_RESOLVER_RESPONSE_GENERATOR);
@@ -967,8 +970,8 @@ void XdsDependencyManager::MaybeReportUpdate() {
continue;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[XdsDependencyManager " << this <<
- "] cancelling watch for cluster " << cluster_name;
+ << "[XdsDependencyManager " << this << "] cancelling watch for cluster "
+ << cluster_name;
XdsClusterResourceType::CancelWatch(xds_client_.get(), cluster_name,
it->second.watcher,
/*delay_unsubscription=*/false);
@@ -984,8 +987,8 @@ void XdsDependencyManager::MaybeReportUpdate() {
continue;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[XdsDependencyManager " << this <<
- "] cancelling watch for EDS resource " << eds_resource_name;
+ << "[XdsDependencyManager " << this
+ << "] cancelling watch for EDS resource " << eds_resource_name;
XdsEndpointResourceType::CancelWatch(xds_client_.get(), eds_resource_name,
it->second.watcher,
/*delay_unsubscription=*/false);
@@ -1000,19 +1003,20 @@ void XdsDependencyManager::MaybeReportUpdate() {
continue;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[XdsDependencyManager " << this <<
- "] shutting down DNS resolver for " << dns_name;
+ << "[XdsDependencyManager " << this
+ << "] shutting down DNS resolver for " << dns_name;
dns_resolvers_.erase(it++);
}
// If we have all the data we need, then send an update.
if (!have_all_resources) {
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[XdsDependencyManager " << this <<
- "] missing data -- NOT returning config";
+ << "[XdsDependencyManager " << this
+ << "] missing data -- NOT returning config";
return;
}
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this <<
- "] returning config: " << config->ToString();
+ GRPC_TRACE_LOG(xds_resolver, INFO)
+ << "[XdsDependencyManager " << this
+ << "] returning config: " << config->ToString();
watcher_->OnUpdate(std::move(config));
}

src/core/resolver/xds/xds_resolver.cc
@@ -545,9 +545,9 @@ XdsResolver::RouteConfigData::CreateMethodConfig(
absl::Status XdsResolver::RouteConfigData::AddRouteEntry(
XdsResolver* resolver, const XdsRouteConfigResource::Route& route,
const Duration& default_max_stream_duration) {
- GRPC_TRACE_LOG(xds_resolver, INFO) << "[xds_resolver " << resolver
- << "] XdsConfigSelector " << this <<
- ": route: " << route.ToString();
+ GRPC_TRACE_LOG(xds_resolver, INFO)
+ << "[xds_resolver " << resolver << "] XdsConfigSelector " << this
+ << ": route: " << route.ToString();
routes_.emplace_back(route);
auto* route_entry = &routes_.back();
auto maybe_add_cluster = [&](absl::string_view cluster_key,
@@ -636,9 +636,8 @@ XdsResolver::XdsConfigSelector::XdsConfigSelector(
RefCountedPtr<RouteConfigData> route_config_data)
: resolver_(std::move(resolver)),
route_config_data_(std::move(route_config_data)) {
- GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[xds_resolver " << resolver_.get() <<
- "] creating XdsConfigSelector " << this;
+ GRPC_TRACE_LOG(xds_resolver, INFO) << "[xds_resolver " << resolver_.get()
+ << "] creating XdsConfigSelector " << this;
// Populate filter list.
const auto& http_filter_registry =
static_cast<const GrpcXdsBootstrap&>(resolver_->xds_client_->bootstrap())
@@ -659,8 +658,8 @@ XdsResolver::XdsConfigSelector::XdsConfigSelector(
XdsResolver::XdsConfigSelector::~XdsConfigSelector() {
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[xds_resolver " << resolver_.get() <<
- "] destroying XdsConfigSelector " << this;
+ << "[xds_resolver " << resolver_.get()
+ << "] destroying XdsConfigSelector " << this;
route_config_data_.reset();
if (!IsWorkSerializerDispatchEnabled()) {
resolver_->MaybeRemoveUnusedClusters();
@@ -960,9 +959,8 @@ void XdsResolver::StartLocked() {
absl::StrReplaceAll(name_template, {{"%s", resource_name_fragment}});
}
GRPC_TRACE_LOG(xds_resolver, INFO)
- << "[xds_resolver " << this
- << "] Started with lds_resource_name " <<
- lds_resource_name_;
+ << "[xds_resolver " << this << "] Started with lds_resource_name "
+ << lds_resource_name_;
// Start watch for xDS config.
dependency_mgr_ = MakeOrphanable<XdsDependencyManager>(
xds_client_, work_serializer_,

src/core/xds/grpc/xds_client_grpc.cc
@@ -269,8 +269,8 @@ absl::StatusOr<RefCountedPtr<GrpcXdsClient>> GrpcXdsClient::GetOrCreate(
key, std::move(*bootstrap), channel_args,
MakeOrphanable<GrpcXdsTransportFactory>(channel_args));
g_xds_client_map->emplace(xds_client->key(), xds_client.get());
- GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << xds_client.get() <<
- "] Created xDS client for key " << key;
+ GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << xds_client.get()
+ << "] Created xDS client for key " << key;
return xds_client;
}

src/core/xds/xds_client/xds_client.cc
@@ -457,9 +457,9 @@ XdsClient::XdsChannel::XdsChannel(WeakRefCountedPtr<XdsClient> xds_client,
: nullptr),
xds_client_(std::move(xds_client)),
server_(server) {
- GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << xds_client_.get()
- << "] creating channel " <<
- this << " for server " << server.server_uri();
+ GRPC_TRACE_LOG(xds_client, INFO)
+ << "[xds_client " << xds_client_.get() << "] creating channel " << this
+ << " for server " << server.server_uri();
absl::Status status;
transport_ = xds_client_->transport_factory_->Create(
server,
@@ -473,9 +473,9 @@ XdsClient::XdsChannel::XdsChannel(WeakRefCountedPtr<XdsClient> xds_client,
}
XdsClient::XdsChannel::~XdsChannel() {
- GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << xds_client()
- << "] destroying xds channel " <<
- this << " for server " << server_.server_uri();
+ GRPC_TRACE_LOG(xds_client, INFO)
+ << "[xds_client " << xds_client() << "] destroying xds channel " << this
+ << " for server " << server_.server_uri();
xds_client_.reset(DEBUG_LOCATION, "XdsChannel");
}
@@ -484,9 +484,9 @@ XdsClient::XdsChannel::~XdsChannel() {
// called from DualRefCounted::Unref, which cannot have a lock annotation for
// a lock in this subclass.
void XdsClient::XdsChannel::Orphaned() ABSL_NO_THREAD_SAFETY_ANALYSIS {
- GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << xds_client()
- << "] orphaning xds channel " <<
- this << " for server " << server_.server_uri();
+ GRPC_TRACE_LOG(xds_client, INFO)
+ << "[xds_client " << xds_client() << "] orphaning xds channel " << this
+ << " for server " << server_.server_uri();
shutting_down_ = true;
transport_.reset();
// At this time, all strong refs are removed, remove from channel map to
@@ -581,8 +581,8 @@ bool XdsClient::XdsChannel::MaybeFallbackLocked(
if (authority_state.xds_channels.back()->status().ok()) return true;
}
GRPC_TRACE_LOG(xds_client, INFO)
- << "[xds_client " << xds_client_.get() << "] authority " <<
- authority << ": No fallback server";
+ << "[xds_client " << xds_client_.get() << "] authority " << authority
+ << ": No fallback server";
return false;
}
@@ -1698,8 +1698,9 @@ void XdsClient::WatchResource(const XdsResourceType* type,
// If we already have a cached value for the resource, notify the new
// watcher immediately.
if (resource_state.resource != nullptr) {
- GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << this <<
- "] returning cached listener data for " << name;
+ GRPC_TRACE_LOG(xds_client, INFO)
+ << "[xds_client " << this << "] returning cached listener data for "
+ << name;
work_serializer_.Schedule(
[watcher, value = resource_state.resource]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
@@ -1709,8 +1710,9 @@ void XdsClient::WatchResource(const XdsResourceType* type,
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::DOES_NOT_EXIST) {
- GRPC_TRACE_LOG(xds_client, INFO) << "[xds_client " << this <<
- "] reporting cached does-not-exist for " << name;
+ GRPC_TRACE_LOG(xds_client, INFO)
+ << "[xds_client " << this
+ << "] reporting cached does-not-exist for " << name;
work_serializer_.Schedule(
[watcher]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnResourceDoesNotExist(ReadDelayHandle::NoWait());

src/cpp/server/backend_metric_recorder.cc
@@ -98,8 +98,8 @@ void ServerMetricRecorder::SetMemoryUtilization(double value) {
void ServerMetricRecorder::SetApplicationUtilization(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
- GRPC_TRACE_LOG(backend_metric, INFO) << "[" << this <<
- "] Application utilization rejected: " << value;
+ GRPC_TRACE_LOG(backend_metric, INFO)
+ << "[" << this << "] Application utilization rejected: " << value;
return;
}
UpdateBackendMetricDataState([value](BackendMetricData* data) {
@@ -141,13 +141,13 @@ void ServerMetricRecorder::SetEps(double value) {
void ServerMetricRecorder::SetNamedUtilization(string_ref name, double value) {
if (!IsUtilizationValid(value)) {
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] Named utilization rejected: " << value <<
- " name: " << std::string(name.data(), name.size());
+ << "[" << this << "] Named utilization rejected: " << value
+ << " name: " << std::string(name.data(), name.size());
return;
}
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] Named utilization set: " << value <<
- " name: " << std::string(name.data(), name.size());
+ << "[" << this << "] Named utilization set: " << value
+ << " name: " << std::string(name.data(), name.size());
UpdateBackendMetricDataState([name, value](BackendMetricData* data) {
data->utilization[absl::string_view(name.data(), name.size())] = value;
});
@@ -156,8 +156,8 @@ void ServerMetricRecorder::SetNamedUtilization(string_ref name, double value) {
void ServerMetricRecorder::SetAllNamedUtilization(
std::map<string_ref, double> named_utilization) {
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] All named utilization updated. size: " <<
- named_utilization.size();
+ << "[" << this
+ << "] All named utilization updated. size: " << named_utilization.size();
UpdateBackendMetricDataState(
[utilization = std::move(named_utilization)](BackendMetricData* data) {
data->utilization.clear();
@@ -208,8 +208,8 @@ void ServerMetricRecorder::ClearEps() {
void ServerMetricRecorder::ClearNamedUtilization(string_ref name) {
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] Named utilization cleared. name: " <<
- std::string(name.data(), name.size());
+ << "[" << this << "] Named utilization cleared. name: "
+ << std::string(name.data(), name.size());
UpdateBackendMetricDataState([name](BackendMetricData* data) {
data->utilization.erase(absl::string_view(name.data(), name.size()));
});
@@ -275,8 +275,8 @@ BackendMetricState::RecordMemoryUtilizationMetric(double value) {
experimental::CallMetricRecorder&
BackendMetricState::RecordApplicationUtilizationMetric(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
- GRPC_TRACE_LOG(backend_metric, INFO) << "[" << this <<
- "] Application utilization value rejected: " << value;
+ GRPC_TRACE_LOG(backend_metric, INFO)
+ << "[" << this << "] Application utilization value rejected: " << value;
return *this;
}
application_utilization_.store(value, std::memory_order_relaxed);
@@ -320,16 +320,15 @@ experimental::CallMetricRecorder& BackendMetricState::RecordUtilizationMetric(
string_ref name, double value) {
if (!IsUtilizationValid(value)) {
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] Utilization value rejected: " <<
- std::string(name.data(), name.length()) << " " << value;
+ << "[" << this << "] Utilization value rejected: "
+ << std::string(name.data(), name.length()) << " " << value;
return *this;
}
internal::MutexLock lock(&mu_);
absl::string_view name_sv(name.data(), name.length());
utilization_[name_sv] = value;
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] Utilization recorded: " << name_sv << " " <<
- value;
+ << "[" << this << "] Utilization recorded: " << name_sv << " " << value;
return *this;
}
@@ -339,9 +338,7 @@ experimental::CallMetricRecorder& BackendMetricState::RecordRequestCostMetric(
absl::string_view name_sv(name.data(), name.length());
request_cost_[name_sv] = value;
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] Request cost recorded: " << name_sv
- << " " <<
- value;
+ << "[" << this << "] Request cost recorded: " << name_sv << " " << value;
return *this;
}
@@ -351,9 +348,7 @@ experimental::CallMetricRecorder& BackendMetricState::RecordNamedMetric(
absl::string_view name_sv(name.data(), name.length());
named_metrics_[name_sv] = value;
GRPC_TRACE_LOG(backend_metric, INFO)
- << "[" << this << "] Named metric recorded: " << name_sv
- << " " <<
- value;
+ << "[" << this << "] Named metric recorded: " << name_sv << " " << value;
return *this;
}

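For context on the macro every hunk touches: GRPC_TRACE_LOG belongs to the gpr-to-absl logging migration this PR is part of, and it forwards to absl logging only when the named tracer is enabled. A rough self-contained equivalent, with an illustrative flag and macro name rather than gRPC's actual definitions:

#include "absl/log/log.h"

// Illustrative flag; in gRPC the check is a runtime trace flag driven by the
// GRPC_TRACE environment variable.
static bool trace_retry_enabled = true;

// Rough equivalent of GRPC_TRACE_LOG(tracer, severity): absl's LOG_IF emits
// the streamed message only when the condition holds, so the expressions
// chained after << are not evaluated when tracing is off.
#define TRACE_LOG_SKETCH(flag, severity) LOG_IF(severity, (flag))

void ExampleUsage(void* chand, void* calld) {
  TRACE_LOG_SKETCH(trace_retry_enabled, INFO)
      << "chand=" << chand << " calld=" << calld << ": created call";
}

Because the whole statement is one chained stream expression, the line-wrapping style fixed throughout this commit is purely cosmetic.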