[Gpr_To_Absl_Logging] Using GRPC_TRACE_LOG instead of GRPC_TRACE_FLAG_ENABLED (#37387)

[Gpr_To_Absl_Logging] Using GRPC_TRACE_LOG instead of GRPC_TRACE_FLAG_ENABLED

Closes #37387

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/37387 from tanvi-jagtap:GRPC_TRACE_FLAG_ENABLED_06 1b87cfece9
PiperOrigin-RevId: 662349186
pull/37443/head
Tanvi Jagtap authored 6 months ago; committed by Copybara-Service
parent 7e15ebee31
commit 28730dcfd9
  1. src/core/client_channel/retry_filter_legacy_call_data.cc (129 changed lines)
  2. src/core/client_channel/subchannel.cc (8 changed lines)
  3. src/core/ext/filters/backend_metrics/backend_metric_filter.cc (10 changed lines)
  4. src/core/ext/filters/http/message_compress/compression_filter.cc (9 changed lines)
  5. src/core/ext/filters/message_size/message_size_filter.cc (9 changed lines)
  6. src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc (5 changed lines)
  7. src/core/ext/transport/chaotic_good/server_transport.cc (5 changed lines)
  8. src/core/ext/transport/chttp2/transport/frame_ping.cc (5 changed lines)
  9. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (5 changed lines)
  10. src/core/handshaker/handshaker.cc (27 changed lines)
  11. src/core/handshaker/security/secure_endpoint.cc (5 changed lines)
  12. src/core/resolver/xds/xds_dependency_manager.cc (35 changed lines)
  13. src/core/resolver/xds/xds_resolver.cc (23 changed lines)
  14. src/core/server/xds_server_config_fetcher.cc (8 changed lines)
  15. src/core/tsi/fake_transport_security.cc (8 changed lines)
  16. src/core/xds/grpc/xds_client_grpc.cc (14 changed lines)
  17. src/core/xds/xds_client/xds_client.cc (89 changed lines)
  18. src/core/xds/xds_client/xds_client_stats.cc (18 changed lines)
  19. src/cpp/server/backend_metric_recorder.cc (113 changed lines)

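The hunks below all apply the same mechanical rewrite: a LOG(INFO) call that was wrapped in an explicit GRPC_TRACE_FLAG_ENABLED(...) check is collapsed into a single GRPC_TRACE_LOG(...) statement. A minimal before/after sketch of the pattern, using illustrative placeholder variables (chand, calld) and assuming only what the diff itself shows about the macro's call shape:

// Before: the trace flag is checked by hand around an Abseil LOG(INFO).
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
  LOG(INFO) << "chand=" << chand << " calld=" << calld
            << ": created attempt";
}

// After: one streaming statement. GRPC_TRACE_LOG(flag, severity) is
// assumed to perform the same flag check internally (e.g. via LOG_IF),
// so the message is still only built when the "retry" flag is enabled.
GRPC_TRACE_LOG(retry, INFO)
    << "chand=" << chand << " calld=" << calld << ": created attempt";

The only variation is at sites where the old check sat in an else-if chain (backend_metric_filter.cc, frame_ping.cc): there the branch becomes a plain else whose body is the new GRPC_TRACE_LOG statement.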
@@ -139,21 +139,18 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
}
},
is_transparent_retry);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this
<< ": created attempt, lb_call=" << lb_call_.get();
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld << " attempt=" << this
<< ": created attempt, lb_call=" << lb_call_.get();
// If per_attempt_recv_timeout is set, start a timer.
if (calld->retry_policy_ != nullptr &&
calld->retry_policy_->per_attempt_recv_timeout().has_value()) {
const Duration per_attempt_recv_timeout =
*calld->retry_policy_->per_attempt_recv_timeout();
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this << ": per-attempt timeout in "
<< per_attempt_recv_timeout.millis() << " ms";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this << ": per-attempt timeout in "
<< per_attempt_recv_timeout.millis() << " ms";
// Schedule retry after computed delay.
GRPC_CALL_STACK_REF(calld->owning_call_, "OnPerAttemptRecvTimer");
Ref(DEBUG_LOCATION, "OnPerAttemptRecvTimer").release();
@@ -316,11 +313,10 @@ void StartBatchInCallCombiner(void* arg, grpc_error_handle /*ignored*/) {
void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
grpc_transport_stream_op_batch* batch, const char* reason,
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": adding batch (" << reason
<< "): " << grpc_transport_stream_op_batch_string(batch, false);
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": adding batch (" << reason
<< "): " << grpc_transport_stream_op_batch_string(batch, false);
batch->handler_private.extra_arg = lb_call_.get();
GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
batch, grpc_schedule_on_exec_ctx);
@@ -527,11 +523,10 @@ void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
AddRetriableBatches(&closures);
// Note: This will yield the call combiner.
// Start batches on LB call.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": starting " << closures.size()
<< " retriable batches on lb_call=" << lb_call_.get();
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": starting " << closures.size()
<< " retriable batches on lb_call=" << lb_call_.get();
closures.RunClosures(calld_->call_combiner_);
}
@@ -595,28 +590,24 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
++calld_->num_attempts_completed_;
if (calld_->num_attempts_completed_ >=
calld_->retry_policy_->max_attempts()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": exceeded "
<< calld_->retry_policy_->max_attempts() << " retry attempts";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": exceeded "
<< calld_->retry_policy_->max_attempts() << " retry attempts";
return false;
}
// Check server push-back.
if (server_pushback.has_value()) {
if (*server_pushback < Duration::Zero()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": not retrying due to server push-back";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": not retrying due to server push-back";
return false;
} else {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": server push-back: retry in "
<< server_pushback->millis() << " ms";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": server push-back: retry in "
<< server_pushback->millis() << " ms";
}
}
// We should retry.
@@ -700,11 +691,9 @@ void RetryFilter::LegacyCallData::CallAttempt::OnPerAttemptRecvTimerLocked(
void RetryFilter::LegacyCallData::CallAttempt::
MaybeCancelPerAttemptRecvTimer() {
if (per_attempt_recv_timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": cancelling perAttemptRecvTimeout timer";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": cancelling perAttemptRecvTimeout timer";
if (calld_->chand_->event_engine()->Cancel(
*per_attempt_recv_timer_handle_)) {
Unref(DEBUG_LOCATION, "OnPerAttemptRecvTimer");
@@ -723,11 +712,10 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
: RefCounted(GRPC_TRACE_FLAG_ENABLED(retry) ? "BatchData" : nullptr,
refcount),
call_attempt_(attempt.release()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_
<< " attempt=" << call_attempt_ << ": creating batch " << this;
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_ << " attempt=" << call_attempt_
<< ": creating batch " << this;
// We hold a ref to the call stack for every batch sent on a call attempt.
// This is because some batches on the call attempt may not complete
// until after all of the batches are completed at the surface (because
@@ -744,11 +732,10 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
}
RetryFilter::LegacyCallData::CallAttempt::BatchData::~BatchData() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_
<< " attempt=" << call_attempt_ << ": destroying batch " << this;
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_ << " attempt=" << call_attempt_
<< ": destroying batch " << this;
CallAttempt* call_attempt = std::exchange(call_attempt_, nullptr);
grpc_call_stack* owning_call = call_attempt->calld_->owning_call_;
call_attempt->Unref(DEBUG_LOCATION, "~BatchData");
@@ -844,11 +831,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
if (GPR_UNLIKELY(
(call_attempt->trailing_metadata_available_ || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_initial_metadata_ready (Trailers-Only)";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_initial_metadata_ready (Trailers-Only)";
call_attempt->recv_initial_metadata_ready_deferred_batch_ =
std::move(batch_data);
call_attempt->recv_initial_metadata_error_ = error;
@@ -1254,11 +1240,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
}
}
if (have_pending_send_ops) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_
<< ": starting next batch for pending send op(s)";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_
<< ": starting next batch for pending send op(s)";
call_attempt_->AddRetriableBatches(closures);
}
}
@@ -1576,11 +1561,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
if (GPR_UNLIKELY(batch->cancel_stream)) {
// Save cancel_error in case subsequent batches are started.
cancelled_from_surface_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelled from surface: "
<< StatusToString(cancelled_from_surface_);
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelled from surface: "
<< StatusToString(cancelled_from_surface_);
// Fail any pending batches.
PendingBatchesFail(cancelled_from_surface_);
// If we have a current call attempt, commit the call, then send
@@ -1644,11 +1627,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
if (!retry_codepath_started_ && retry_committed_ &&
(retry_policy_ == nullptr ||
!retry_policy_->per_attempt_recv_timeout().has_value())) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": retry committed before first attempt; "
<< "creating LB call";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this
<< ": retry committed before first attempt; creating LB call";
PendingBatchClear(pending);
auto* service_config_call_data =
DownCast<ClientChannelServiceConfigCallData*>(
@@ -1942,11 +1923,9 @@ void RetryFilter::LegacyCallData::StartRetryTimer(
} else {
next_attempt_timeout = retry_backoff_.NextAttemptTime() - Timestamp::Now();
}
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": retrying failed call in " << next_attempt_timeout.millis()
<< " ms";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this << ": retrying failed call in "
<< next_attempt_timeout.millis() << " ms";
// Schedule retry after computed delay.
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
retry_timer_handle_ =

@@ -893,11 +893,9 @@ bool Subchannel::PublishTransportLocked() {
}
connecting_result_.Reset();
// Publish.
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": new connected subchannel at "
<< connected_subchannel_.get();
}
GRPC_TRACE_LOG(subchannel, INFO)
<< "subchannel " << this << " " << key_.ToString()
<< ": new connected subchannel at " << connected_subchannel_.get();
if (channelz_node_ != nullptr) {
channelz_node_->SetChildSocket(std::move(socket_node));
}

@@ -128,9 +128,8 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
if (md.get(GrpcCallWasCancelled()).value_or(false)) return;
auto* ctx = MaybeGetContext<BackendMetricProvider>();
if (ctx == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
LOG(INFO) << "[" << this << "] No BackendMetricProvider.";
}
GRPC_TRACE_LOG(backend_metric_filter, INFO)
<< "[" << this << "] No BackendMetricProvider.";
return;
}
absl::optional<std::string> serialized = MaybeSerializeBackendMetrics(ctx);
@@ -140,8 +139,9 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
<< "] Backend metrics serialized. size: " << serialized->size();
md.Set(EndpointLoadMetricsBinMetadata(),
Slice::FromCopiedString(std::move(*serialized)));
} else if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
LOG(INFO) << "[" << this << "] No backend metrics.";
} else {
GRPC_TRACE_LOG(backend_metric_filter, INFO)
<< "[" << this << "] No backend metrics.";
}
}

@@ -165,11 +165,10 @@ MessageHandle ChannelCompression::CompressMessage(
absl::StatusOr<MessageHandle> ChannelCompression::DecompressMessage(
bool is_client, MessageHandle message, DecompressArgs args) const {
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
LOG(INFO) << "DecompressMessage: len=" << message->payload()->Length()
<< " max=" << args.max_recv_message_length.value_or(-1)
<< " alg=" << args.algorithm;
}
GRPC_TRACE_LOG(compression, INFO)
<< "DecompressMessage: len=" << message->payload()->Length()
<< " max=" << args.max_recv_message_length.value_or(-1)
<< " alg=" << args.algorithm;
auto* call_tracer = MaybeGetContext<CallTracerInterface>();
if (call_tracer != nullptr) {
call_tracer->RecordReceivedMessage(*message->payload());

@@ -159,11 +159,10 @@ ServerMetadataHandle CheckPayload(const Message& msg,
absl::optional<uint32_t> max_length,
bool is_client, bool is_send) {
if (!max_length.has_value()) return nullptr;
if (GRPC_TRACE_FLAG_ENABLED(call)) {
LOG(INFO) << GetContext<Activity>()->DebugTag() << "[message_size] "
<< (is_send ? "send" : "recv")
<< " len:" << msg.payload()->Length() << " max:" << *max_length;
}
GRPC_TRACE_LOG(call, INFO)
<< GetContext<Activity>()->DebugTag() << "[message_size] "
<< (is_send ? "send" : "recv") << " len:" << msg.payload()->Length()
<< " max:" << *max_length;
if (msg.payload()->Length() <= *max_length) return nullptr;
return ServerMetadataFromStatus(
GRPC_STATUS_RESOURCE_EXHAUSTED,

@@ -318,9 +318,8 @@ void ChaoticGoodConnector::OnHandshakeDone(
},
EventEngineWakeupScheduler(event_engine_),
[self = RefAsSubclass<ChaoticGoodConnector>()](absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "ChaoticGoodConnector::OnHandshakeDone: " << status;
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "ChaoticGoodConnector::OnHandshakeDone: " << status;
if (status.ok()) {
MutexLock lock(&self->mu_);
self->result_->transport = new ChaoticGoodClientTransport(

@@ -128,9 +128,8 @@ auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
auto ChaoticGoodServerTransport::SendFragment(
ServerFragmentFrame frame, MpscSender<ServerFrame> outgoing_frames,
CallInitiator call_initiator) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: SendFragment: frame=" << frame.ToString();
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: SendFragment: frame=" << frame.ToString();
// Capture the call_initiator to ensure the underlying call spine is alive
// until the outgoing_frames.Send promise completes.
return Map(outgoing_frames.Send(std::move(frame)),

@@ -110,8 +110,9 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
if (t->ping_abuse_policy.ReceivedOnePing(transport_idle)) {
grpc_chttp2_exceeded_ping_strikes(t);
}
} else if (GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
LOG(INFO) << "CLIENT[" << t << "]: received ping " << p->opaque_8bytes;
} else {
GRPC_TRACE_LOG(http2_ping, INFO)
<< "CLIENT[" << t << "]: received ping " << p->opaque_8bytes;
}
if (t->ack_pings) {
if (t->ping_ack_count == t->ping_ack_capacity) {

@@ -117,9 +117,8 @@ void HPackCompressor::SetMaxUsableSize(uint32_t max_table_size) {
void HPackCompressor::SetMaxTableSize(uint32_t max_table_size) {
if (table_.SetMaxSize(std::min(max_usable_size_, max_table_size))) {
advertise_table_size_change_ = true;
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << "set max table size from encoder to " << max_table_size;
}
GRPC_TRACE_LOG(http, INFO)
<< "set max table size from encoder to " << max_table_size;
}
}

@@ -81,11 +81,10 @@ HandshakeManager::HandshakeManager()
void HandshakeManager::Add(RefCountedPtr<Handshaker> handshaker) {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this << ": adding handshaker "
<< std::string(handshaker->name()) << " [" << handshaker.get()
<< "] at index " << handshakers_.size();
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this << ": adding handshaker "
<< std::string(handshaker->name()) << " [" << handshaker.get()
<< "] at index " << handshakers_.size();
handshakers_.push_back(std::move(handshaker));
}
@@ -153,11 +152,10 @@ void HandshakeManager::Shutdown(absl::Status error) {
}
void HandshakeManager::CallNextHandshakerLocked(absl::Status error) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this << ": error=" << error
<< " shutdown=" << is_shutdown_ << " index=" << index_
<< ", args=" << HandshakerArgsString(&args_);
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this << ": error=" << error
<< " shutdown=" << is_shutdown_ << " index=" << index_
<< ", args=" << HandshakerArgsString(&args_);
CHECK(index_ <= handshakers_.size());
// If we got an error or we've been shut down or we're exiting early or
// we've finished the last handshaker, invoke the on_handshake_done
@@ -192,11 +190,10 @@ void HandshakeManager::CallNextHandshakerLocked(absl::Status error) {
}
// Call the next handshaker.
auto handshaker = handshakers_[index_];
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this << ": calling handshaker "
<< handshaker->name() << " [" << handshaker.get() << "] at index "
<< index_;
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this << ": calling handshaker "
<< handshaker->name() << " [" << handshaker.get() << "] at index "
<< index_;
++index_;
handshaker->DoHandshake(&args_, [self = Ref()](absl::Status error) mutable {
MutexLock lock(&self->mu_);

@@ -195,9 +195,8 @@ static void maybe_post_reclaimer(secure_endpoint* ep) {
grpc_core::ReclamationPass::kBenign,
[ep](absl::optional<grpc_core::ReclamationSweep> sweep) {
if (sweep.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "secure endpoint: benign reclamation to free memory";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "secure endpoint: benign reclamation to free memory";
grpc_slice temp_read_slice;
grpc_slice temp_write_slice;

@@ -374,9 +374,8 @@ XdsDependencyManager::XdsDependencyManager(
}
void XdsDependencyManager::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] shutting down";
if (listener_watcher_ != nullptr) {
XdsListenerResourceType::CancelWatch(
xds_client_.get(), listener_resource_name_, listener_watcher_,
@@ -450,11 +449,9 @@ void XdsDependencyManager::OnListenerUpdate(
}
// Start watch for the new RDS resource name.
route_config_name_ = rds_name;
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] starting watch for route config "
<< route_config_name_;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] starting watch for route config " << route_config_name_;
auto watcher =
MakeRefCounted<RouteConfigWatcher>(Ref(), route_config_name_);
route_config_watcher_ = watcher.get();
@@ -537,11 +534,9 @@ absl::flat_hash_set<absl::string_view> GetClustersFromVirtualHost(
void XdsDependencyManager::OnRouteConfigUpdate(
const std::string& name,
std::shared_ptr<const XdsRouteConfigResource> route_config) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received RouteConfig update for "
<< (name.empty() ? "<inline>" : name);
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
<< "] received RouteConfig update for "
<< (name.empty() ? "<inline>" : name);
if (xds_client_ == nullptr) return;
// Ignore updates for stale names.
if (name.empty()) {
@@ -572,20 +567,18 @@ void XdsDependencyManager::OnRouteConfigUpdate(
}
void XdsDependencyManager::OnError(std::string context, absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received Listener or RouteConfig error: " << context << " "
<< status;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] received Listener or RouteConfig error: " << context << " "
<< status;
if (xds_client_ == nullptr) return;
if (current_virtual_host_ != nullptr) return;
watcher_->OnError(context, std::move(status));
}
void XdsDependencyManager::OnResourceDoesNotExist(std::string context) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this << "] " << context;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] " << context;
if (xds_client_ == nullptr) return;
current_virtual_host_ = nullptr;
watcher_->OnResourceDoesNotExist(std::move(context));

@@ -110,17 +110,14 @@ class XdsResolver final : public Resolver {
uri_(std::move(args.uri)),
data_plane_authority_(std::move(data_plane_authority)),
channel_id_(absl::Uniform<uint64_t>(absl::BitGen())) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] created for URI "
<< uri_.ToString() << "; data plane authority is "
<< data_plane_authority_;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] created for URI " << uri_.ToString()
<< "; data plane authority is " << data_plane_authority_;
}
~XdsResolver() override {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] destroyed";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] destroyed";
}
void StartLocked() override;
@@ -969,9 +966,8 @@ void XdsResolver::StartLocked() {
}
void XdsResolver::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] shutting down";
if (xds_client_ != nullptr) {
dependency_mgr_.reset();
grpc_pollset_set_del_pollset_set(xds_client_->interested_parties(),
@@ -982,9 +978,8 @@ void XdsResolver::ShutdownLocked() {
void XdsResolver::OnUpdate(
RefCountedPtr<const XdsDependencyManager::XdsConfig> config) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] received updated xDS config";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] received updated xDS config";
if (xds_client_ == nullptr) return;
current_config_ = std::move(config);
GenerateResult();

@@ -586,11 +586,9 @@ XdsServerConfigFetcher::ListenerWatcher::ListenerWatcher(
void XdsServerConfigFetcher::ListenerWatcher::OnResourceChanged(
std::shared_ptr<const XdsListenerResource> listener,
RefCountedPtr<ReadDelayHandle> /* read_delay_handle */) {
if (GRPC_TRACE_FLAG_ENABLED(xds_server_config_fetcher)) {
LOG(INFO) << "[ListenerWatcher " << this
<< "] Received LDS update from xds client " << xds_client_.get()
<< ": " << listener->ToString();
}
GRPC_TRACE_LOG(xds_server_config_fetcher, INFO)
<< "[ListenerWatcher " << this << "] Received LDS update from xds client "
<< xds_client_.get() << ": " << listener->ToString();
auto* tcp_listener =
absl::get_if<XdsListenerResource::TcpListener>(&listener->listener);
if (tcp_listener == nullptr) {

@@ -642,11 +642,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX;
}
if (GRPC_TRACE_FLAG_ENABLED(tsi)) {
LOG(INFO) << (impl->is_client ? "Client" : "Server") << " prepared "
<< tsi_fake_handshake_message_to_string(
impl->next_message_to_send);
}
GRPC_TRACE_LOG(tsi, INFO)
<< (impl->is_client ? "Client" : "Server") << " prepared "
<< tsi_fake_handshake_message_to_string(impl->next_message_to_send);
impl->next_message_to_send = next_message_to_send;
}
result =

@@ -198,11 +198,10 @@ absl::StatusOr<std::string> GetBootstrapContents(const char* fallback_config) {
// First, try GRPC_XDS_BOOTSTRAP env var.
auto path = GetEnv("GRPC_XDS_BOOTSTRAP");
if (path.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "Got bootstrap file location from GRPC_XDS_BOOTSTRAP "
"environment variable: "
<< *path;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "Got bootstrap file location from GRPC_XDS_BOOTSTRAP "
"environment variable: "
<< *path;
auto contents = LoadFile(*path, /*add_null_terminator=*/true);
if (!contents.ok()) return contents.status();
return std::string(contents->as_string_view());
@@ -257,9 +256,8 @@ absl::StatusOr<RefCountedPtr<GrpcXdsClient>> GrpcXdsClient::GetOrCreate(
// Find bootstrap contents.
auto bootstrap_contents = GetBootstrapContents(g_fallback_bootstrap_config);
if (!bootstrap_contents.ok()) return bootstrap_contents.status();
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "xDS bootstrap contents: " << *bootstrap_contents;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "xDS bootstrap contents: " << *bootstrap_contents;
// Parse bootstrap.
auto bootstrap = GrpcXdsBootstrap::Create(*bootstrap_contents);
if (!bootstrap.ok()) return bootstrap.status();

@@ -599,11 +599,9 @@ void XdsClient::XdsChannel::SetHealthyLocked() {
auto channel_it = std::find(channels.begin(), channels.end(), this);
// Skip if this is not on the list
if (channel_it != channels.end()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get() << "] authority "
<< authority.first << ": Falling forward to "
<< server_.server_uri();
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] authority "
<< authority.first << ": Falling forward to " << server_.server_uri();
// Lower priority channels are no longer needed, connection is back!
channels.erase(channel_it + 1, channels.end());
}
@@ -711,11 +709,10 @@ void XdsClient::XdsChannel::RetryableCall<T>::StartNewCallLocked() {
if (shutting_down_) return;
CHECK(xds_channel_->transport_ != nullptr);
CHECK(call_ == nullptr);
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_channel()->xds_client()
<< "] xds server " << xds_channel()->server_.server_uri()
<< ": start new call from retryable call " << this;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_channel()->xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": start new call from retryable call " << this;
call_ = MakeOrphanable<T>(
this->Ref(DEBUG_LOCATION, "RetryableCall+start_new_call"));
}
@@ -747,11 +744,10 @@ void XdsClient::XdsChannel::RetryableCall<T>::OnRetryTimer() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
if (shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_channel()->xds_client()
<< "] xds server " << xds_channel()->server_.server_uri()
<< ": retry timer fired (retryable call: " << this << ")";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_channel()->xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": retry timer fired (retryable call: " << this << ")";
StartNewCallLocked();
}
}
@@ -945,11 +941,9 @@ void XdsClient::XdsChannel::AdsCall::AdsResponseParser::ParseResource(
if (resource_state.resource != nullptr &&
result_.type->ResourcesEqual(resource_state.resource.get(),
decode_result.resource->get())) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] " << result_.type_url
<< " resource " << resource_name
<< " identical to current, ignoring.";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] " << result_.type_url
<< " resource " << resource_name << " identical to current, ignoring.";
return;
}
// Update the resource state.
@@ -1490,11 +1484,10 @@ void XdsClient::XdsChannel::LrsCall::OnRecvMessage(absl::string_view payload) {
if (send_all_clusters == send_all_clusters_ &&
cluster_names_ == new_cluster_names &&
load_reporting_interval_ == new_load_reporting_interval) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": incoming LRS response identical to current, ignoring.";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": incoming LRS response identical to current, ignoring.";
return;
}
// If the interval has changed, we'll need to restart the timer below.
@@ -1559,9 +1552,8 @@ XdsClient::XdsClient(
work_serializer_(engine),
engine_(std::move(engine)),
metrics_reporter_(std::move(metrics_reporter)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] creating xds client";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] creating xds client";
CHECK(bootstrap_ != nullptr);
if (bootstrap_->node() != nullptr) {
GRPC_TRACE_LOG(xds_client, INFO)
@@ -1571,15 +1563,13 @@ XdsClient::XdsClient(
}
XdsClient::~XdsClient() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] destroying xds client";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] destroying xds client";
}
void XdsClient::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] shutting down xds client";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] shutting down xds client";
MutexLock lock(&mu_);
shutting_down_ = true;
// Clear cache and any remaining watchers that may not have been cancelled.
@@ -1721,11 +1711,10 @@ void XdsClient::WatchResource(const XdsResourceType* type,
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::NACKED) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this
<< "] reporting cached validation failure for " << name
<< ": " << resource_state.meta.failed_details;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this
<< "] reporting cached validation failure for " << name << ": "
<< resource_state.meta.failed_details;
std::string details = resource_state.meta.failed_details;
const auto* node = bootstrap_->node();
if (node != nullptr) {
@@ -1744,11 +1733,9 @@ void XdsClient::WatchResource(const XdsResourceType* type,
}
absl::Status channel_status = authority_state.xds_channels.back()->status();
if (!channel_status.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this
<< "] returning cached channel error for " << name << ": "
<< channel_status;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] returning cached channel error for "
<< name << ": " << channel_status;
work_serializer_.Schedule(
[watcher = std::move(watcher), status = std::move(channel_status)]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) mutable {
@@ -2047,9 +2034,8 @@ void XdsClient::NotifyWatchersOnResourceDoesNotExist(
XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
const XdsBootstrap::XdsServer& xds_server, bool send_all_clusters,
const std::set<std::string>& clusters) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] start building load report";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] start building load report";
XdsApi::ClusterLoadReportMap snapshot_map;
auto server_it = xds_load_report_server_map_.find(xds_server.Key());
if (server_it == xds_load_report_server_map_.end()) return snapshot_map;
@@ -2074,11 +2060,10 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
if (load_report.drop_stats != nullptr) {
snapshot.dropped_requests +=
load_report.drop_stats->GetSnapshotAndReset();
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] cluster=" << cluster_key.first
<< " eds_service_name=" << cluster_key.second
<< " drop_stats=" << load_report.drop_stats;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] cluster=" << cluster_key.first
<< " eds_service_name=" << cluster_key.second
<< " drop_stats=" << load_report.drop_stats;
}
// Aggregate locality stats.
for (auto it = load_report.locality_stats.begin();

@@ -51,19 +51,17 @@ XdsClusterDropStats::XdsClusterDropStats(RefCountedPtr<XdsClient> xds_client,
lrs_server_(lrs_server),
cluster_name_(cluster_name),
eds_service_name_(eds_service_name) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get() << "] created drop stats "
<< this << " for {" << lrs_server_ << ", " << cluster_name_
<< ", " << eds_service_name_ << "}";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] created drop stats " << this
<< " for {" << lrs_server_ << ", " << cluster_name_ << ", "
<< eds_service_name_ << "}";
}
XdsClusterDropStats::~XdsClusterDropStats() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get()
<< "] destroying drop stats " << this << " for {" << lrs_server_
<< ", " << cluster_name_ << ", " << eds_service_name_ << "}";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] destroying drop stats "
<< this << " for {" << lrs_server_ << ", " << cluster_name_ << ", "
<< eds_service_name_ << "}";
xds_client_->RemoveClusterDropStats(lrs_server_, cluster_name_,
eds_service_name_, this);
xds_client_.reset(DEBUG_LOCATION, "DropStats");

@@ -70,30 +70,26 @@ void ServerMetricRecorder::UpdateBackendMetricDataState(
void ServerMetricRecorder::SetCpuUtilization(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->cpu_utilization = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization set: " << value;
}
void ServerMetricRecorder::SetMemoryUtilization(double value) {
if (!IsUtilizationValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->mem_utilization = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization set: " << value;
}
void ServerMetricRecorder::SetApplicationUtilization(double value) {
@@ -105,37 +101,30 @@ void ServerMetricRecorder::SetApplicationUtilization(double value) {
UpdateBackendMetricDataState([value](BackendMetricData* data) {
data->application_utilization = value;
});
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Application utilization set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization set: " << value;
}
void ServerMetricRecorder::SetQps(double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->qps = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO) << "[" << this << "] QPS set: " << value;
}
void ServerMetricRecorder::SetEps(double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->eps = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO) << "[" << this << "] EPS set: " << value;
}
void ServerMetricRecorder::SetNamedUtilization(string_ref name, double value) {
@@ -171,39 +160,34 @@ void ServerMetricRecorder::SetAllNamedUtilization(
void ServerMetricRecorder::ClearCpuUtilization() {
UpdateBackendMetricDataState(
[](BackendMetricData* data) { data->cpu_utilization = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization cleared.";
}
void ServerMetricRecorder::ClearMemoryUtilization() {
UpdateBackendMetricDataState(
[](BackendMetricData* data) { data->mem_utilization = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization cleared.";
}
void ServerMetricRecorder::ClearApplicationUtilization() {
UpdateBackendMetricDataState(
[](BackendMetricData* data) { data->application_utilization = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Application utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization cleared.";
}
void ServerMetricRecorder::ClearQps() {
UpdateBackendMetricDataState([](BackendMetricData* data) { data->qps = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS utilization cleared.";
}
void ServerMetricRecorder::ClearEps() {
UpdateBackendMetricDataState([](BackendMetricData* data) { data->eps = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS utilization cleared.";
}
void ServerMetricRecorder::ClearNamedUtilization(string_ref name) {
@@ -245,30 +229,26 @@ ServerMetricRecorder::GetMetricsIfChanged() const {
experimental::CallMetricRecorder&
BackendMetricState::RecordCpuUtilizationMetric(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization value rejected: " << value;
return *this;
}
cpu_utilization_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization recorded: " << value;
return *this;
}
experimental::CallMetricRecorder&
BackendMetricState::RecordMemoryUtilizationMetric(double value) {
if (!IsUtilizationValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization value rejected: " << value;
return *this;
}
mem_utilization_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization recorded: " << value;
return *this;
}
@@ -280,39 +260,34 @@ BackendMetricState::RecordApplicationUtilizationMetric(double value) {
return *this;
}
application_utilization_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Application utilization recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization recorded: " << value;
return *this;
}
experimental::CallMetricRecorder& BackendMetricState::RecordQpsMetric(
double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS value rejected: " << value;
return *this;
}
qps_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS recorded: " << value;
return *this;
}
experimental::CallMetricRecorder& BackendMetricState::RecordEpsMetric(
double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS value rejected: " << value;
return *this;
}
eps_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS recorded: " << value;
return *this;
}
