[Gpr_To_Absl_Logging] Using GRPC_TRACE_LOG instead of GRPC_TRACE_FLAG_ENABLED (#37483)

Closes #37483

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/37483 from tanvi-jagtap:GRPC_TRACE_FLAG_ENABLED_08 ab8b98e8c4
PiperOrigin-RevId: 663618882
Author: Tanvi Jagtap, committed by Copybara-Service
parent dad4695af4
commit b6a70e1747
  1. src/core/client_channel/retry_filter_legacy_call_data.cc (179 lines changed)
  2. src/core/client_channel/subchannel.cc (21 lines changed)
  3. src/core/ext/transport/chaotic_good/chaotic_good_transport.h (27 lines changed)
  4. src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc (10 lines changed)
  5. src/core/ext/transport/chttp2/transport/frame_settings.cc (11 lines changed)
  6. src/core/ext/transport/chttp2/transport/parsing.cc (25 lines changed)
  7. src/core/handshaker/handshaker.cc (10 lines changed)
  8. src/core/load_balancing/health_check_client.cc (11 lines changed)
  9. src/core/load_balancing/outlier_detection/outlier_detection.cc (88 lines changed)
  10. src/core/load_balancing/pick_first/pick_first.cc (91 lines changed)
  11. src/core/load_balancing/priority/priority.cc (76 lines changed)
  12. src/core/load_balancing/ring_hash/ring_hash.cc (45 lines changed)
  13. src/core/load_balancing/rls/rls.cc (88 lines changed)
  14. src/core/load_balancing/round_robin/round_robin.cc (28 lines changed)
  15. src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc (65 lines changed)
  16. src/core/load_balancing/weighted_target/weighted_target.cc (35 lines changed)
  17. src/core/load_balancing/xds/xds_cluster_impl.cc (21 lines changed)
  18. src/core/load_balancing/xds/xds_cluster_manager.cc (13 lines changed)
  19. src/core/load_balancing/xds/xds_override_host.cc (45 lines changed)
  20. src/core/resolver/xds/xds_resolver.cc (11 lines changed)
  21. src/core/xds/xds_client/xds_client.cc (152 lines changed)
  22. src/core/xds/xds_client/xds_client_stats.cc (29 lines changed)
  23. src/cpp/ext/gcp/environment_autodetect.cc (12 lines changed)
  24. src/cpp/server/backend_metric_recorder.cc (16 lines changed)

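Every hunk below applies the same mechanical rewrite: an explicit if (GRPC_TRACE_FLAG_ENABLED(tracer)) { LOG(severity) << ...; } guard is collapsed into a single GRPC_TRACE_LOG(tracer, severity) << ...; statement, with the streamed arguments left unchanged. The self-contained sketch that follows shows the before/after shape; the stand-in macro definitions are illustrative assumptions only, not the real gRPC (trace.h) or Abseil logging implementations.

// Minimal sketch of the pattern rewritten in every hunk below.
// The stand-in macros here are hypothetical; the real ones live in
// gRPC's trace header and Abseil logging.
#include <iostream>

#define GRPC_TRACE_FLAG_ENABLED(tracer) true   // stand-in: pretend the flag is on
#define LOG(severity) std::cerr                // stand-in for absl LOG(severity)
// Stand-in for GRPC_TRACE_LOG: folds the flag check into the log statement.
#define GRPC_TRACE_LOG(tracer, severity) \
  if (!GRPC_TRACE_FLAG_ENABLED(tracer)) {} else LOG(severity)

int main() {
  int attempt = 3;
  // Old pattern (removed throughout this commit): explicit guard around LOG.
  if (GRPC_TRACE_FLAG_ENABLED(retry)) {
    LOG(INFO) << "attempt=" << attempt << ": retrying\n";
  }
  // New pattern (added throughout this commit): one macro, same stream args.
  GRPC_TRACE_LOG(retry, INFO) << "attempt=" << attempt << ": retrying\n";
  return 0;
}

Under this assumption the new form keeps the cheap-when-disabled behavior of the old guard: when the trace flag is off, the stream expression after the macro is never evaluated.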
@@ -230,12 +230,11 @@ void RetryFilter::LegacyCallData::CallAttempt::MaybeSwitchToFastPath() {
// yet seen that op from the surface, we can't switch yet.
if (recv_trailing_metadata_internal_batch_ != nullptr) return;
// Switch to fast path.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": retry state no longer needed; "
"moving LB call to parent and unreffing the call attempt";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": retry state no longer needed; "
"moving LB call to parent and unreffing the call attempt";
calld_->committed_call_ = std::move(lb_call_);
calld_->call_attempt_.reset(DEBUG_LOCATION, "MaybeSwitchToFastPath");
}
@@ -249,12 +248,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
// send_initial_metadata.
if (calld_->seen_send_initial_metadata_ && !started_send_initial_metadata_ &&
!calld_->pending_send_initial_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": replaying previously completed "
"send_initial_metadata op";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << calld_->chand_
<< " calld=" << calld_ << " attempt=" << this
<< ": replaying previously completed "
"send_initial_metadata op";
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
replay_batch_data->AddRetriableSendInitialMetadataOp();
}
@@ -263,12 +260,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
if (started_send_message_count_ < calld_->send_messages_.size() &&
started_send_message_count_ == completed_send_message_count_ &&
!calld_->pending_send_message_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": replaying previously completed "
"send_message op";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << calld_->chand_
<< " calld=" << calld_ << " attempt=" << this
<< ": replaying previously completed "
"send_message op";
if (replay_batch_data == nullptr) {
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
}
@@ -282,12 +277,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
started_send_message_count_ == calld_->send_messages_.size() &&
!started_send_trailing_metadata_ &&
!calld_->pending_send_trailing_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": replaying previously completed "
"send_trailing_metadata op";
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << calld_->chand_
<< " calld=" << calld_ << " attempt=" << this
<< ": replaying previously completed "
"send_trailing_metadata op";
if (replay_batch_data == nullptr) {
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
}
@@ -325,12 +318,11 @@ void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
void RetryFilter::LegacyCallData::CallAttempt::
AddBatchForInternalRecvTrailingMetadata(CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": call failed but recv_trailing_metadata not started; "
"starting it internally";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": call failed but recv_trailing_metadata not started; "
"starting it internally";
// Create batch_data with 2 refs, since this batch will be unreffed twice:
// once for the recv_trailing_metadata_ready callback when the batch
// completes, and again when we actually get a recv_trailing_metadata
@@ -556,12 +548,11 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
}
// Status is not OK. Check whether the status is retryable.
if (!calld_->retry_policy_->retryable_status_codes().Contains(*status)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": status "
<< grpc_status_code_to_string(*status)
<< " not configured as retryable";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": status "
<< grpc_status_code_to_string(*status)
<< " not configured as retryable";
return false;
}
}
@@ -651,14 +642,12 @@ void RetryFilter::LegacyCallData::CallAttempt::OnPerAttemptRecvTimerLocked(
void* arg, grpc_error_handle error) {
auto* call_attempt = static_cast<CallAttempt*>(arg);
auto* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": perAttemptRecvTimeout timer fired: error="
<< StatusToString(error)
<< ", per_attempt_recv_timer_handle_.has_value()="
<< call_attempt->per_attempt_recv_timer_handle_.has_value();
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": perAttemptRecvTimeout timer fired: error=" << StatusToString(error)
<< ", per_attempt_recv_timer_handle_.has_value()="
<< call_attempt->per_attempt_recv_timer_handle_.has_value();
CallCombinerClosureList closures;
call_attempt->per_attempt_recv_timer_handle_.reset();
// Cancel this attempt.
@@ -804,13 +793,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got recv_initial_metadata_ready, error="
<< StatusToString(error);
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << " batch_data=" << batch_data.get()
<< ": got recv_initial_metadata_ready, error=" << StatusToString(error);
call_attempt->completed_recv_initial_metadata_ = true;
// If this attempt has been abandoned, then we're not going to use the
// result of this recv_initial_metadata op, so do nothing.
@@ -901,12 +887,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got recv_message_ready, error=" << StatusToString(error);
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << " batch_data=" << batch_data.get()
<< ": got recv_message_ready, error=" << StatusToString(error);
++call_attempt->completed_recv_message_count_;
// If this attempt has been abandoned, then we're not going to use the
// result of this recv_message op, so do nothing.
@@ -930,12 +914,11 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
if (GPR_UNLIKELY(
(!call_attempt->recv_message_.has_value() || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_message_ready (nullptr message and "
"recv_trailing_metadata pending)";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_message_ready (nullptr message and "
"recv_trailing_metadata pending)";
call_attempt->recv_message_ready_deferred_batch_ = std::move(batch_data);
call_attempt->recv_message_error_ = error;
CallCombinerClosureList closures;
@@ -1094,13 +1077,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got recv_trailing_metadata_ready, error="
<< StatusToString(error);
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << " batch_data=" << batch_data.get()
<< ": got recv_trailing_metadata_ready, error=" << StatusToString(error);
call_attempt->completed_recv_trailing_metadata_ = true;
// If this attempt has been abandoned, then we're not going to use the
// result of this recv_trailing_metadata op, so do nothing.
@@ -1121,17 +1101,15 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
batch_data->batch_.payload->recv_trailing_metadata.recv_trailing_metadata;
GetCallStatus(calld->deadline_, md_batch, error, &status, &server_pushback,
&is_lb_drop, &stream_network_state);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << ": call finished, status="
<< grpc_status_code_to_string(status) << " server_pushback="
<< (server_pushback.has_value() ? server_pushback->ToString()
: "N/A")
<< " is_lb_drop=" << is_lb_drop << " stream_network_state="
<< (stream_network_state.has_value()
? absl::StrCat(*stream_network_state)
: "N/A");
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": call finished, status=" << grpc_status_code_to_string(status)
<< " server_pushback="
<< (server_pushback.has_value() ? server_pushback->ToString() : "N/A")
<< " is_lb_drop=" << is_lb_drop << " stream_network_state="
<< (stream_network_state.has_value() ? absl::StrCat(*stream_network_state)
: "N/A");
// Check if we should retry.
if (!is_lb_drop) { // Never retry on LB drops.
enum { kNoRetry, kTransparentRetry, kConfigurableRetry } retry = kNoRetry;
@@ -1253,15 +1231,11 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got on_complete, error=" << StatusToString(error)
<< ", batch="
<< grpc_transport_stream_op_batch_string(&batch_data->batch_,
false);
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << " batch_data=" << batch_data.get()
<< ": got on_complete, error=" << StatusToString(error) << ", batch="
<< grpc_transport_stream_op_batch_string(&batch_data->batch_, false);
// If this attempt has been abandoned, then we're not going to propagate
// the completion of this batch, so do nothing.
if (call_attempt->abandoned_) {
@@ -1328,15 +1302,12 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnCompleteForCancelOp(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< " batch_data=" << batch_data.get()
<< ": got on_complete for cancel_stream batch, error="
<< StatusToString(error) << ", batch="
<< grpc_transport_stream_op_batch_string(&batch_data->batch_,
false);
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt << " batch_data=" << batch_data.get()
<< ": got on_complete for cancel_stream batch, error="
<< StatusToString(error) << ", batch="
<< grpc_transport_stream_op_batch_string(&batch_data->batch_, false);
GRPC_CALL_COMBINER_STOP(
calld->call_combiner_,
"on_complete for internally generated cancel_stream op");
@@ -1371,12 +1342,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
void RetryFilter::LegacyCallData::CallAttempt::BatchData::
AddRetriableSendMessageOp() {
auto* calld = call_attempt_->calld_;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_
<< ": starting calld->send_messages["
<< call_attempt_->started_send_message_count_ << "]";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_ << ": starting calld->send_messages["
<< call_attempt_->started_send_message_count_ << "]";
CachedSendMessage cache =
calld->send_messages_[call_attempt_->started_send_message_count_];
++call_attempt_->started_send_message_count_;

@@ -417,12 +417,11 @@ class Subchannel::ConnectedSubchannelStateWatcher final
if (c->connected_subchannel_ == nullptr) return;
if (new_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
new_state == GRPC_CHANNEL_SHUTDOWN) {
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
LOG(INFO) << "subchannel " << c << " " << c->key_.ToString()
<< ": Connected subchannel "
<< c->connected_subchannel_.get() << " reports "
<< ConnectivityStateName(new_state) << ": " << status;
}
GRPC_TRACE_LOG(subchannel, INFO)
<< "subchannel " << c << " " << c->key_.ToString()
<< ": Connected subchannel " << c->connected_subchannel_.get()
<< " reports " << ConnectivityStateName(new_state) << ": "
<< status;
c->connected_subchannel_.reset();
if (c->channelz_node() != nullptr) {
c->channelz_node()->SetChildSocket(nullptr);
@@ -803,12 +802,10 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
if (connecting_result_.transport == nullptr || !PublishTransportLocked()) {
const Duration time_until_next_attempt =
next_attempt_time_ - Timestamp::Now();
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": connect failed (" << StatusToString(error)
<< "), backing off for " << time_until_next_attempt.millis()
<< " ms";
}
GRPC_TRACE_LOG(subchannel, INFO)
<< "subchannel " << this << " " << key_.ToString()
<< ": connect failed (" << StatusToString(error)
<< "), backing off for " << time_until_next_attempt.millis() << " ms";
SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_to_absl_status(error));
retry_timer_handle_ = event_engine_->RunAfter(

@@ -56,12 +56,11 @@ class ChaoticGoodTransport : public RefCounted<ChaoticGoodTransport> {
bool saw_encoding_errors = false;
auto buffers = frame.Serialize(&encoder_, saw_encoding_errors);
// ignore encoding errors: they will be logged separately already
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: WriteFrame to:"
<< ResolvedAddressToString(control_endpoint_.GetPeerAddress())
.value_or("<<unknown peer address>>")
<< " " << frame.ToString();
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: WriteFrame to:"
<< ResolvedAddressToString(control_endpoint_.GetPeerAddress())
.value_or("<<unknown peer address>>")
<< " " << frame.ToString();
return TryJoin<absl::StatusOr>(
control_endpoint_.Write(std::move(buffers.control)),
data_endpoint_.Write(std::move(buffers.data)));
@@ -76,15 +75,13 @@ class ChaoticGoodTransport : public RefCounted<ChaoticGoodTransport> {
auto frame_header =
FrameHeader::Parse(reinterpret_cast<const uint8_t*>(
GRPC_SLICE_START_PTR(read_buffer.c_slice())));
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: ReadHeader from:"
<< ResolvedAddressToString(
control_endpoint_.GetPeerAddress())
.value_or("<<unknown peer address>>")
<< " "
<< (frame_header.ok() ? frame_header->ToString()
: frame_header.status().ToString());
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: ReadHeader from:"
<< ResolvedAddressToString(control_endpoint_.GetPeerAddress())
.value_or("<<unknown peer address>>")
<< " "
<< (frame_header.ok() ? frame_header->ToString()
: frame_header.status().ToString());
// Read header and trailers from control endpoint.
// Read message padding and message from data endpoint.
return If(

@@ -300,12 +300,10 @@ auto ChaoticGoodServerListener::ActiveConnection::HandshakingState::
},
[self](PromiseEndpoint ret) -> absl::Status {
MutexLock lock(&self->connection_->listener_->mu_);
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << self->connection_.get()
<< " Data endpoint setup done: shutdown="
<< (self->connection_->listener_->shutdown_ ? "true"
: "false");
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< self->connection_.get()
<< " Data endpoint setup done: shutdown="
<< (self->connection_->listener_->shutdown_ ? "true" : "false");
if (self->connection_->listener_->shutdown_) {
return absl::UnavailableError("Server shutdown");
}

@@ -187,12 +187,11 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
"invalid value %u passed for %s", parser->value,
grpc_core::Http2Settings::WireIdToName(parser->id).c_str()));
}
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << "CHTTP2:" << (t->is_client ? "CLI" : "SVR") << ":"
<< t->peer_string.as_string_view() << ": got setting "
<< grpc_core::Http2Settings::WireIdToName(parser->id)
<< " = " << parser->value;
}
GRPC_TRACE_LOG(http, INFO)
<< "CHTTP2:" << (t->is_client ? "CLI" : "SVR") << ":"
<< t->peer_string.as_string_view() << ": got setting "
<< grpc_core::Http2Settings::WireIdToName(parser->id) << " = "
<< parser->value;
} break;
}
}

@@ -331,13 +331,11 @@ absl::variant<size_t, absl::Status> grpc_chttp2_perform_read(
case GRPC_DTS_FH_8:
DCHECK_LT(cur, end);
t->incoming_stream_id |= (static_cast<uint32_t>(*cur));
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << "INCOMING[" << t << "]: "
<< FrameTypeString(t->incoming_frame_type,
t->incoming_frame_flags)
<< " len:" << t->incoming_frame_size
<< absl::StrFormat(" id:0x%08x", t->incoming_stream_id);
}
GRPC_TRACE_LOG(http, INFO)
<< "INCOMING[" << t << "]: "
<< FrameTypeString(t->incoming_frame_type, t->incoming_frame_flags)
<< " len:" << t->incoming_frame_size
<< absl::StrFormat(" id:0x%08x", t->incoming_stream_id);
t->deframe_state = GRPC_DTS_FRAME;
err = init_frame_parser(t, requests_started);
if (!err.ok()) {
@@ -453,10 +451,9 @@ static grpc_error_handle init_frame_parser(grpc_chttp2_transport* t,
case GRPC_CHTTP2_FRAME_GOAWAY:
return init_goaway_parser(t);
default:
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(ERROR) << "Unknown frame type "
<< absl::StrFormat("%02x", t->incoming_frame_type);
}
GRPC_TRACE_LOG(http, ERROR)
<< "Unknown frame type "
<< absl::StrFormat("%02x", t->incoming_frame_type);
return init_non_header_skip_frame_parser(t);
}
}
@@ -790,10 +787,8 @@ static grpc_error_handle init_window_update_frame_parser(
grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(ERROR) << "Stream " << t->incoming_stream_id
<< " not found, ignoring WINDOW_UPDATE";
}
GRPC_TRACE_LOG(http, ERROR) << "Stream " << t->incoming_stream_id
<< " not found, ignoring WINDOW_UPDATE";
return init_non_header_skip_frame_parser(t);
}
s->call_tracer_wrapper.RecordIncomingBytes({9, 0, 0});

@@ -166,12 +166,10 @@ void HandshakeManager::CallNextHandshakerLocked(absl::Status error) {
error = GRPC_ERROR_CREATE("handshaker shutdown");
args_.endpoint.reset();
}
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this
<< ": handshaking complete -- scheduling "
"on_handshake_done with error="
<< error;
}
GRPC_TRACE_LOG(handshaker, INFO) << "handshake_manager " << this
<< ": handshaking complete -- scheduling "
"on_handshake_done with error="
<< error;
// Cancel deadline timer, since we're invoking the on_handshake_done
// callback now.
args_.event_engine->Cancel(deadline_timer_handle_);

@@ -458,12 +458,11 @@ void HealthWatcher::SetSubchannel(Subchannel* subchannel) {
if (created) producer_->Start(subchannel->Ref());
// Register ourself with the producer.
producer_->AddWatcher(this, health_check_service_name_);
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthWatcher " << this << ": registered with producer "
<< producer_.get() << " (created=" << created
<< ", health_check_service_name=\""
<< health_check_service_name_.value_or("N/A") << "\")";
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthWatcher " << this << ": registered with producer "
<< producer_.get() << " (created=" << created
<< ", health_check_service_name=\""
<< health_check_service_name_.value_or("N/A") << "\")";
}
void HealthWatcher::Notify(grpc_connectivity_state state, absl::Status status) {

@@ -740,12 +740,11 @@ void OutlierDetectionLb::MaybeUpdatePickerLocked() {
if (picker_ != nullptr) {
auto outlier_detection_picker =
MakeRefCounted<Picker>(this, picker_, config_->CountingEnabled());
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
<< "] updating connectivity: state="
<< ConnectivityStateName(state_) << " status=(" << status_
<< ") picker=" << outlier_detection_picker.get();
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] updating connectivity: state=" << ConnectivityStateName(state_)
<< " status=(" << status_
<< ") picker=" << outlier_detection_picker.get();
channel_control_helper()->UpdateState(state_, status_,
std::move(outlier_detection_picker));
}
@@ -806,12 +805,11 @@ void OutlierDetectionLb::Helper::UpdateState(
grpc_connectivity_state state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (parent()->shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent()
<< "] child connectivity state update: state="
<< ConnectivityStateName(state) << " (" << status
<< ") picker=" << picker.get();
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent()
<< "] child connectivity state update: state="
<< ConnectivityStateName(state) << " (" << status
<< ") picker=" << picker.get();
// Save the state and picker.
parent()->state_ = state;
parent()->status_ = status;
@@ -892,26 +890,24 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
}
}
}
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get() << "] found "
<< success_rate_ejection_candidates.size()
<< " success rate candidates and "
<< failure_percentage_ejection_candidates.size()
<< " failure percentage candidates; ejected_host_count="
<< ejected_host_count << "; success_rate_sum="
<< absl::StrFormat("%.3f", success_rate_sum);
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get() << "] found "
<< success_rate_ejection_candidates.size()
<< " success rate candidates and "
<< failure_percentage_ejection_candidates.size()
<< " failure percentage candidates; ejected_host_count="
<< ejected_host_count
<< "; success_rate_sum=" << absl::StrFormat("%.3f", success_rate_sum);
// success rate algorithm
if (!success_rate_ejection_candidates.empty() &&
success_rate_ejection_candidates.size() >=
config.success_rate_ejection->minimum_hosts) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
<< "] running success rate algorithm: "
<< "stdev_factor=" << config.success_rate_ejection->stdev_factor
<< ", enforcement_percentage="
<< config.success_rate_ejection->enforcement_percentage;
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] running success rate algorithm: "
<< "stdev_factor=" << config.success_rate_ejection->stdev_factor
<< ", enforcement_percentage="
<< config.success_rate_ejection->enforcement_percentage;
// calculate ejection threshold: (mean - stdev *
// (success_rate_ejection.stdev_factor / 1000))
double mean = success_rate_sum / success_rate_ejection_candidates.size();
@@ -936,13 +932,11 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
uint32_t random_key = absl::Uniform(bit_gen_, 1, 100);
double current_percent =
100.0 * ejected_host_count / parent_->endpoint_state_map_.size();
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
<< "] random_key=" << random_key
<< " ejected_host_count=" << ejected_host_count
<< " current_percent="
<< absl::StrFormat("%.3f", current_percent);
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] random_key=" << random_key
<< " ejected_host_count=" << ejected_host_count
<< " current_percent=" << absl::StrFormat("%.3f", current_percent);
if (random_key < config.success_rate_ejection->enforcement_percentage &&
(ejected_host_count == 0 ||
(current_percent < config.max_ejection_percent))) {
@@ -961,13 +955,12 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
if (!failure_percentage_ejection_candidates.empty() &&
failure_percentage_ejection_candidates.size() >=
config.failure_percentage_ejection->minimum_hosts) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
<< "] running failure percentage algorithm: "
<< "threshold=" << config.failure_percentage_ejection->threshold
<< ", enforcement_percentage="
<< config.failure_percentage_ejection->enforcement_percentage;
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] running failure percentage algorithm: "
<< "threshold=" << config.failure_percentage_ejection->threshold
<< ", enforcement_percentage="
<< config.failure_percentage_ejection->enforcement_percentage;
for (auto& candidate : failure_percentage_ejection_candidates) {
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
@@ -981,12 +974,11 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
uint32_t random_key = absl::Uniform(bit_gen_, 1, 100);
double current_percent =
100.0 * ejected_host_count / parent_->endpoint_state_map_.size();
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
<< "] random_key=" << random_key
<< " ejected_host_count=" << ejected_host_count
<< " current_percent=" << current_percent;
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] random_key=" << random_key
<< " ejected_host_count=" << ejected_host_count
<< " current_percent=" << current_percent;
if (random_key <
config.failure_percentage_ejection->enforcement_percentage &&
(ejected_host_count == 0 ||

@@ -735,15 +735,13 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::
OnConnectivityStateChange(grpc_connectivity_state new_state,
absl::Status status) {
if (watcher_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get()
<< "): connectivity changed: new_state="
<< ConnectivityStateName(new_state) << ", status=" << status
<< ", watcher=" << watcher_
<< ", subchannel_data_=" << subchannel_data_
<< ", pick_first_->selected_=" << pick_first_->selected_.get();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get()
<< "): connectivity changed: new_state="
<< ConnectivityStateName(new_state) << ", status=" << status
<< ", watcher=" << watcher_ << ", subchannel_data_=" << subchannel_data_
<< ", pick_first_->selected_=" << pick_first_->selected_.get();
// If we're still part of a subchannel list trying to connect, check
// if we're connected.
if (subchannel_data_ != nullptr) {
@@ -944,12 +942,10 @@ void PickFirst::SubchannelList::SubchannelData::RequestConnectionWithTimer() {
// If this is not the last subchannel in the list, start the timer.
if (index_ != subchannel_list_->size() - 1) {
PickFirst* p = subchannel_list_->policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << p << " subchannel list " << subchannel_list_
<< ": starting Connection Attempt Delay timer for "
<< p->connection_attempt_delay_.millis() << "ms for index "
<< index_;
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << p << " subchannel list " << subchannel_list_
<< ": starting Connection Attempt Delay timer for "
<< p->connection_attempt_delay_.millis() << "ms for index " << index_;
subchannel_list_->timer_handle_ =
p->channel_control_helper()->GetEventEngine()->RunAfter(
p->connection_attempt_delay_,
@@ -960,15 +956,13 @@ void PickFirst::SubchannelList::SubchannelData::RequestConnectionWithTimer() {
auto* sl = subchannel_list.get();
sl->policy_->work_serializer()->Run(
[subchannel_list = std::move(subchannel_list)]() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO)
<< "Pick First " << subchannel_list->policy_.get()
<< " subchannel list " << subchannel_list.get()
<< ": Connection Attempt Delay timer fired "
"(shutting_down="
<< subchannel_list->shutting_down_ << ", selected="
<< subchannel_list->policy_->selected_.get() << ")";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << subchannel_list->policy_.get()
<< " subchannel list " << subchannel_list.get()
<< ": Connection Attempt Delay timer fired "
"(shutting_down="
<< subchannel_list->shutting_down_ << ", selected="
<< subchannel_list->policy_->selected_.get() << ")";
if (subchannel_list->shutting_down_) return;
if (subchannel_list->policy_->selected_ != nullptr) return;
++subchannel_list->attempting_index_;
@@ -1588,13 +1582,11 @@ OldPickFirst::SubchannelList::SubchannelData::SubchannelData(
void OldPickFirst::SubchannelList::SubchannelData::ShutdownLocked() {
if (subchannel_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << subchannel_list_->policy_.get()
<< "] subchannel list " << subchannel_list_ << " index "
<< index_ << " of " << subchannel_list_->size()
<< " (subchannel " << subchannel_.get()
<< "): cancelling watch and unreffing subchannel";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << subchannel_list_->policy_.get() << "] subchannel list "
<< subchannel_list_ << " index " << index_ << " of "
<< subchannel_list_->size() << " (subchannel " << subchannel_.get()
<< "): cancelling watch and unreffing subchannel";
subchannel_->CancelConnectivityStateWatch(pending_watcher_);
pending_watcher_ = nullptr;
subchannel_.reset();
@@ -1814,12 +1806,10 @@ void OldPickFirst::SubchannelList::SubchannelData::
// If this is not the last subchannel in the list, start the timer.
if (index_ != subchannel_list_->size() - 1) {
OldPickFirst* p = subchannel_list_->policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << p << " subchannel list " << subchannel_list_
<< ": starting Connection Attempt Delay timer for "
<< p->connection_attempt_delay_.millis() << "ms for index "
<< index_;
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << p << " subchannel list " << subchannel_list_
<< ": starting Connection Attempt Delay timer for "
<< p->connection_attempt_delay_.millis() << "ms for index " << index_;
subchannel_list_->timer_handle_ =
p->channel_control_helper()->GetEventEngine()->RunAfter(
p->connection_attempt_delay_,
@@ -1830,15 +1820,13 @@ void OldPickFirst::SubchannelList::SubchannelData::
auto* sl = subchannel_list.get();
sl->policy_->work_serializer()->Run(
[subchannel_list = std::move(subchannel_list)]() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO)
<< "Pick First " << subchannel_list->policy_.get()
<< " subchannel list " << subchannel_list.get()
<< ": Connection Attempt Delay timer fired "
<< "(shutting_down="
<< subchannel_list->shutting_down_ << ", selected="
<< subchannel_list->policy_->selected_ << ")";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << subchannel_list->policy_.get()
<< " subchannel list " << subchannel_list.get()
<< ": Connection Attempt Delay timer fired "
<< "(shutting_down=" << subchannel_list->shutting_down_
<< ", selected=" << subchannel_list->policy_->selected_
<< ")";
if (subchannel_list->shutting_down_) return;
if (subchannel_list->policy_->selected_ != nullptr) return;
++subchannel_list->attempting_index_;
@@ -2004,12 +1992,11 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// TRANSIENT_FAILURE and dropping the existing (working) connection,
// but we can't ignore what the control plane has told us.
if (policy_->latest_pending_subchannel_list_.get() == this) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << policy_.get()
<< " promoting pending subchannel list "
<< policy_->latest_pending_subchannel_list_.get()
<< " to replace " << this;
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << policy_.get()
<< " promoting pending subchannel list "
<< policy_->latest_pending_subchannel_list_.get() << " to replace "
<< this;
policy_->UnsetSelectedSubchannel();
policy_->subchannel_list_ =
std::move(policy_->latest_pending_subchannel_list_);

@@ -435,12 +435,11 @@ void PriorityLb::ChoosePriorityLocked() {
return;
}
// Child has been failing for a while. Move on to the next priority.
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] skipping priority " << priority
<< ", child " << child_name << ": state="
<< ConnectivityStateName(child->connectivity_state())
<< ", failover timer not pending";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] skipping priority " << priority
<< ", child " << child_name
<< ": state=" << ConnectivityStateName(child->connectivity_state())
<< ", failover timer not pending";
}
// If we didn't find any priority to try, pick the first one in state
// CONNECTING.
@@ -472,12 +471,10 @@ void PriorityLb::ChoosePriorityLocked() {
void PriorityLb::SetCurrentPriorityLocked(int32_t priority,
bool deactivate_lower_priorities,
const char* reason) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] selecting priority " << priority
<< ", child " << config_->priorities()[priority] << " (" << reason
<< ", deactivate_lower_priorities=" << deactivate_lower_priorities
<< ")";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] selecting priority " << priority
<< ", child " << config_->priorities()[priority] << " (" << reason
<< ", deactivate_lower_priorities=" << deactivate_lower_priorities << ")";
current_priority_ = priority;
if (deactivate_lower_priorities) {
for (uint32_t p = priority + 1; p < config_->priorities().size(); ++p) {
@@ -500,12 +497,11 @@ void PriorityLb::SetCurrentPriorityLocked(int32_t priority,
PriorityLb::ChildPriority::DeactivationTimer::DeactivationTimer(
RefCountedPtr<PriorityLb::ChildPriority> child_priority)
: child_priority_(std::move(child_priority)) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get() << "): deactivating -- will remove in "
<< kChildRetentionInterval.millis() << "ms";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): deactivating -- will remove in "
<< kChildRetentionInterval.millis() << "ms";
timer_handle_ =
child_priority_->priority_policy_->channel_control_helper()
->GetEventEngine()
@@ -537,12 +533,10 @@ void PriorityLb::ChildPriority::DeactivationTimer::Orphan() {
void PriorityLb::ChildPriority::DeactivationTimer::OnTimerLocked() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get()
<< "): deactivation timer fired, deleting child";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): deactivation timer fired, deleting child";
child_priority_->priority_policy_->DeleteChild(child_priority_.get());
}
}
@@ -554,14 +548,12 @@ void PriorityLb::ChildPriority::DeactivationTimer::OnTimerLocked() {
PriorityLb::ChildPriority::FailoverTimer::FailoverTimer(
RefCountedPtr<PriorityLb::ChildPriority> child_priority)
: child_priority_(std::move(child_priority)) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): starting failover timer for "
<< child_priority_->priority_policy_->child_failover_timeout_.millis()
<< "ms";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): starting failover timer for "
<< child_priority_->priority_policy_->child_failover_timeout_.millis()
<< "ms";
timer_handle_ =
child_priority_->priority_policy_->channel_control_helper()
->GetEventEngine()
@@ -594,12 +586,10 @@ void PriorityLb::ChildPriority::FailoverTimer::Orphan() {
void PriorityLb::ChildPriority::FailoverTimer::OnTimerLocked() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get()
<< "): failover timer fired, reporting TRANSIENT_FAILURE";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): failover timer fired, reporting TRANSIENT_FAILURE";
child_priority_->OnConnectivityStateUpdateLocked(
GRPC_CHANNEL_TRANSIENT_FAILURE,
absl::Status(absl::StatusCode::kUnavailable, "failover timer fired"),
@@ -716,12 +706,10 @@ void PriorityLb::ChildPriority::ResetBackoffLocked() {
void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked(
grpc_connectivity_state state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this
<< "): state update: " << ConnectivityStateName(state) << " ("
<< status << ") picker " << picker.get();
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] child " << name_
<< " (" << this << "): state update: " << ConnectivityStateName(state)
<< " (" << status << ") picker " << picker.get();
// Store the state and picker.
connectivity_state_ = state;
connectivity_status_ = status;

@@ -570,15 +570,13 @@ absl::Status RingHash::RingHashEndpoint::UpdateChildPolicyLocked() {
void RingHash::RingHashEndpoint::OnStateUpdate(
grpc_connectivity_state new_state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << ring_hash_.get()
<< "] connectivity changed for endpoint " << this << " ("
<< ring_hash_->endpoints_[index_].ToString()
<< ", child_policy=" << child_policy_.get()
<< "): prev_state=" << ConnectivityStateName(connectivity_state_)
<< " new_state=" << ConnectivityStateName(new_state) << " ("
<< status << ")";
}
GRPC_TRACE_LOG(ring_hash_lb, INFO)
<< "[RH " << ring_hash_.get() << "] connectivity changed for endpoint "
<< this << " (" << ring_hash_->endpoints_[index_].ToString()
<< ", child_policy=" << child_policy_.get()
<< "): prev_state=" << ConnectivityStateName(connectivity_state_)
<< " new_state=" << ConnectivityStateName(new_state) << " (" << status
<< ")";
if (child_policy_ == nullptr) return; // Already orphaned.
// Update state.
const bool entered_transient_failure =
@@ -756,15 +754,13 @@ void RingHash::UpdateAggregatedConnectivityStateLocked(
state = GRPC_CHANNEL_TRANSIENT_FAILURE;
start_connection_attempt = true;
}
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this << "] setting connectivity state to "
<< ConnectivityStateName(state) << " (num_idle=" << num_idle
<< ", num_connecting=" << num_connecting
<< ", num_ready=" << num_ready
<< ", num_transient_failure=" << num_transient_failure
<< ", size=" << endpoints_.size()
<< ") -- start_connection_attempt=" << start_connection_attempt;
}
GRPC_TRACE_LOG(ring_hash_lb, INFO)
<< "[RH " << this << "] setting connectivity state to "
<< ConnectivityStateName(state) << " (num_idle=" << num_idle
<< ", num_connecting=" << num_connecting << ", num_ready=" << num_ready
<< ", num_transient_failure=" << num_transient_failure
<< ", size=" << endpoints_.size()
<< ") -- start_connection_attempt=" << start_connection_attempt;
// In TRANSIENT_FAILURE, report the last reported failure.
// Otherwise, report OK.
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
@@ -833,13 +829,12 @@ void RingHash::UpdateAggregatedConnectivityStateLocked(
auto it = endpoint_map_.find(
EndpointAddressSet(endpoints_[first_idle_index].addresses()));
CHECK(it != endpoint_map_.end());
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this
<< "] triggering internal connection attempt for endpoint "
<< it->second.get() << " ("
<< endpoints_[first_idle_index].ToString() << ") (index "
<< first_idle_index << " of " << endpoints_.size() << ")";
}
GRPC_TRACE_LOG(ring_hash_lb, INFO)
<< "[RH " << this
<< "] triggering internal connection attempt for endpoint "
<< it->second.get() << " (" << endpoints_[first_idle_index].ToString()
<< ") (index " << first_idle_index << " of " << endpoints_.size()
<< ")";
it->second->RequestConnectionLocked();
}
}

@@ -885,12 +885,10 @@ void RlsLb::ChildPolicyWrapper::StartUpdate(
lb_policy_->config_->child_policy_config_target_field_name(), target_,
lb_policy_->config_->child_policy_config(), &errors);
CHECK(child_policy_config.has_value());
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "]: validating update, config: "
<< JsonDump(*child_policy_config);
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] ChildPolicyWrapper=" << this
<< " [" << target_
<< "]: validating update, config: " << JsonDump(*child_policy_config);
auto config =
CoreConfiguration::Get().lb_policy_registry().ParseLoadBalancingConfig(
*child_policy_config);
@@ -921,12 +919,10 @@ absl::Status RlsLb::ChildPolicyWrapper::MaybeFinishUpdate() {
create_args.args = lb_policy_->channel_args_;
child_policy_ = MakeOrphanable<ChildPolicyHandler>(std::move(create_args),
&rls_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "], created new child policy handler "
<< child_policy_.get();
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] ChildPolicyWrapper=" << this
<< " [" << target_ << "], created new child policy handler "
<< child_policy_.get();
grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
lb_policy_->interested_parties());
}
@@ -949,13 +945,12 @@ void RlsLb::ChildPolicyWrapper::ChildPolicyHelper::UpdateState(
void RlsLb::ChildPolicyWrapper::ChildPolicyHelper::UpdateState(
grpc_connectivity_state state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << wrapper_->lb_policy_.get()
<< "] ChildPolicyWrapper=" << wrapper_.get() << " ["
<< wrapper_->target_ << "] ChildPolicyHelper=" << this
<< ": UpdateState(state=" << ConnectivityStateName(state)
<< ", status=" << status << ", picker=" << picker.get() << ")";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << wrapper_->lb_policy_.get()
<< "] ChildPolicyWrapper=" << wrapper_.get() << " [" << wrapper_->target_
<< "] ChildPolicyHelper=" << this
<< ": UpdateState(state=" << ConnectivityStateName(state)
<< ", status=" << status << ", picker=" << picker.get() << ")";
if (wrapper_->is_shutdown_) return;
{
MutexLock lock(&wrapper_->lb_policy_->mu_);
@@ -1162,13 +1157,12 @@ void RlsLb::Cache::Entry::BackoffTimer::Orphan() {
if (backoff_timer_task_handle_.has_value() &&
entry_->lb_policy_->channel_control_helper()->GetEventEngine()->Cancel(
*backoff_timer_task_handle_)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << entry_->lb_policy_.get()
<< "] cache entry=" << entry_.get() << " "
<< (entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString())
<< ", backoff timer canceled";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << entry_->lb_policy_.get()
<< "] cache entry=" << entry_.get() << " "
<< (entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString())
<< ", backoff timer canceled";
}
backoff_timer_task_handle_.reset();
Unref(DEBUG_LOCATION, "Orphan");
@@ -1177,13 +1171,12 @@ void RlsLb::Cache::Entry::BackoffTimer::OnBackoffTimerLocked() {
void RlsLb::Cache::Entry::BackoffTimer::OnBackoffTimerLocked() {
{
MutexLock lock(&entry_->lb_policy_->mu_);
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << entry_->lb_policy_.get()
<< "] cache entry=" << entry_.get() << " "
<< (entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString())
<< ", backoff timer fired";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << entry_->lb_policy_.get()
<< "] cache entry=" << entry_.get() << " "
<< (entry_->is_shutdown_ ? "(shut down)"
: entry_->lru_iterator_->ToString())
<< ", backoff timer fired";
// Skip the update if Orphaned
if (!backoff_timer_task_handle_.has_value()) return;
backoff_timer_task_handle_.reset();
@@ -1248,28 +1241,25 @@ LoadBalancingPolicy::PickResult RlsLb::Cache::Entry::Pick(PickArgs args) {
if (child_policy_wrapper->connectivity_state() ==
GRPC_CHANNEL_TRANSIENT_FAILURE &&
i < child_policy_wrappers_.size() - 1) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
<< " " << lru_iterator_->ToString() << ": target "
<< child_policy_wrapper->target() << " (" << i << " of "
<< child_policy_wrappers_.size()
<< ") in state TRANSIENT_FAILURE; skipping";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
<< lru_iterator_->ToString() << ": target "
<< child_policy_wrapper->target() << " (" << i << " of "
<< child_policy_wrappers_.size()
<< ") in state TRANSIENT_FAILURE; skipping";
continue;
}
break;
}
// Child policy not in TRANSIENT_FAILURE or is the last target in
// the list, so delegate.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
<< " " << lru_iterator_->ToString() << ": target "
<< child_policy_wrapper->target() << " (" << i << " of "
<< child_policy_wrappers_.size() << ") in state "
<< ConnectivityStateName(
child_policy_wrapper->connectivity_state())
<< "; delegating";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
<< lru_iterator_->ToString() << ": target "
<< child_policy_wrapper->target() << " (" << i << " of "
<< child_policy_wrappers_.size() << ") in state "
<< ConnectivityStateName(child_policy_wrapper->connectivity_state())
<< "; delegating";
auto pick_result = child_policy_wrapper->Pick(args);
lb_policy_->MaybeExportPickCount(kMetricTargetPicks,
child_policy_wrapper->target(), pick_result);

@@ -185,12 +185,11 @@ RoundRobin::Picker::Picker(
// the picker, see https://github.com/grpc/grpc-go/issues/2580.
size_t index = absl::Uniform<size_t>(parent->bit_gen_, 0, pickers_.size());
last_picked_index_.store(index, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << parent_ << " picker " << this
<< "] created picker from endpoint_list="
<< parent_->endpoint_list_.get() << " with " << pickers_.size()
<< " READY children; last_picked_index_=" << index;
}
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << parent_ << " picker " << this
<< "] created picker from endpoint_list=" << parent_->endpoint_list_.get()
<< " with " << pickers_.size()
<< " READY children; last_picked_index_=" << index;
}
RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs args) {
@@ -292,16 +291,13 @@ void RoundRobin::RoundRobinEndpointList::RoundRobinEndpoint::OnStateUpdate(
grpc_connectivity_state new_state, const absl::Status& status) {
auto* rr_endpoint_list = endpoint_list<RoundRobinEndpointList>();
auto* round_robin = policy<RoundRobin>();
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << round_robin << "] connectivity changed for child "
<< this << ", endpoint_list " << rr_endpoint_list << " (index "
<< Index() << " of " << rr_endpoint_list->size()
<< "): prev_state="
<< (old_state.has_value() ? ConnectivityStateName(*old_state)
: "N/A")
<< " new_state=" << ConnectivityStateName(new_state) << " ("
<< status << ")";
}
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin << "] connectivity changed for child " << this
<< ", endpoint_list " << rr_endpoint_list << " (index " << Index()
<< " of " << rr_endpoint_list->size() << "): prev_state="
<< (old_state.has_value() ? ConnectivityStateName(*old_state) : "N/A")
<< " new_state=" << ConnectivityStateName(new_state) << " (" << status
<< ")";
if (new_state == GRPC_CHANNEL_IDLE) {
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin << "] child " << this

@@ -447,28 +447,24 @@ void WeightedRoundRobin::EndpointWeight::MaybeUpdateWeight(
weight = qps / (utilization + penalty);
}
if (weight == 0) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": qps=" << qps << ", eps=" << eps
<< ", utilization=" << utilization
<< ": error_util_penalty=" << error_utilization_penalty
<< ", weight=" << weight << " (not updating)";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": qps=" << qps << ", eps=" << eps << ", utilization=" << utilization
<< ": error_util_penalty=" << error_utilization_penalty
<< ", weight=" << weight << " (not updating)";
return;
}
Timestamp now = Timestamp::Now();
// Grab the lock and update the data.
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": qps=" << qps << ", eps=" << eps
<< ", utilization=" << utilization
<< " error_util_penalty=" << error_utilization_penalty
<< " : setting weight=" << weight << " weight_=" << weight_
<< " now=" << now.ToString()
<< " last_update_time_=" << last_update_time_.ToString()
<< " non_empty_since_=" << non_empty_since_.ToString();
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": qps=" << qps << ", eps=" << eps << ", utilization=" << utilization
<< " error_util_penalty=" << error_utilization_penalty
<< " : setting weight=" << weight << " weight_=" << weight_
<< " now=" << now.ToString()
<< " last_update_time_=" << last_update_time_.ToString()
<< " non_empty_since_=" << non_empty_since_.ToString();
if (non_empty_since_ == Timestamp::InfFuture()) non_empty_since_ = now;
weight_ = weight;
last_update_time_ = now;
@@ -478,16 +474,14 @@ float WeightedRoundRobin::EndpointWeight::GetWeight(
Timestamp now, Duration weight_expiration_period, Duration blackout_period,
uint64_t* num_not_yet_usable, uint64_t* num_stale) {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": getting weight: now=" << now.ToString()
<< " weight_expiration_period="
<< weight_expiration_period.ToString()
<< " blackout_period=" << blackout_period.ToString()
<< " last_update_time_=" << last_update_time_.ToString()
<< " non_empty_since_=" << non_empty_since_.ToString()
<< " weight_=" << weight_;
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << "] subchannel " << key_.ToString()
<< ": getting weight: now=" << now.ToString()
<< " weight_expiration_period=" << weight_expiration_period.ToString()
<< " blackout_period=" << blackout_period.ToString()
<< " last_update_time_=" << last_update_time_.ToString()
<< " non_empty_since_=" << non_empty_since_.ToString()
<< " weight_=" << weight_;
// If the most recent update was longer ago than the expiration
// period, reset non_empty_since_ so that we apply the blackout period
// again if we start getting data again in the future, and return 0.
@@ -865,16 +859,13 @@ void WeightedRoundRobin::WrrEndpointList::WrrEndpoint::OnStateUpdate(
grpc_connectivity_state new_state, const absl::Status& status) {
auto* wrr_endpoint_list = endpoint_list<WrrEndpointList>();
auto* wrr = policy<WeightedRoundRobin>();
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr << "] connectivity changed for child " << this
<< ", endpoint_list " << wrr_endpoint_list << " (index "
<< Index() << " of " << wrr_endpoint_list->size()
<< "): prev_state="
<< (old_state.has_value() ? ConnectivityStateName(*old_state)
: "N/A")
<< " new_state=" << ConnectivityStateName(new_state) << " ("
<< status << ")";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] connectivity changed for child " << this
<< ", endpoint_list " << wrr_endpoint_list << " (index " << Index()
<< " of " << wrr_endpoint_list->size() << "): prev_state="
<< (old_state.has_value() ? ConnectivityStateName(*old_state) : "N/A")
<< " new_state=" << ConnectivityStateName(new_state) << " (" << status
<< ")";
if (new_state == GRPC_CHANNEL_IDLE) {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] child " << this

@@ -402,13 +402,10 @@ void WeightedTargetLb::UpdateStateLocked() {
continue;
}
auto child_picker = child->picker();
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << this << "] child=" << child_name
<< " state="
<< ConnectivityStateName(child->connectivity_state())
<< " weight=" << child->weight()
<< " picker=" << child_picker.get();
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this << "] child=" << child_name
<< " state=" << ConnectivityStateName(child->connectivity_state())
<< " weight=" << child->weight() << " picker=" << child_picker.get();
switch (child->connectivity_state()) {
case GRPC_CHANNEL_READY: {
CHECK_GT(child->weight(), 0u);
@@ -488,13 +485,11 @@ WeightedTargetLb::WeightedChild::DelayedRemovalTimer::DelayedRemovalTimer(
void WeightedTargetLb::WeightedChild::DelayedRemovalTimer::Orphan() {
if (timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb "
<< weighted_child_->weighted_target_policy_.get()
<< "] WeightedChild " << weighted_child_.get() << " "
<< weighted_child_->name_
<< ": cancelling delayed removal timer";
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb "
<< weighted_child_->weighted_target_policy_.get() << "] WeightedChild "
<< weighted_child_.get() << " " << weighted_child_->name_
<< ": cancelling delayed removal timer";
weighted_child_->weighted_target_policy_->channel_control_helper()
->GetEventEngine()
->Cancel(*timer_handle_);
@ -619,13 +614,11 @@ void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
RefCountedPtr<SubchannelPicker> picker) {
// Cache the picker in the WeightedChild.
picker_ = std::move(picker);
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": connectivity state update: state="
<< ConnectivityStateName(state) << " (" << status
<< ") picker=" << picker_.get();
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": connectivity state update: state=" << ConnectivityStateName(state)
<< " (" << status << ") picker=" << picker_.get();
// If the child reports IDLE, immediately tell it to exit idle.
if (state == GRPC_CHANNEL_IDLE) child_policy_->ExitIdleLocked();
// Decide what state to report for aggregation purposes.

@ -741,12 +741,10 @@ void XdsClusterImplLb::MaybeUpdatePickerLocked() {
// Otherwise, update only if we have a child picker.
if (picker_ != nullptr) {
auto drop_picker = MakeRefCounted<Picker>(this, picker_);
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this
<< "] updating connectivity: state="
<< ConnectivityStateName(state_) << " status=(" << status_
<< ") picker=" << drop_picker.get();
}
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this
<< "] updating connectivity: state=" << ConnectivityStateName(state_)
<< " status=(" << status_ << ") picker=" << drop_picker.get();
channel_control_helper()->UpdateState(state_, status_,
std::move(drop_picker));
}
@ -841,12 +839,11 @@ void XdsClusterImplLb::Helper::UpdateState(
grpc_connectivity_state state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (parent()->shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << parent()
<< "] child connectivity state update: state="
<< ConnectivityStateName(state) << " (" << status
<< ") picker=" << picker.get();
}
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << parent()
<< "] child connectivity state update: state="
<< ConnectivityStateName(state) << " (" << status
<< ") picker=" << picker.get();
// Save the state and picker.
parent()->state_ = state;
parent()->status_ = status;

@ -531,13 +531,12 @@ void XdsClusterManagerLb::ClusterChild::OnDelayedRemovalTimerLocked() {
void XdsClusterManagerLb::ClusterChild::Helper::UpdateState(
grpc_connectivity_state state, const absl::Status& status,
RefCountedPtr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb "
<< xds_cluster_manager_child_->xds_cluster_manager_policy_.get()
<< "] child " << xds_cluster_manager_child_->name_
<< ": received update: state=" << ConnectivityStateName(state)
<< " (" << status << ") picker=" << picker.get();
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb "
<< xds_cluster_manager_child_->xds_cluster_manager_policy_.get()
<< "] child " << xds_cluster_manager_child_->name_
<< ": received update: state=" << ConnectivityStateName(state) << " ("
<< status << ") picker=" << picker.get();
if (xds_cluster_manager_child_->xds_cluster_manager_policy_->shutting_down_) {
return;
}

@ -759,13 +759,10 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
args_ = std::move(args.args);
override_host_status_set_ = it->second->cluster->override_host_statuses;
connection_idle_timeout_ = it->second->cluster->connection_idle_timeout;
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
<< "] override host status set: "
<< override_host_status_set_.ToString()
<< " connection idle timeout: "
<< connection_idle_timeout_.ToString();
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] override host status set: " << override_host_status_set_.ToString()
<< " connection idle timeout: " << connection_idle_timeout_.ToString();
// Update address map and wrap endpoint iterator for child policy.
if (args.addresses.ok()) {
UpdateAddressMap(**args.addresses);
@ -796,12 +793,11 @@ void XdsOverrideHostLb::MaybeUpdatePickerLocked() {
if (picker_ != nullptr) {
auto xds_override_host_picker = MakeRefCounted<Picker>(
RefAsSubclass<XdsOverrideHostLb>(), picker_, override_host_status_set_);
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
<< "] updating connectivity: state="
<< ConnectivityStateName(state_) << " status=(" << status_
<< ") picker=" << xds_override_host_picker.get();
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] updating connectivity: state=" << ConnectivityStateName(state_)
<< " status=(" << status_
<< ") picker=" << xds_override_host_picker.get();
channel_control_helper()->UpdateState(state_, status_,
std::move(xds_override_host_picker));
}
@ -843,12 +839,11 @@ void XdsOverrideHostLb::UpdateAddressMap(
// Skip draining hosts if not in the override status set.
if (status.status() == XdsHealthStatus::kDraining &&
!override_host_status_set_.Contains(status)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] endpoint "
<< endpoint.ToString()
<< ": draining but not in override_host_status set -- "
"ignoring";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] endpoint "
<< endpoint.ToString()
<< ": draining but not in override_host_status set -- "
"ignoring";
return;
}
std::vector<std::string> addresses;
@ -905,13 +900,11 @@ void XdsOverrideHostLb::UpdateAddressMap(
it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>())
.first;
}
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] map key " << address
<< ": setting "
<< "eds_health_status="
<< address_info.eds_health_status.ToString()
<< " address_list=" << address_info.address_list.c_str();
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] map key " << address
<< ": setting "
<< "eds_health_status=" << address_info.eds_health_status.ToString()
<< " address_list=" << address_info.address_list.c_str();
it->second->set_eds_health_status(address_info.eds_health_status);
it->second->set_address_list(std::move(address_info.address_list));
// Check the entry's last_used_time to determine the next time at

@ -1076,12 +1076,11 @@ void XdsResolver::GenerateResult() {
Result result;
result.addresses.emplace();
result.service_config = CreateServiceConfig();
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] generated service config: "
<< (result.service_config.ok()
? ((*result.service_config)->json_string())
: result.service_config.status().ToString());
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] generated service config: "
<< (result.service_config.ok()
? ((*result.service_config)->json_string())
: result.service_config.status().ToString());
result.args =
args_.SetObject(xds_client_.Ref(DEBUG_LOCATION, "xds resolver result"))
.SetObject(config_selector)

@ -250,16 +250,14 @@ class XdsClient::XdsChannel::AdsCall final
}
void OnTimer() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << ads_call_->xds_client()
<< "] xds server "
<< ads_call_->xds_channel()->server_.server_uri()
<< ": timeout obtaining resource {type=" << type_->type_url()
<< " name="
<< XdsClient::ConstructFullXdsResourceName(
name_.authority, type_->type_url(), name_.key)
<< "} from xds server";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << ads_call_->xds_client() << "] xds server "
<< ads_call_->xds_channel()->server_.server_uri()
<< ": timeout obtaining resource {type=" << type_->type_url()
<< " name="
<< XdsClient::ConstructFullXdsResourceName(
name_.authority, type_->type_url(), name_.key)
<< "} from xds server";
{
MutexLock lock(&ads_call_->xds_client()->mu_);
timer_handle_.reset();
@ -571,13 +569,10 @@ bool XdsClient::XdsChannel::MaybeFallbackLocked(
type_resource.first, {authority, key_state.first});
}
}
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get() << "] authority "
<< authority << ": added fallback server "
<< xds_servers[i]->server_uri() << " ("
<< authority_state.xds_channels.back()->status().ToString()
<< ")";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] authority " << authority
<< ": added fallback server " << xds_servers[i]->server_uri() << " ("
<< authority_state.xds_channels.back()->status().ToString() << ")";
if (authority_state.xds_channels.back()->status().ok()) return true;
}
GRPC_TRACE_LOG(xds_client, INFO)
@ -723,12 +718,11 @@ void XdsClient::XdsChannel::RetryableCall<T>::StartRetryTimerLocked() {
const Timestamp next_attempt_time = backoff_.NextAttemptTime();
const Duration timeout =
std::max(next_attempt_time - Timestamp::Now(), Duration::Zero());
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_channel()->xds_client()
<< "] xds server " << xds_channel()->server_.server_uri()
<< ": call attempt failed; retry timer will fire in "
<< timeout.millis() << "ms.";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_channel()->xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": call attempt failed; retry timer will fire in " << timeout.millis()
<< "ms.";
timer_handle_ = xds_channel()->xds_client()->engine()->RunAfter(
timeout,
[self = this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start")]() {
@ -779,13 +773,12 @@ class XdsClient::XdsChannel::AdsCall::AdsReadDelayHandle final
absl::Status
XdsClient::XdsChannel::AdsCall::AdsResponseParser::ProcessAdsResponseFields(
AdsResponseFields fields) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << ads_call_->xds_client() << "] xds server "
<< ads_call_->xds_channel()->server_.server_uri()
<< ": received ADS response: type_url=" << fields.type_url
<< ", version=" << fields.version << ", nonce=" << fields.nonce
<< ", num_resources=" << fields.num_resources;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << ads_call_->xds_client() << "] xds server "
<< ads_call_->xds_channel()->server_.server_uri()
<< ": received ADS response: type_url=" << fields.type_url
<< ", version=" << fields.version << ", nonce=" << fields.nonce
<< ", num_resources=" << fields.num_resources;
result_.type =
ads_call_->xds_client()->GetResourceTypeLocked(fields.type_url);
if (result_.type == nullptr) {
@ -991,12 +984,11 @@ XdsClient::XdsChannel::AdsCall::AdsCall(
RefCountedPtr<AdsCall>(this)));
CHECK(streaming_call_ != nullptr);
// Start the call.
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": starting ADS call (ads_call: " << this
<< ", streaming_call: " << streaming_call_.get() << ")";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": starting ADS call (ads_call: " << this
<< ", streaming_call: " << streaming_call_.get() << ")";
// If this is a reconnect, add any necessary subscriptions from what's
// already in the cache.
for (auto& a : xds_client()->authority_state_map_) {
@ -1044,13 +1036,12 @@ void XdsClient::XdsChannel::AdsCall::SendMessageLocked(
state.nonce, ResourceNamesForRequest(type), state.status,
!sent_initial_message_);
sent_initial_message_ = true;
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": sending ADS request: type=" << type->type_url()
<< " version=" << xds_channel()->resource_type_version_map_[type]
<< " nonce=" << state.nonce << " error=" << state.status;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": sending ADS request: type=" << type->type_url()
<< " version=" << xds_channel()->resource_type_version_map_[type]
<< " nonce=" << state.nonce << " error=" << state.status;
state.status = absl::OkStatus();
streaming_call_->SendMessage(std::move(serialized_message));
send_message_pending_ = type;
@ -1228,14 +1219,12 @@ void XdsClient::XdsChannel::AdsCall::OnRecvMessage(absl::string_view payload) {
void XdsClient::XdsChannel::AdsCall::OnStatusReceived(absl::Status status) {
{
MutexLock lock(&xds_client()->mu_);
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": ADS call status received (xds_channel=" << xds_channel()
<< ", ads_call=" << this
<< ", streaming_call=" << streaming_call_.get()
<< "): " << status;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": ADS call status received (xds_channel=" << xds_channel()
<< ", ads_call=" << this << ", streaming_call=" << streaming_call_.get()
<< "): " << status;
// Cancel any does-not-exist timers that may be pending.
for (const auto& p : state_map_) {
for (const auto& q : p.second.subscribed_resources) {
@ -1301,12 +1290,11 @@ void XdsClient::XdsChannel::LrsCall::Timer::Orphan() {
}
void XdsClient::XdsChannel::LrsCall::Timer::ScheduleNextReportLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< lrs_call_->xds_channel()->server_.server_uri()
<< ": scheduling next load report in "
<< lrs_call_->load_reporting_interval_;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< lrs_call_->xds_channel()->server_.server_uri()
<< ": scheduling next load report in "
<< lrs_call_->load_reporting_interval_;
timer_handle_ = xds_client()->engine()->RunAfter(
lrs_call_->load_reporting_interval_,
[self = Ref(DEBUG_LOCATION, "timer")]() {
@ -1344,12 +1332,11 @@ XdsClient::XdsChannel::LrsCall::LrsCall(
RefCountedPtr<LrsCall>(this)));
CHECK(streaming_call_ != nullptr);
// Start the call.
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": starting LRS call (lrs_call=" << this
<< ", streaming_call=" << streaming_call_.get() << ")";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": starting LRS call (lrs_call=" << this
<< ", streaming_call=" << streaming_call_.get() << ")";
// Send the initial request.
std::string serialized_payload = xds_client()->api_.CreateLrsInitialRequest();
SendMessageLocked(std::move(serialized_payload));
@ -1473,12 +1460,11 @@ void XdsClient::XdsChannel::LrsCall::OnRecvMessage(absl::string_view payload) {
Duration::Milliseconds(GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS)) {
new_load_reporting_interval =
Duration::Milliseconds(GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": increased load_report_interval to minimum value "
<< GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS << "ms";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": increased load_report_interval to minimum value "
<< GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS << "ms";
}
// Ignore identical update.
if (send_all_clusters == send_all_clusters_ &&
@ -1506,14 +1492,12 @@ void XdsClient::XdsChannel::LrsCall::OnRecvMessage(absl::string_view payload) {
void XdsClient::XdsChannel::LrsCall::OnStatusReceived(absl::Status status) {
MutexLock lock(&xds_client()->mu_);
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": LRS call status received (xds_channel=" << xds_channel()
<< ", lrs_call=" << this
<< ", streaming_call=" << streaming_call_.get()
<< "): " << status;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": LRS call status received (xds_channel=" << xds_channel()
<< ", lrs_call=" << this << ", streaming_call=" << streaming_call_.get()
<< "): " << status;
// Ignore status from a stale call.
if (IsCurrentCallOnChannel()) {
// Try to restart the call.
@ -2076,14 +2060,12 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
if (locality_state.locality_stats != nullptr) {
locality_snapshot +=
locality_state.locality_stats->GetSnapshotAndReset();
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this
<< "] cluster=" << cluster_key.first.c_str()
<< " eds_service_name=" << cluster_key.second.c_str()
<< " locality="
<< locality_name->human_readable_string().c_str()
<< " locality_stats=" << locality_state.locality_stats;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this
<< "] cluster=" << cluster_key.first.c_str()
<< " eds_service_name=" << cluster_key.second.c_str()
<< " locality=" << locality_name->human_readable_string().c_str()
<< " locality_stats=" << locality_state.locality_stats;
}
// If the only thing left in this entry was final snapshots from
// deleted locality stats objects, remove the entry.

@ -100,26 +100,21 @@ XdsClusterLocalityStats::XdsClusterLocalityStats(
cluster_name_(cluster_name),
eds_service_name_(eds_service_name),
name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get()
<< "] created locality stats " << this << " for {" << lrs_server_
<< ", " << cluster_name_ << ", " << eds_service_name_ << ", "
<< (name_ == nullptr ? "<none>"
: name_->human_readable_string().c_str())
<< "}";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] created locality stats "
<< this << " for {" << lrs_server_ << ", " << cluster_name_ << ", "
<< eds_service_name_ << ", "
<< (name_ == nullptr ? "<none>" : name_->human_readable_string().c_str())
<< "}";
}
XdsClusterLocalityStats::~XdsClusterLocalityStats() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get()
<< "] destroying locality stats " << this << " for {"
<< lrs_server_ << ", " << cluster_name_ << ", "
<< eds_service_name_ << ", "
<< (name_ == nullptr ? "<none>"
: name_->human_readable_string().c_str())
<< "}";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] destroying locality stats "
<< this << " for {" << lrs_server_ << ", " << cluster_name_ << ", "
<< eds_service_name_ << ", "
<< (name_ == nullptr ? "<none>" : name_->human_readable_string().c_str())
<< "}";
xds_client_->RemoveClusterLocalityStats(lrs_server_, cluster_name_,
eds_service_name_, name_, this);
xds_client_.reset(DEBUG_LOCATION, "LocalityStats");

@ -248,14 +248,12 @@ class EnvironmentAutoDetectHelper
queries_.push_back(grpc_core::MakeOrphanable<grpc_core::GcpMetadataQuery>(
element.first, &pollent_,
[this](std::string attribute, absl::StatusOr<std::string> result) {
if (GRPC_TRACE_FLAG_ENABLED(environment_autodetect)) {
LOG(INFO) << "Environment AutoDetect: Attribute: \"" << attribute
<< "\" Result: \""
<< (result.ok()
? result.value()
: grpc_core::StatusToString(result.status()))
<< "\"";
}
GRPC_TRACE_LOG(environment_autodetect, INFO)
<< "Environment AutoDetect: Attribute: \"" << attribute
<< "\" Result: \""
<< (result.ok() ? result.value()
: grpc_core::StatusToString(result.status()))
<< "\"";
absl::optional<EnvironmentAutoDetect::ResourceType> resource;
{
grpc_core::MutexLock lock(&mu_);

@ -367,15 +367,13 @@ BackendMetricData BackendMetricState::GetBackendMetricData() {
data.named_metrics[r.first] = r.second;
}
}
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this
<< "] Backend metric data returned: cpu:" << data.cpu_utilization
<< " mem:" << data.mem_utilization << " qps:" << data.qps
<< " eps:" << data.eps
<< " utilization size:" << data.utilization.size()
<< " request_cost size:" << data.request_cost.size()
<< "named_metrics size:" << data.named_metrics.size();
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this
<< "] Backend metric data returned: cpu:" << data.cpu_utilization
<< " mem:" << data.mem_utilization << " qps:" << data.qps
<< " eps:" << data.eps << " utilization size:" << data.utilization.size()
<< " request_cost size:" << data.request_cost.size()
<< "named_metrics size:" << data.named_metrics.size();
return data;
}

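For context on the new macro: GRPC_TRACE_LOG appears to be a thin wrapper that folds the trace-flag check into an absl logging statement. A sketch of the assumed shape (the authoritative definition lives in the gRPC tree, so treat this as an approximation rather than the real implementation):

// Assumed shape only: delegate to absl's LOG_IF so the flag is checked
// first and the streamed arguments are not evaluated when the tracer is
// disabled.
#define GRPC_TRACE_LOG(tracer, level) \
  LOG_IF(level, GRPC_TRACE_FLAG_ENABLED(tracer))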