[Gpr_To_Absl_Logging]

pull/37349/head
tanvi-jagtap 9 months ago
parent a10c7a8d4f
commit 70ca4b8dce
16 changed files:
  1. src/core/load_balancing/grpclb/grpclb.cc (25 lines changed)
  2. src/core/load_balancing/health_check_client.cc (8 lines changed)
  3. src/core/load_balancing/oob_backend_metric.cc (4 lines changed)
  4. src/core/load_balancing/outlier_detection/outlier_detection.cc (44 lines changed)
  5. src/core/load_balancing/pick_first/pick_first.cc (61 lines changed)
  6. src/core/load_balancing/priority/priority.cc (22 lines changed)
  7. src/core/load_balancing/ring_hash/ring_hash.cc (5 lines changed)
  8. src/core/load_balancing/rls/rls.cc (80 lines changed)
  9. src/core/load_balancing/round_robin/round_robin.cc (19 lines changed)
  10. src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc (43 lines changed)
  11. src/core/load_balancing/weighted_target/weighted_target.cc (21 lines changed)
  12. src/core/load_balancing/xds/cds.cc (14 lines changed)
  13. src/core/load_balancing/xds/xds_cluster_impl.cc (24 lines changed)
  14. src/core/load_balancing/xds/xds_cluster_manager.cc (14 lines changed)
  15. src/core/load_balancing/xds/xds_override_host.cc (76 lines changed)
  16. src/core/load_balancing/xds/xds_wrr_locality.cc (9 lines changed)
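
Every hunk in this commit makes the same mechanical change: multi-line GRPC_TRACE_LOG statements left over from the gpr_log-to-absl conversion (per the commit title) are re-wrapped so that each continuation line begins with "<<" instead of leaving the operator dangling at the end of the previous line. A minimal before/after sketch of the pattern, taken from the first grpclb.cc hunk below (no behavior change, only line wrapping; presumably to satisfy clang-format):

  // Before: the stream operator trails the line and the continuation
  // starts with a bare string literal.
  GRPC_TRACE_LOG(glb, INFO)
      << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this <<
      ": Starting LB call " << lb_call_;

  // After: every continuation line leads with "<<".
  GRPC_TRACE_LOG(glb, INFO)
      << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
      << ": Starting LB call " << lb_call_;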

src/core/load_balancing/grpclb/grpclb.cc

@@ -949,8 +949,8 @@ void GrpcLb::BalancerCallState::Orphan() {
 void GrpcLb::BalancerCallState::StartQuery() {
   CHECK_NE(lb_call_, nullptr);
   GRPC_TRACE_LOG(glb, INFO)
-      << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this <<
-      ": Starting LB call " << lb_call_;
+      << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
+      << ": Starting LB call " << lb_call_;
   // Create the ops.
   grpc_call_error call_error;
   grpc_op ops[3];
@@ -1526,8 +1526,9 @@ class GrpcLb::NullLbTokenEndpointIterator final
   void ForEach(absl::FunctionRef<void(const EndpointAddresses&)> callback)
       const override {
     parent_it_->ForEach([&](const EndpointAddresses& endpoint) {
-      GRPC_TRACE_LOG(glb, INFO) << "[grpclb " << this <<
-          "] fallback address: " << endpoint.ToString();
+      GRPC_TRACE_LOG(glb, INFO)
+          << "[grpclb " << this
+          << "] fallback address: " << endpoint.ToString();
       callback(EndpointAddresses(endpoint.addresses(),
                                  endpoint.args().SetObject(empty_token_)));
     });
@@ -1762,9 +1763,9 @@ OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
       std::make_unique<Helper>(RefAsSubclass<GrpcLb>(DEBUG_LOCATION, "Helper"));
   OrphanablePtr<LoadBalancingPolicy> lb_policy =
       MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), &glb_trace);
-  GRPC_TRACE_LOG(glb, INFO) << "[grpclb " << this
-      << "] Created new child policy handler (" <<
-      lb_policy.get() << ")";
+  GRPC_TRACE_LOG(glb, INFO)
+      << "[grpclb " << this << "] Created new child policy handler ("
+      << lb_policy.get() << ")";
   // Add the gRPC LB's interested_parties pollset_set to that of the newly
   // created child policy. This will make the child policy progress upon
   // activity on gRPC LB, which in turn is tied to the application's call.
@@ -1816,8 +1817,8 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
   }
   // Update the policy.
   GRPC_TRACE_LOG(glb, INFO)
-      << "[grpclb " << this << "] Updating child policy handler " <<
-      child_policy_.get();
+      << "[grpclb " << this << "] Updating child policy handler "
+      << child_policy_.get();
   // TODO(roth): If we're in fallback mode and the child policy rejects the
   // update, we should propagate that failure back to the resolver somehow.
   (void)child_policy_->UpdateLocked(std::move(update_args));
@@ -1859,9 +1860,9 @@ void GrpcLb::OnSubchannelCacheTimerLocked() {
     subchannel_cache_timer_handle_.reset();
     auto it = cached_subchannels_.begin();
     if (it != cached_subchannels_.end()) {
-      GRPC_TRACE_LOG(glb, INFO) << "[grpclb " << this << "] removing "
-          << it->second.size() <<
-          " subchannels from cache";
+      GRPC_TRACE_LOG(glb, INFO)
+          << "[grpclb " << this << "] removing " << it->second.size()
+          << " subchannels from cache";
       cached_subchannels_.erase(it);
     }
     if (!cached_subchannels_.empty()) {

src/core/load_balancing/health_check_client.cc

@@ -342,8 +342,8 @@ class HealthProducer::ConnectivityWatcher final
 void HealthProducer::Start(RefCountedPtr<Subchannel> subchannel) {
   GRPC_TRACE_LOG(health_check_client, INFO)
-      << "HealthProducer " << this << ": starting with subchannel " <<
-      subchannel.get();
+      << "HealthProducer " << this << ": starting with subchannel "
+      << subchannel.get();
   subchannel_ = std::move(subchannel);
   {
     MutexLock lock(&mu_);
@@ -498,8 +498,8 @@ MakeHealthCheckWatcher(
         args.GetOwnedString(GRPC_ARG_HEALTH_CHECK_SERVICE_NAME);
   }
   GRPC_TRACE_LOG(health_check_client, INFO)
-      << "creating HealthWatcher -- health_check_service_name=\"" <<
-      health_check_service_name.value_or("N/A") << "\"";
+      << "creating HealthWatcher -- health_check_service_name=\""
+      << health_check_service_name.value_or("N/A") << "\"";
   return std::make_unique<HealthWatcher>(std::move(work_serializer),
                                          std::move(health_check_service_name),
                                          std::move(watcher));

src/core/load_balancing/oob_backend_metric.cc

@@ -274,8 +274,8 @@ void OrcaProducer::MaybeStartStreamLocked() {
 void OrcaProducer::NotifyWatchers(
     const BackendMetricData& backend_metric_data) {
-  GRPC_TRACE_LOG(orca_client, INFO) << "OrcaProducer " << this <<
-      ": reporting backend metrics to watchers";
+  GRPC_TRACE_LOG(orca_client, INFO)
+      << "OrcaProducer " << this << ": reporting backend metrics to watchers";
   MutexLock lock(&mu_);
   for (OrcaWatcher* watcher : watchers_) {
     watcher->watcher()->OnBackendMetricReport(backend_metric_data);

src/core/load_balancing/outlier_detection/outlier_detection.cc

@@ -581,8 +581,8 @@ OutlierDetectionLb::OutlierDetectionLb(Args args)
 OutlierDetectionLb::~OutlierDetectionLb() {
   GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-      << "[outlier_detection_lb " << this <<
-      "] destroying outlier_detection LB policy";
+      << "[outlier_detection_lb " << this
+      << "] destroying outlier_detection LB policy";
 }
 
 void OutlierDetectionLb::ShutdownLocked() {
@@ -622,8 +622,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
     if (!config_->CountingEnabled()) {
       // No need for timer. Cancel the current timer, if any.
       GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-          << "[outlier_detection_lb " << this <<
-          "] counting disabled, cancelling timer";
+          << "[outlier_detection_lb " << this
+          << "] counting disabled, cancelling timer";
       ejection_timer_.reset();
     } else if (ejection_timer_ == nullptr) {
       // No timer running. Start it now.
@@ -642,8 +642,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
       // Note that if the new deadline is in the past, the timer will fire
       // immediately.
       GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-          << "[outlier_detection_lb " << this <<
-          "] interval changed, replacing timer";
+          << "[outlier_detection_lb " << this
+          << "] interval changed, replacing timer";
       ejection_timer_ = MakeOrphanable<EjectionTimer>(
           RefAsSubclass<OutlierDetectionLb>(), ejection_timer_->StartTime());
     }
@@ -661,8 +661,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
     auto it = endpoint_state_map_.find(key);
     if (it == endpoint_state_map_.end()) {
       GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-          << "[outlier_detection_lb " << this <<
-          "] adding endpoint entry for " << key.ToString();
+          << "[outlier_detection_lb " << this
+          << "] adding endpoint entry for " << key.ToString();
      // The endpoint is not present in the map, so we'll need to add it.
      // Start by getting a pointer to the entry for each address in the
      // subchannel map, creating the entry if needed.
@@ -719,8 +719,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
        it != endpoint_state_map_.end();) {
     if (current_endpoints.find(it->first) == current_endpoints.end()) {
       GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-          << "[outlier_detection_lb " << this <<
-          "] removing endpoint map entry " << it->first.ToString();
+          << "[outlier_detection_lb " << this
+          << "] removing endpoint map entry " << it->first.ToString();
       it = endpoint_state_map_.erase(it);
     } else {
       ++it;
@@ -738,8 +738,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
   update_args.config = config_->child_policy();
   update_args.args = std::move(args.args);
   GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-      << "[outlier_detection_lb " << this <<
-      "] Updating child policy handler " << child_policy_.get();
+      << "[outlier_detection_lb " << this << "] Updating child policy handler "
+      << child_policy_.get();
   return child_policy_->UpdateLocked(std::move(update_args));
 }
@@ -769,8 +769,8 @@ OrphanablePtr<LoadBalancingPolicy> OutlierDetectionLb::CreateChildPolicyLocked(
       MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
                                          &outlier_detection_lb_trace);
   GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-      << "[outlier_detection_lb " << this <<
-      "] Created new child policy handler " << lb_policy.get();
+      << "[outlier_detection_lb " << this
+      << "] Created new child policy handler " << lb_policy.get();
   // Add our interested_parties pollset_set to that of the newly created
   // child policy. This will make the child policy progress upon activity on
   // this policy, which in turn is tied to the application's call.
@@ -836,8 +836,8 @@ OutlierDetectionLb::EjectionTimer::EjectionTimer(
     : parent_(std::move(parent)), start_time_(start_time) {
   auto interval = parent_->config_->outlier_detection_config().interval;
   GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-      << "[outlier_detection_lb " << parent_.get() <<
-      "] ejection timer will run in " << interval.ToString();
+      << "[outlier_detection_lb " << parent_.get()
+      << "] ejection timer will run in " << interval.ToString();
   timer_handle_ = parent_->channel_control_helper()->GetEventEngine()->RunAfter(
       interval, [self = Ref(DEBUG_LOCATION, "EjectionTimer")]() mutable {
         ApplicationCallbackExecCtx callback_exec_ctx;
@@ -861,8 +861,8 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
   if (!timer_handle_.has_value()) return;
   timer_handle_.reset();
   GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-      << "[outlier_detection_lb " << parent_.get() <<
-      "] ejection timer running";
+      << "[outlier_detection_lb " << parent_.get()
+      << "] ejection timer running";
   std::map<EndpointState*, double> success_rate_ejection_candidates;
   std::map<EndpointState*, double> failure_percentage_ejection_candidates;
   size_t ejected_host_count = 0;
@@ -959,8 +959,8 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
         // Eject and record the timestamp for use when ejecting addresses in
         // this iteration.
         GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-            << "[outlier_detection_lb " << parent_.get() <<
-            "] ejecting candidate";
+            << "[outlier_detection_lb " << parent_.get()
+            << "] ejecting candidate";
         candidate.first->Eject(time_now);
         ++ejected_host_count;
       }
@@ -1005,8 +1005,8 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
         // Eject and record the timestamp for use when ejecting addresses in
         // this iteration.
         GRPC_TRACE_LOG(outlier_detection_lb, INFO)
-            << "[outlier_detection_lb " << parent_.get() <<
-            "] ejecting candidate";
+            << "[outlier_detection_lb " << parent_.get()
+            << "] ejecting candidate";
         candidate.first->Eject(time_now);
         ++ejected_host_count;
       }

src/core/load_balancing/pick_first/pick_first.cc

@@ -632,8 +632,9 @@ void PickFirst::HealthWatcher::OnConnectivityStateChange(
     grpc_connectivity_state new_state, absl::Status status) {
   if (policy_->health_watcher_ != this) return;
   GRPC_TRACE_LOG(pick_first, INFO)
-      << "[PF " << policy_.get() << "] health watch state update: " <<
-      ConnectivityStateName(new_state) << " (" << status << ")";
+      << "[PF " << policy_.get()
+      << "] health watch state update: " << ConnectivityStateName(new_state)
+      << " (" << status << ")";
   switch (new_state) {
     case GRPC_CHANNEL_READY:
       policy_->channel_control_helper()->UpdateState(
@@ -671,9 +672,9 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
     : subchannel_data_(subchannel_data),
      pick_first_(subchannel_data_->subchannel_list_->policy_),
      subchannel_(std::move(subchannel)) {
-  GRPC_TRACE_LOG(pick_first, INFO) << "[PF " << pick_first_.get()
-      << "] subchannel state " << this <<
-      " (subchannel " << subchannel_.get() << "): starting watch";
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << pick_first_.get() << "] subchannel state " << this
+      << " (subchannel " << subchannel_.get() << "): starting watch";
   auto watcher = std::make_unique<Watcher>(Ref(DEBUG_LOCATION, "Watcher"));
   watcher_ = watcher.get();
   subchannel_->WatchConnectivityState(std::move(watcher));
@@ -694,9 +695,9 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
 }
 
 void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
-  GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << pick_first_.get()
-      << " selected subchannel " <<
-      subchannel_.get();
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "Pick First " << pick_first_.get() << " selected subchannel "
+      << subchannel_.get();
   CHECK_NE(subchannel_data_, nullptr);
   pick_first_->UnsetSelectedSubchannel();  // Cancel health watch, if any.
   pick_first_->selected_ = std::move(subchannel_data_->subchannel_state_);
@@ -1004,8 +1005,8 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
               .Remove(
                   GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
   GRPC_TRACE_LOG(pick_first, INFO)
-      << "[PF " << policy_.get() << "] Creating subchannel list " <<
-      this << " - channel args: " << args_.ToString();
+      << "[PF " << policy_.get() << "] Creating subchannel list " << this
+      << " - channel args: " << args_.ToString();
   if (addresses == nullptr) return;
   // Create a subchannel for each address.
   addresses->ForEach([&](const EndpointAddresses& address) {
@@ -1034,14 +1035,12 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
 PickFirst::SubchannelList::~SubchannelList() {
   GRPC_TRACE_LOG(pick_first, INFO)
-      << "[PF " << policy_.get() << "] Destroying subchannel_list " <<
-      this;
+      << "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
 }
 
 void PickFirst::SubchannelList::Orphan() {
-  GRPC_TRACE_LOG(pick_first, INFO) << "[PF " << policy_.get()
-      << "] Shutting down subchannel_list " <<
-      this;
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
   CHECK(!shutting_down_);
   shutting_down_ = true;
   // Cancel Happy Eyeballs timer, if any.
@@ -1083,9 +1082,9 @@ void PickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
     // We didn't find another subchannel not in state TRANSIENT_FAILURE,
     // so report TRANSIENT_FAILURE and switch to a mode in which we try to
     // connect to all addresses in parallel.
-    GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << policy_.get()
-        << " subchannel list " << this <<
-        " failed to connect to all subchannels";
+    GRPC_TRACE_LOG(pick_first, INFO)
+        << "Pick First " << policy_.get() << " subchannel list " << this
+        << " failed to connect to all subchannels";
     // Re-resolve and report TRANSIENT_FAILURE.
     policy_->channel_control_helper()->RequestReresolution();
     absl::Status status = absl::UnavailableError(
@@ -1558,8 +1557,9 @@ void OldPickFirst::HealthWatcher::OnConnectivityStateChange(
     grpc_connectivity_state new_state, absl::Status status) {
   if (policy_->health_watcher_ != this) return;
   GRPC_TRACE_LOG(pick_first, INFO)
-      << "[PF " << policy_.get() << "] health watch state update: " <<
-      ConnectivityStateName(new_state) << " (" << status << ")";
+      << "[PF " << policy_.get()
+      << "] health watch state update: " << ConnectivityStateName(new_state)
+      << " (" << status << ")";
   switch (new_state) {
     case GRPC_CHANNEL_READY:
       policy_->channel_control_helper()->UpdateState(
@@ -1905,8 +1905,7 @@ void OldPickFirst::SubchannelList::SubchannelData::
   }
   // Cases 1 and 2.
   GRPC_TRACE_LOG(pick_first, INFO)
-      << "Pick First " << p << " selected subchannel " <<
-      subchannel_.get();
+      << "Pick First " << p << " selected subchannel " << subchannel_.get();
   p->selected_ = this;
   // If health checking is enabled, start the health watch, but don't
   // report a new picker -- we want to stay in CONNECTING while we wait
@@ -1949,8 +1948,8 @@ OldPickFirst::SubchannelList::SubchannelList(
              .Remove(
                  GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
   GRPC_TRACE_LOG(pick_first, INFO)
-      << "[PF " << policy_.get() << "] Creating subchannel list " <<
-      this << " - channel args: " << args_.ToString();
+      << "[PF " << policy_.get() << "] Creating subchannel list " << this
+      << " - channel args: " << args_.ToString();
   if (addresses == nullptr) return;
   // Create a subchannel for each address.
   addresses->ForEach([&](const EndpointAddresses& address) {
@@ -1978,14 +1977,12 @@ OldPickFirst::SubchannelList::SubchannelList(
 OldPickFirst::SubchannelList::~SubchannelList() {
   GRPC_TRACE_LOG(pick_first, INFO)
-      << "[PF " << policy_.get() << "] Destroying subchannel_list " <<
-      this;
+      << "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
 }
 
 void OldPickFirst::SubchannelList::Orphan() {
-  GRPC_TRACE_LOG(pick_first, INFO) << "[PF " << policy_.get()
-      << "] Shutting down subchannel_list " <<
-      this;
+  GRPC_TRACE_LOG(pick_first, INFO)
+      << "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
   CHECK(!shutting_down_);
   shutting_down_ = true;
   for (auto& sd : subchannels_) {
@@ -2029,9 +2026,9 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
     // We didn't find another subchannel not in state TRANSIENT_FAILURE,
     // so report TRANSIENT_FAILURE and switch to a mode in which we try to
    // connect to all addresses in parallel.
-    GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << policy_.get()
-        << " subchannel list " << this <<
-        " failed to connect to all subchannels";
+    GRPC_TRACE_LOG(pick_first, INFO)
+        << "Pick First " << policy_.get() << " subchannel list " << this
+        << " failed to connect to all subchannels";
     // In case 2, swap to the new subchannel list. This means reporting
     // TRANSIENT_FAILURE and dropping the existing (working) connection,
    // but we can't ignore what the control plane has told us.

src/core/load_balancing/priority/priority.cc

@@ -403,9 +403,8 @@ void PriorityLb::ChoosePriorityLocked() {
     // If the child for the priority does not exist yet, create it.
     const std::string& child_name = config_->priorities()[priority];
     GRPC_TRACE_LOG(priority_lb, INFO)
-        << "[priority_lb " << this << "] trying priority "
-        << priority <<
-        ", child " << child_name;
+        << "[priority_lb " << this << "] trying priority " << priority
+        << ", child " << child_name;
     auto& child = children_[child_name];
     // Create child if needed.
     if (child == nullptr) {
@@ -462,9 +461,8 @@ void PriorityLb::ChoosePriorityLocked() {
     // If the child for the priority does not exist yet, create it.
     const std::string& child_name = config_->priorities()[priority];
     GRPC_TRACE_LOG(priority_lb, INFO)
-        << "[priority_lb " << this << "] trying priority "
-        << priority <<
-        ", child " << child_name;
+        << "[priority_lb " << this << "] trying priority " << priority
+        << ", child " << child_name;
     auto& child = children_[child_name];
     CHECK(child != nullptr);
     if (child->connectivity_state() == GRPC_CHANNEL_CONNECTING) {
@@ -627,16 +625,16 @@ PriorityLb::ChildPriority::ChildPriority(
     RefCountedPtr<PriorityLb> priority_policy, std::string name)
     : priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
   GRPC_TRACE_LOG(priority_lb, INFO)
-      << "[priority_lb " << priority_policy_.get() <<
-      "] creating child " << name_ << " (" << this << ")";
+      << "[priority_lb " << priority_policy_.get() << "] creating child "
+      << name_ << " (" << this << ")";
   // Start the failover timer.
   failover_timer_ = MakeOrphanable<FailoverTimer>(Ref());
 }
 
 void PriorityLb::ChildPriority::Orphan() {
   GRPC_TRACE_LOG(priority_lb, INFO)
-      << "[priority_lb " << priority_policy_.get() << "] child " <<
-      name_ << " (" << this << "): orphaned";
+      << "[priority_lb " << priority_policy_.get() << "] child " << name_
+      << " (" << this << "): orphaned";
   failover_timer_.reset();
   deactivation_timer_.reset();
   // Remove the child policy's interested_parties pollset_set from the
@@ -664,8 +662,8 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
     bool ignore_reresolution_requests) {
   if (priority_policy_->shutting_down_) return absl::OkStatus();
   GRPC_TRACE_LOG(priority_lb, INFO)
-      << "[priority_lb " << priority_policy_.get() << "] child " <<
-      name_ << " (" << this << "): start update";
+      << "[priority_lb " << priority_policy_.get() << "] child " << name_
+      << " (" << this << "): start update";
   ignore_reresolution_requests_ = ignore_reresolution_requests;
   // Create policy if needed.
   if (child_policy_ == nullptr) {

src/core/load_balancing/ring_hash/ring_hash.cc

@@ -656,9 +656,8 @@ absl::Status RingHash::UpdateLocked(UpdateArgs args) {
     });
   } else {
     GRPC_TRACE_LOG(ring_hash_lb, INFO)
-        << "[RH " << this
-        << "] received update with addresses error: " <<
-        args.addresses.status();
+        << "[RH " << this << "] received update with addresses error: "
+        << args.addresses.status();
     // If we already have an endpoint list, then keep using the existing
     // list, but still report back that the update was not accepted.
     if (!endpoints_.empty()) return args.addresses.status();

src/core/load_balancing/rls/rls.cc

@@ -1040,8 +1040,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
       lb_policy_->channel_control_helper()->GetAuthority(),
      args.initial_metadata)};
   GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << lb_policy_.get() << "] picker=" << this <<
-      ": request keys: " << key.ToString();
+      << "[rlslb " << lb_policy_.get() << "] picker=" << this
+      << ": request keys: " << key.ToString();
   Timestamp now = Timestamp::Now();
   MutexLock lock(&lb_policy_->mu_);
   if (lb_policy_->is_shutdown_) {
@@ -1077,8 +1077,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
     // If the entry has non-expired data, use it.
     if (entry->data_expiration_time() >= now) {
       GRPC_TRACE_LOG(rls_lb, INFO)
-          << "[rlslb " << lb_policy_.get() << "] picker=" << this <<
-          ": using cache entry " << entry;
+          << "[rlslb " << lb_policy_.get() << "] picker=" << this
+          << ": using cache entry " << entry;
       return entry->Pick(args);
     }
     // If the entry is in backoff, then use the default target if set,
@@ -1092,25 +1092,25 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
   }
   // RLS call pending. Queue the pick.
   GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << lb_policy_.get() << "] picker=" << this <<
-      ": RLS request pending; queuing pick";
+      << "[rlslb " << lb_policy_.get() << "] picker=" << this
+      << ": RLS request pending; queuing pick";
   return PickResult::Queue();
 }
 
 LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail(
     const char* reason, PickArgs args, absl::Status status) {
   if (default_child_policy_ != nullptr) {
-    GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << lb_policy_.get()
-        << "] picker=" << this << ": " <<
-        reason << "; using default target";
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
+        << reason << "; using default target";
     auto pick_result = default_child_policy_->Pick(args);
     lb_policy_->MaybeExportPickCount(kMetricDefaultTargetPicks,
                                      config_->default_target(), pick_result);
     return pick_result;
   }
-  GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << lb_policy_.get()
-      << "] picker=" << this << ": " <<
-      reason << "; failing pick";
+  GRPC_TRACE_LOG(rls_lb, INFO)
+      << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": " << reason
+      << "; failing pick";
   auto& stats_plugins =
       lb_policy_->channel_control_helper()->GetStatsPluginGroup();
   stats_plugins.AddCounter(kMetricFailedPicks, 1,
@@ -1200,8 +1200,8 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
 void RlsLb::Cache::Entry::Orphan() {
   GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << lb_policy_.get() << "] cache entry=" << this <<
-      " " << lru_iterator_->ToString() << ": cache entry evicted";
+      << "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
+      << lru_iterator_->ToString() << ": cache entry evicted";
   is_shutdown_ = true;
   lb_policy_->cache_.lru_list_.erase(lru_iterator_);
   lru_iterator_ = lb_policy_->cache_.lru_list_.end();  // Just in case.
@@ -1398,22 +1398,21 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
     map_.emplace(key, OrphanablePtr<Entry>(entry));
     size_ += entry_size;
     GRPC_TRACE_LOG(rls_lb, INFO)
-        << "[rlslb " << lb_policy_ << "] key=" << key.ToString() <<
-        ": cache entry added, entry=" << entry;
+        << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
+        << ": cache entry added, entry=" << entry;
     return entry;
   }
   // Entry found, so use it.
   GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << lb_policy_ << "] key=" << key.ToString() <<
-      ": found cache entry " << it->second.get();
+      << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
+      << ": found cache entry " << it->second.get();
   it->second->MarkUsed();
   return it->second.get();
 }
 
 void RlsLb::Cache::Resize(size_t bytes) {
   GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes <<
-      " bytes";
+      << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << " bytes";
   size_limit_ = bytes;
   MaybeShrinkSize(size_limit_);
 }
@@ -1498,9 +1497,9 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
     auto map_it = map_.find(*lru_it);
     CHECK(map_it != map_.end());
     if (!map_it->second->CanEvict()) break;
-    GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << lb_policy_
-        << "] LRU eviction: removing entry " <<
-        map_it->second.get() << " " << lru_it->ToString();
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
+        << map_it->second.get() << " " << lru_it->ToString();
     size_ -= map_it->second->Size();
     map_.erase(map_it);
   }
@@ -1639,8 +1638,8 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
 void RlsLb::RlsChannel::Orphan() {
   GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this <<
-      ", channel=" << channel_.get() << ": shutdown";
+      << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
+      << ", channel=" << channel_.get() << ": shutdown";
   is_shutdown_ = true;
   if (channel_ != nullptr) {
     // Remove channelz linkage.
@@ -1705,8 +1704,8 @@ RlsLb::RlsRequest::RlsRequest(
       reason_(reason),
      stale_header_data_(std::move(stale_header_data)) {
   GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << lb_policy_.get() << "] rls_request=" << this <<
-      ": RLS request created for key " << key_.ToString();
+      << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
+      << ": RLS request created for key " << key_.ToString();
   GRPC_CLOSURE_INIT(&call_complete_cb_, OnRlsCallComplete, this, nullptr);
   ExecCtx::Run(
       DEBUG_LOCATION,
@@ -1720,8 +1719,8 @@ RlsLb::RlsRequest::~RlsRequest() { CHECK_EQ(call_, nullptr); }
 void RlsLb::RlsRequest::Orphan() {
   if (call_ != nullptr) {
     GRPC_TRACE_LOG(rls_lb, INFO)
-        << "[rlslb " << lb_policy_.get() << "] rls_request=" << this <<
-        " " << key_.ToString() << ": cancelling RLS call";
+        << "[rlslb " << lb_policy_.get() << "] rls_request=" << this << " "
+        << key_.ToString() << ": cancelling RLS call";
     grpc_call_cancel_internal(call_);
   }
   Unref(DEBUG_LOCATION, "Orphan");
@@ -2018,8 +2017,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
          config_->default_target());
       created_default_child = true;
     } else {
-      GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this <<
-          "] using existing child for default target";
+      GRPC_TRACE_LOG(rls_lb, INFO)
+          << "[rlslb " << this << "] using existing child for default target";
       default_child_policy_ =
          it->second->Ref(DEBUG_LOCATION, "DefaultChildPolicy");
     }
@@ -2048,8 +2047,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
       p.second->StartUpdate();
     }
   } else if (created_default_child) {
-    GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this <<
-        "] starting default child policy update";
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << this << "] starting default child policy update";
     default_child_policy_->StartUpdate();
   }
 }
@@ -2067,8 +2066,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
      }
    }
  } else if (created_default_child) {
-    GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this <<
-        "] finishing default child policy update";
+    GRPC_TRACE_LOG(rls_lb, INFO)
+        << "[rlslb " << this << "] finishing default child policy update";
    absl::Status status = default_child_policy_->MaybeFinishUpdate();
    if (!status.ok()) {
      errors.emplace_back(absl::StrCat("target ", config_->default_target(),
@@ -2167,9 +2166,9 @@ void RlsLb::UpdatePickerLocked() {
     if (is_shutdown_) return;
     for (auto& p : child_policy_map_) {
       grpc_connectivity_state child_state = p.second->connectivity_state();
-      GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] target "
-          << p.second->target() <<
-          " in state " << ConnectivityStateName(child_state);
+      GRPC_TRACE_LOG(rls_lb, INFO)
+          << "[rlslb " << this << "] target " << p.second->target()
+          << " in state " << ConnectivityStateName(child_state);
       if (child_state == GRPC_CHANNEL_READY) {
         state = GRPC_CHANNEL_READY;
         break;
@@ -2188,9 +2187,8 @@ void RlsLb::UpdatePickerLocked() {
      }
    }
  }
-  GRPC_TRACE_LOG(rls_lb, INFO)
-      << "[rlslb " << this << "] reporting state " <<
-      ConnectivityStateName(state);
+  GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] reporting state "
+                               << ConnectivityStateName(state);
   absl::Status status;
   if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
     status = absl::UnavailableError("no children available");

src/core/load_balancing/round_robin/round_robin.cc

@@ -247,9 +247,8 @@ absl::Status RoundRobin::UpdateLocked(UpdateArgs args) {
     addresses = args.addresses->get();
   } else {
     GRPC_TRACE_LOG(round_robin, INFO)
         << "[RR " << this
-        << "] received update with address error: " <<
-        args.addresses.status();
+        << "] received update with address error: " << args.addresses.status();
     // If we already have a child list, then keep using the existing
     // list, but still report back that the update was not accepted.
     if (endpoint_list_ != nullptr) return args.addresses.status();
@@ -314,8 +313,8 @@ void RoundRobin::RoundRobinEndpointList::RoundRobinEndpoint::OnStateUpdate(
   }
   if (new_state == GRPC_CHANNEL_IDLE) {
     GRPC_TRACE_LOG(round_robin, INFO)
-        << "[RR " << round_robin << "] child " << this <<
-        " reported IDLE; requesting connection";
+        << "[RR " << round_robin << "] child " << this
+        << " reported IDLE; requesting connection";
     ExitIdleLocked();
   }
   // If state changed, update state counters.
@@ -396,9 +395,8 @@ void RoundRobin::RoundRobinEndpointList::
   // 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
   if (num_ready_ > 0) {
     GRPC_TRACE_LOG(round_robin, INFO)
-        << "[RR " << round_robin
-        << "] reporting READY with child list " <<
-        this;
+        << "[RR " << round_robin << "] reporting READY with child list "
+        << this;
     std::vector<RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>> pickers;
     for (const auto& endpoint : endpoints()) {
       auto state = endpoint->connectivity_state();
@@ -411,8 +409,9 @@ void RoundRobin::RoundRobinEndpointList::
         GRPC_CHANNEL_READY, absl::OkStatus(),
         MakeRefCounted<Picker>(round_robin, std::move(pickers)));
   } else if (num_connecting_ > 0) {
-    GRPC_TRACE_LOG(round_robin, INFO) << "[RR " << round_robin <<
-        "] reporting CONNECTING with child list " << this;
+    GRPC_TRACE_LOG(round_robin, INFO)
+        << "[RR " << round_robin << "] reporting CONNECTING with child list "
+        << this;
     round_robin->channel_control_helper()->UpdateState(
         GRPC_CHANNEL_CONNECTING, absl::Status(),
         MakeRefCounted<QueuePicker>(nullptr));

src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc

@@ -568,15 +568,13 @@ WeightedRoundRobin::Picker::Picker(RefCountedPtr<WeightedRoundRobin> wrr,
 WeightedRoundRobin::Picker::~Picker() {
   GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-      << "[WRR " << wrr_.get() << " picker " << this <<
-      "] destroying picker";
+      << "[WRR " << wrr_.get() << " picker " << this << "] destroying picker";
 }
 
 void WeightedRoundRobin::Picker::Orphaned() {
   MutexLock lock(&timer_mu_);
   GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-      << "[WRR " << wrr_.get() << " picker " << this <<
-      "] cancelling timer";
+      << "[WRR " << wrr_.get() << " picker " << this << "] cancelling timer";
   wrr_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
   timer_handle_.reset();
   wrr_.reset();
@@ -643,8 +641,8 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
      {wrr_->channel_control_helper()->GetTarget()},
      {wrr_->locality_name_});
   GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-      << "[WRR " << wrr_.get() << " picker " << this <<
-      "] new weights: " << absl::StrJoin(weights, " ");
+      << "[WRR " << wrr_.get() << " picker " << this
+      << "] new weights: " << absl::StrJoin(weights, " ");
   auto scheduler_or = StaticStrideScheduler::Make(
       weights, [this]() { return wrr_->scheduler_state_.fetch_add(1); });
   std::shared_ptr<StaticStrideScheduler> scheduler;
@@ -652,12 +650,12 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
     scheduler =
         std::make_shared<StaticStrideScheduler>(std::move(*scheduler_or));
     GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-        << "[WRR " << wrr_.get() << " picker " << this <<
-        "] new scheduler: " << scheduler.get();
+        << "[WRR " << wrr_.get() << " picker " << this
+        << "] new scheduler: " << scheduler.get();
   } else {
     GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-        << "[WRR " << wrr_.get() << " picker " << this <<
-        "] no scheduler, falling back to RR";
+        << "[WRR " << wrr_.get() << " picker " << this
+        << "] no scheduler, falling back to RR";
     stats_plugins.AddCounter(kMetricRrFallback, 1,
                              {wrr_->channel_control_helper()->GetTarget()},
                              {wrr_->locality_name_});
@@ -686,8 +684,8 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
         MutexLock lock(&self->timer_mu_);
         if (self->timer_handle_.has_value()) {
           GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-              << "[WRR " << self->wrr_.get() << " picker " <<
-              self.get() << "] timer fired";
+              << "[WRR " << self->wrr_.get() << " picker " << self.get()
+              << "] timer fired";
           self->BuildSchedulerAndStartTimerLocked();
         }
       }
@@ -710,8 +708,8 @@ WeightedRoundRobin::WeightedRoundRobin(Args args)
           .GetString(GRPC_ARG_LB_WEIGHTED_TARGET_CHILD)
          .value_or("")) {
   GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-      << "[WRR " << this << "] Created -- locality_name=\"" <<
-      std::string(locality_name_) << "\"";
+      << "[WRR " << this << "] Created -- locality_name=\""
+      << std::string(locality_name_) << "\"";
 }
 
 WeightedRoundRobin::~WeightedRoundRobin() {
@@ -772,9 +770,8 @@ absl::Status WeightedRoundRobin::UpdateLocked(UpdateArgs args) {
        ordered_addresses.begin(), ordered_addresses.end()));
   } else {
     GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-        << "[WRR " << this
-        << "] received update with address error: " <<
-        args.addresses.status().ToString();
+        << "[WRR " << this << "] received update with address error: "
+        << args.addresses.status().ToString();
     // If we already have an endpoint list, then keep using the existing
     // list, but still report back that the update was not accepted.
     if (endpoint_list_ != nullptr) return args.addresses.status();
@@ -887,8 +884,8 @@ void WeightedRoundRobin::WrrEndpointList::WrrEndpoint::OnStateUpdate(
   }
   if (new_state == GRPC_CHANNEL_IDLE) {
     GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-        << "[WRR " << wrr << "] child " << this <<
-        " reported IDLE; requesting connection";
+        << "[WRR " << wrr << "] child " << this
+        << " reported IDLE; requesting connection";
     ExitIdleLocked();
   } else if (new_state == GRPC_CHANNEL_READY) {
     // If we transition back to READY state, restart the blackout period.
@@ -983,14 +980,14 @@ void WeightedRoundRobin::WrrEndpointList::
   // 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
   if (num_ready_ > 0) {
     GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
-        << "[WRR " << wrr << "] reporting READY with endpoint list " <<
-        this;
+        << "[WRR " << wrr << "] reporting READY with endpoint list " << this;
     wrr->channel_control_helper()->UpdateState(
         GRPC_CHANNEL_READY, absl::Status(),
         MakeRefCounted<Picker>(wrr->RefAsSubclass<WeightedRoundRobin>(), this));
   } else if (num_connecting_ > 0) {
-    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO) << "[WRR " << wrr <<
-        "] reporting CONNECTING with endpoint list " << this;
+    GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
+        << "[WRR " << wrr << "] reporting CONNECTING with endpoint list "
+        << this;
     wrr->channel_control_helper()->UpdateState(
         GRPC_CHANNEL_CONNECTING, absl::Status(),
         MakeRefCounted<QueuePicker>(nullptr));

src/core/load_balancing/weighted_target/weighted_target.cc

@@ -291,8 +291,8 @@ WeightedTargetLb::WeightedTargetLb(Args args)
 WeightedTargetLb::~WeightedTargetLb() {
   GRPC_TRACE_LOG(weighted_target_lb, INFO)
-      << "[weighted_target_lb " << this <<
-      "] destroying weighted_target LB policy";
+      << "[weighted_target_lb " << this
+      << "] destroying weighted_target LB policy";
 }
 
 void WeightedTargetLb::ShutdownLocked() {
@@ -382,8 +382,8 @@ void WeightedTargetLb::UpdateStateLocked() {
   // is being propagated to our children.
   if (update_in_progress_) return;
   GRPC_TRACE_LOG(weighted_target_lb, INFO)
-      << "[weighted_target_lb " << this <<
-      "] scanning children to determine connectivity state";
+      << "[weighted_target_lb " << this
+      << "] scanning children to determine connectivity state";
   // Construct lists of child pickers with associated weights, one for
   // children that are in state READY and another for children that are
   // in state TRANSIENT_FAILURE. Each child is represented by a portion of
@@ -449,9 +449,8 @@ void WeightedTargetLb::UpdateStateLocked() {
     connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
   }
   GRPC_TRACE_LOG(weighted_target_lb, INFO)
-      << "[weighted_target_lb " << this
-      << "] connectivity changed to " <<
-      ConnectivityStateName(connectivity_state);
+      << "[weighted_target_lb " << this << "] connectivity changed to "
+      << ConnectivityStateName(connectivity_state);
   RefCountedPtr<SubchannelPicker> picker;
   absl::Status status;
   switch (connectivity_state) {
@@ -524,8 +523,8 @@ WeightedTargetLb::WeightedChild::WeightedChild(
       name_(name),
      picker_(MakeRefCounted<QueuePicker>(nullptr)) {
   GRPC_TRACE_LOG(weighted_target_lb, INFO)
-      << "[weighted_target_lb " << weighted_target_policy_.get() <<
-      "] created WeightedChild " << this << " for " << name_;
+      << "[weighted_target_lb " << weighted_target_policy_.get()
+      << "] created WeightedChild " << this << " for " << name_;
 }
 
 WeightedTargetLb::WeightedChild::~WeightedChild() {
@@ -655,8 +654,8 @@ void WeightedTargetLb::WeightedChild::DeactivateLocked() {
   // If already deactivated, don't do that again.
   if (weight_ == 0) return;
   GRPC_TRACE_LOG(weighted_target_lb, INFO)
-      << "[weighted_target_lb " << weighted_target_policy_.get() <<
-      "] WeightedChild " << this << " " << name_ << ": deactivating";
+      << "[weighted_target_lb " << weighted_target_policy_.get()
+      << "] WeightedChild " << this << " " << name_ << ": deactivating";
   // Set the child weight to 0 so that future picker won't contain this child.
   weight_ = 0;
   // Start a timer to delete the child.

src/core/load_balancing/xds/cds.cc

@@ -663,9 +663,8 @@ Json CdsLb::CreateChildPolicyConfigForLeafCluster(
            Json::FromObject(std::move(outlier_detection_config))},
       })});
   GRPC_TRACE_LOG(cds_lb, INFO)
-      << "[cdslb " << this
-      << "] generated config for child policy: " <<
-      JsonDump(outlier_detection_policy, /*indent=*/1);
+      << "[cdslb " << this << "] generated config for child policy: "
+      << JsonDump(outlier_detection_policy, /*indent=*/1);
   return outlier_detection_policy;
 }
@@ -696,9 +695,8 @@ Json CdsLb::CreateChildPolicyConfigForAggregateCluster(
          })},
       })});
   GRPC_TRACE_LOG(cds_lb, INFO)
-      << "[cdslb " << this
-      << "] generated config for child policy: " <<
-      JsonDump(json, /*indent=*/1);
+      << "[cdslb " << this << "] generated config for child policy: "
+      << JsonDump(json, /*indent=*/1);
   return json;
 }
@@ -714,8 +712,8 @@ void CdsLb::ResetState() {
 }
 
 void CdsLb::ReportTransientFailure(absl::Status status) {
-  GRPC_TRACE_LOG(cds_lb, INFO) << "[cdslb " << this <<
-      "] reporting TRANSIENT_FAILURE: " << status;
+  GRPC_TRACE_LOG(cds_lb, INFO)
+      << "[cdslb " << this << "] reporting TRANSIENT_FAILURE: " << status;
   ResetState();
   channel_control_helper()->UpdateState(
       GRPC_CHANNEL_TRANSIENT_FAILURE, status,

@ -405,8 +405,8 @@ XdsClusterImplLb::Picker::Picker(XdsClusterImplLb* xds_cluster_impl_lb,
drop_stats_(xds_cluster_impl_lb->drop_stats_), drop_stats_(xds_cluster_impl_lb->drop_stats_),
picker_(std::move(picker)) { picker_(std::move(picker)) {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO) GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << xds_cluster_impl_lb < < < < << "[xds_cluster_impl_lb " << xds_cluster_impl_lb
"] constructed new picker " << this; << "] constructed new picker " << this;
} }
LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick( LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
@ -500,14 +500,14 @@ XdsClusterImplLb::XdsClusterImplLb(RefCountedPtr<GrpcXdsClient> xds_client,
Args args) Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) { : LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO) GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < < << "[xds_cluster_impl_lb " << this << "] created -- using xds client "
"] created -- using xds client " << xds_client_.get(); << xds_client_.get();
} }
XdsClusterImplLb::~XdsClusterImplLb() { XdsClusterImplLb::~XdsClusterImplLb() {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO) GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < < << "[xds_cluster_impl_lb " << this
"] destroying xds_cluster_impl LB policy"; << "] destroying xds_cluster_impl LB policy";
} }
void XdsClusterImplLb::ShutdownLocked() { void XdsClusterImplLb::ShutdownLocked() {
@ -535,8 +535,8 @@ void XdsClusterImplLb::ResetState() {
void XdsClusterImplLb::ReportTransientFailure(absl::Status status) { void XdsClusterImplLb::ReportTransientFailure(absl::Status status) {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO) GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < < << "[xds_cluster_impl_lb " << this
"] reporting TRANSIENT_FAILURE: " << status; << "] reporting TRANSIENT_FAILURE: " << status;
ResetState(); ResetState();
channel_control_helper()->UpdateState( channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status, GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -766,8 +766,8 @@ OrphanablePtr<LoadBalancingPolicy> XdsClusterImplLb::CreateChildPolicyLocked(
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_cluster_impl_lb_trace); &xds_cluster_impl_lb_trace);
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO) GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < < << "[xds_cluster_impl_lb " << this
"] Created new child policy handler " << lb_policy.get(); << "] Created new child policy handler " << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created // Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call. // this policy, which in turn is tied to the application's call.
@ -792,8 +792,8 @@ absl::Status XdsClusterImplLb::UpdateChildPolicyLocked(
args.Set(GRPC_ARG_XDS_CLUSTER_NAME, config_->cluster_name()); args.Set(GRPC_ARG_XDS_CLUSTER_NAME, config_->cluster_name());
// Update the policy. // Update the policy.
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO) GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < < << "[xds_cluster_impl_lb " << this << "] Updating child policy handler "
"] Updating child policy handler " << child_policy_.get(); << child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args)); return child_policy_->UpdateLocked(std::move(update_args));
} }

@ -250,8 +250,8 @@ XdsClusterManagerLb::XdsClusterManagerLb(Args args)
XdsClusterManagerLb::~XdsClusterManagerLb() { XdsClusterManagerLb::~XdsClusterManagerLb() {
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO) GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << this < < < < << "[xds_cluster_manager_lb " << this
"] destroying xds_cluster_manager LB policy"; << "] destroying xds_cluster_manager LB policy";
} }
void XdsClusterManagerLb::ShutdownLocked() { void XdsClusterManagerLb::ShutdownLocked() {
@ -406,16 +406,14 @@ XdsClusterManagerLb::ClusterChild::ClusterChild(
name_(name), name_(name),
picker_(MakeRefCounted<QueuePicker>(nullptr)) { picker_(MakeRefCounted<QueuePicker>(nullptr)) {
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO) GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< xds_cluster_manager_policy_.get() << << "] created ClusterChild " << this << " for " << name_;
"] created ClusterChild " << this << " for " << name_;
} }
XdsClusterManagerLb::ClusterChild::~ClusterChild() { XdsClusterManagerLb::ClusterChild::~ClusterChild() {
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO) GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< xds_cluster_manager_policy_.get() << << "] ClusterChild " << this << ": destroying child";
"] ClusterChild " << this << ": destroying child";
xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild"); xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild");
} }

@ -465,8 +465,8 @@ XdsOverrideHostLb::Picker::Picker(
picker_(std::move(picker)), picker_(std::move(picker)),
override_host_health_status_set_(override_host_health_status_set) { override_host_health_status_set_(override_host_health_status_set) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() < < < < << "[xds_override_host_lb " << policy_.get()
"] constructed new picker " << this; << "] constructed new picker " << this;
} }
absl::optional<LoadBalancingPolicy::PickResult> absl::optional<LoadBalancingPolicy::PickResult>
@ -605,9 +605,8 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
// with lock contention and CPU usage due to sweeps over the map. // with lock contention and CPU usage due to sweeps over the map.
duration = std::max(duration, Duration::Seconds(5)); duration = std::max(duration, Duration::Seconds(5));
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< "] idle timer " < < < < << ": subchannel cleanup pass will run in " << duration;
this << ": subchannel cleanup pass will run in " << duration;
timer_handle_ = policy_->channel_control_helper()->GetEventEngine()->RunAfter( timer_handle_ = policy_->channel_control_helper()->GetEventEngine()->RunAfter(
duration, [self = RefAsSubclass<IdleTimer>()]() mutable { duration, [self = RefAsSubclass<IdleTimer>()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx; ApplicationCallbackExecCtx callback_exec_ctx;
@ -622,9 +621,8 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
void XdsOverrideHostLb::IdleTimer::Orphan() { void XdsOverrideHostLb::IdleTimer::Orphan() {
if (timer_handle_.has_value()) { if (timer_handle_.has_value()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< "] idle timer " < < < < << ": cancelling";
this << ": cancelling";
policy_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_); policy_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset(); timer_handle_.reset();
} }
@ -635,9 +633,8 @@ void XdsOverrideHostLb::IdleTimer::OnTimerLocked() {
if (timer_handle_.has_value()) { if (timer_handle_.has_value()) {
timer_handle_.reset(); timer_handle_.reset();
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() << "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< "] idle timer " < < < < << ": timer fired";
this << ": timer fired";
policy_->CleanupSubchannels(); policy_->CleanupSubchannels();
} }
} }
@ -655,8 +652,8 @@ XdsOverrideHostLb::XdsOverrideHostLb(Args args)
XdsOverrideHostLb::~XdsOverrideHostLb() { XdsOverrideHostLb::~XdsOverrideHostLb() {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this
"] destroying xds_override_host LB policy"; << "] destroying xds_override_host LB policy";
} }
void XdsOverrideHostLb::ShutdownLocked() { void XdsOverrideHostLb::ShutdownLocked() {
@ -694,8 +691,8 @@ void XdsOverrideHostLb::ResetState() {
void XdsOverrideHostLb::ReportTransientFailure(absl::Status status) { void XdsOverrideHostLb::ReportTransientFailure(absl::Status status) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this
"] reporting TRANSIENT_FAILURE: " << status; << "] reporting TRANSIENT_FAILURE: " << status;
ResetState(); ResetState();
channel_control_helper()->UpdateState( channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status, GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -788,8 +785,8 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
std::make_shared<ChildEndpointIterator>(std::move(*args.addresses)); std::make_shared<ChildEndpointIterator>(std::move(*args.addresses));
} else { } else {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this
"] address error: " << args.addresses.status(); << "] address error: " << args.addresses.status();
} }
// Create child policy if needed. // Create child policy if needed.
if (child_policy_ == nullptr) { if (child_policy_ == nullptr) {
@ -802,8 +799,8 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
update_args.config = new_config->child_config(); update_args.config = new_config->child_config();
update_args.args = args_; update_args.args = args_;
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this << "] Updating child policy handler "
"] Updating child policy handler " << child_policy_.get(); << child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args)); return child_policy_->UpdateLocked(std::move(update_args));
} }
@ -833,8 +830,8 @@ OrphanablePtr<LoadBalancingPolicy> XdsOverrideHostLb::CreateChildPolicyLocked(
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_override_host_lb_trace); &xds_override_host_lb_trace);
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this
"] Created new child policy handler " << lb_policy.get(); << "] Created new child policy handler " << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created // Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call. // this policy, which in turn is tied to the application's call.
@ -872,8 +869,8 @@ void XdsOverrideHostLb::UpdateAddressMap(
auto key = grpc_sockaddr_to_string(&address, /*normalize=*/false); auto key = grpc_sockaddr_to_string(&address, /*normalize=*/false);
if (!key.ok()) { if (!key.ok()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this
"] no key for endpoint address; not adding to map"; << "] no key for endpoint address; not adding to map";
} else { } else {
addresses.push_back(*std::move(key)); addresses.push_back(*std::move(key));
} }
@ -901,9 +898,8 @@ void XdsOverrideHostLb::UpdateAddressMap(
for (auto it = subchannel_map_.begin(); it != subchannel_map_.end();) { for (auto it = subchannel_map_.begin(); it != subchannel_map_.end();) {
if (addresses_for_map.find(it->first) == addresses_for_map.end()) { if (addresses_for_map.find(it->first) == addresses_for_map.end()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "[xds_override_host_lb " << this << "] removing map key "
<< "] removing map key " < < < < << it->first;
it->first;
it->second->UnsetSubchannel(&subchannel_refs_to_drop); it->second->UnsetSubchannel(&subchannel_refs_to_drop);
it = subchannel_map_.erase(it); it = subchannel_map_.erase(it);
} else { } else {
@ -916,9 +912,8 @@ void XdsOverrideHostLb::UpdateAddressMap(
auto it = subchannel_map_.find(address); auto it = subchannel_map_.find(address);
if (it == subchannel_map_.end()) { if (it == subchannel_map_.end()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "[xds_override_host_lb " << this << "] adding map key "
<< "] adding map key " < < < < << address;
address;
it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>()) it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>())
.first; .first;
} }
@ -967,8 +962,8 @@ XdsOverrideHostLb::AdoptSubchannel(
void XdsOverrideHostLb::CreateSubchannelForAddress(absl::string_view address) { void XdsOverrideHostLb::CreateSubchannelForAddress(absl::string_view address) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this << "] creating owned subchannel for "
"] creating owned subchannel for " << address; << address;
auto addr = StringToSockaddr(address); auto addr = StringToSockaddr(address);
CHECK(addr.ok()); CHECK(addr.ok());
// Note: We don't currently have any cases where per_address_args need to // Note: We don't currently have any cases where per_address_args need to
@ -1009,8 +1004,8 @@ void XdsOverrideHostLb::CleanupSubchannels() {
auto subchannel = p.second->TakeOwnedSubchannel(); auto subchannel = p.second->TakeOwnedSubchannel();
if (subchannel != nullptr) { if (subchannel != nullptr) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < < << "[xds_override_host_lb " << this
"] dropping subchannel for " << p.first; << "] dropping subchannel for " << p.first;
subchannel_refs_to_drop.push_back(std::move(subchannel)); subchannel_refs_to_drop.push_back(std::move(subchannel));
} }
} else { } else {
@ -1085,8 +1080,8 @@ void XdsOverrideHostLb::SubchannelWrapper::CancelConnectivityStateWatch(
void XdsOverrideHostLb::SubchannelWrapper::Orphaned() { void XdsOverrideHostLb::SubchannelWrapper::Orphaned() {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() < < < < << "[xds_override_host_lb " << policy_.get() << "] subchannel wrapper "
"] subchannel wrapper " << this << " orphaned"; << this << " orphaned";
if (!IsWorkSerializerDispatchEnabled()) { if (!IsWorkSerializerDispatchEnabled()) {
wrapped_subchannel()->CancelConnectivityStateWatch(watcher_); wrapped_subchannel()->CancelConnectivityStateWatch(watcher_);
if (subchannel_entry_ != nullptr) { if (subchannel_entry_ != nullptr) {
@ -1197,9 +1192,9 @@ void XdsOverrideHostLb::SubchannelEntry::OnSubchannelWrapperOrphan(
if (subchannel != wrapper) return; if (subchannel != wrapper) return;
if (last_used_time_ < (Timestamp::Now() - connection_idle_timeout)) { if (last_used_time_ < (Timestamp::Now() - connection_idle_timeout)) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb] removing unowned subchannel " << "[xds_override_host_lb] removing unowned subchannel "
"wrapper " < < < < "wrapper "
subchannel; << subchannel;
subchannel_ = nullptr; subchannel_ = nullptr;
} else { } else {
// The subchannel is being released by the child policy, but it // The subchannel is being released by the child policy, but it
@ -1207,9 +1202,8 @@ void XdsOverrideHostLb::SubchannelEntry::OnSubchannelWrapperOrphan(
// the wrapper with the same underlying subchannel, and we hold // the wrapper with the same underlying subchannel, and we hold
// our own ref to it. // our own ref to it.
GRPC_TRACE_LOG(xds_override_host_lb, INFO) GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb] subchannel wrapper " << "[xds_override_host_lb] subchannel wrapper " << subchannel
<< subchannel << << ": cloning to gain ownership";
": cloning to gain ownership";
subchannel_ = wrapper->Clone(); subchannel_ = wrapper->Clone();
} }
} }

@ -240,9 +240,8 @@ absl::Status XdsWrrLocalityLb::UpdateLocked(UpdateArgs args) {
update_args.args = std::move(args.args); update_args.args = std::move(args.args);
// Update the policy. // Update the policy.
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO) GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this << "[xds_wrr_locality_lb " << this << "] updating child policy "
<< "] updating child policy " < < < < << child_policy_.get();
child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args)); return child_policy_->UpdateLocked(std::move(update_args));
} }
@ -257,8 +256,8 @@ OrphanablePtr<LoadBalancingPolicy> XdsWrrLocalityLb::CreateChildPolicyLocked(
CoreConfiguration::Get().lb_policy_registry().CreateLoadBalancingPolicy( CoreConfiguration::Get().lb_policy_registry().CreateLoadBalancingPolicy(
"weighted_target_experimental", std::move(lb_policy_args)); "weighted_target_experimental", std::move(lb_policy_args));
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO) GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this < < < < << "[xds_wrr_locality_lb " << this << "] created new child policy "
"] created new child policy " << lb_policy.get(); << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created // Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// this LB policy, which in turn is tied to the application's call. // this LB policy, which in turn is tied to the application's call.

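Editor's note on these trace statements: if GRPC_TRACE_LOG expands to a LOG_IF-style conditional stream (an assumption here — the macro definition is not part of this diff), the streamed expressions such as JsonDump(...) are only evaluated when the corresponding trace flag is enabled. The self-contained sketch below uses a hypothetical TRACE_LOG_SKETCH macro to show that behaviour; it is not the real gRPC implementation.

// Hypothetical macro: an if/else-gated stream, so the '<<' chain on the
// right-hand side is never evaluated when the flag is off.
#include <iostream>
#include <string>

#define TRACE_LOG_SKETCH(flag_enabled) \
  if (!(flag_enabled)) {               \
  } else                               \
    std::cerr

static int dump_calls = 0;

// Stands in for a costly formatter such as JsonDump() in the cds.cc hunks.
std::string ExpensiveDump() {
  ++dump_calls;
  return "{\"child_policy\": []}";
}

int main() {
  // Flag off: the empty if-branch is taken, so ExpensiveDump() never runs.
  TRACE_LOG_SKETCH(false) << "[cdslb] generated config: " << ExpensiveDump()
                          << '\n';
  // Flag on: the else-branch streams to std::cerr and evaluates the chain.
  TRACE_LOG_SKETCH(true) << "[cdslb] generated config: " << ExpensiveDump()
                         << '\n';
  std::cerr << "dump_calls = " << dump_calls << '\n';  // prints 1
  return 0;
}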