[Gpr_To_Absl_Logging]

pull/37349/head
tanvi-jagtap 7 months ago
parent a10c7a8d4f
commit 70ca4b8dce
  1. 25
      src/core/load_balancing/grpclb/grpclb.cc
  2. 8
      src/core/load_balancing/health_check_client.cc
  3. 4
      src/core/load_balancing/oob_backend_metric.cc
  4. 44
      src/core/load_balancing/outlier_detection/outlier_detection.cc
  5. 61
      src/core/load_balancing/pick_first/pick_first.cc
  6. 22
      src/core/load_balancing/priority/priority.cc
  7. 5
      src/core/load_balancing/ring_hash/ring_hash.cc
  8. 80
      src/core/load_balancing/rls/rls.cc
  9. 19
      src/core/load_balancing/round_robin/round_robin.cc
  10. 43
      src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc
  11. 21
      src/core/load_balancing/weighted_target/weighted_target.cc
  12. 14
      src/core/load_balancing/xds/cds.cc
  13. 24
      src/core/load_balancing/xds/xds_cluster_impl.cc
  14. 14
      src/core/load_balancing/xds/xds_cluster_manager.cc
  15. 76
      src/core/load_balancing/xds/xds_override_host.cc
  16. 9
      src/core/load_balancing/xds/xds_wrr_locality.cc

@ -949,8 +949,8 @@ void GrpcLb::BalancerCallState::Orphan() {
void GrpcLb::BalancerCallState::StartQuery() {
CHECK_NE(lb_call_, nullptr);
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this <<
": Starting LB call " << lb_call_;
<< "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
<< ": Starting LB call " << lb_call_;
// Create the ops.
grpc_call_error call_error;
grpc_op ops[3];
@ -1526,8 +1526,9 @@ class GrpcLb::NullLbTokenEndpointIterator final
void ForEach(absl::FunctionRef<void(const EndpointAddresses&)> callback)
const override {
parent_it_->ForEach([&](const EndpointAddresses& endpoint) {
GRPC_TRACE_LOG(glb, INFO) << "[grpclb " << this <<
"] fallback address: " << endpoint.ToString();
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this
<< "] fallback address: " << endpoint.ToString();
callback(EndpointAddresses(endpoint.addresses(),
endpoint.args().SetObject(empty_token_)));
});
@ -1762,9 +1763,9 @@ OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
std::make_unique<Helper>(RefAsSubclass<GrpcLb>(DEBUG_LOCATION, "Helper"));
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), &glb_trace);
GRPC_TRACE_LOG(glb, INFO) << "[grpclb " << this
<< "] Created new child policy handler (" <<
lb_policy.get() << ")";
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] Created new child policy handler ("
<< lb_policy.get() << ")";
// Add the gRPC LB's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
// activity on gRPC LB, which in turn is tied to the application's call.
@ -1816,8 +1817,8 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
}
// Update the policy.
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] Updating child policy handler " <<
child_policy_.get();
<< "[grpclb " << this << "] Updating child policy handler "
<< child_policy_.get();
// TODO(roth): If we're in fallback mode and the child policy rejects the
// update, we should propagate that failure back to the resolver somehow.
(void)child_policy_->UpdateLocked(std::move(update_args));
@ -1859,9 +1860,9 @@ void GrpcLb::OnSubchannelCacheTimerLocked() {
subchannel_cache_timer_handle_.reset();
auto it = cached_subchannels_.begin();
if (it != cached_subchannels_.end()) {
GRPC_TRACE_LOG(glb, INFO) << "[grpclb " << this << "] removing "
<< it->second.size() <<
" subchannels from cache";
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] removing " << it->second.size()
<< " subchannels from cache";
cached_subchannels_.erase(it);
}
if (!cached_subchannels_.empty()) {

@ -342,8 +342,8 @@ class HealthProducer::ConnectivityWatcher final
void HealthProducer::Start(RefCountedPtr<Subchannel> subchannel) {
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthProducer " << this << ": starting with subchannel " <<
subchannel.get();
<< "HealthProducer " << this << ": starting with subchannel "
<< subchannel.get();
subchannel_ = std::move(subchannel);
{
MutexLock lock(&mu_);
@ -498,8 +498,8 @@ MakeHealthCheckWatcher(
args.GetOwnedString(GRPC_ARG_HEALTH_CHECK_SERVICE_NAME);
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "creating HealthWatcher -- health_check_service_name=\"" <<
health_check_service_name.value_or("N/A") << "\"";
<< "creating HealthWatcher -- health_check_service_name=\""
<< health_check_service_name.value_or("N/A") << "\"";
return std::make_unique<HealthWatcher>(std::move(work_serializer),
std::move(health_check_service_name),
std::move(watcher));

@ -274,8 +274,8 @@ void OrcaProducer::MaybeStartStreamLocked() {
void OrcaProducer::NotifyWatchers(
const BackendMetricData& backend_metric_data) {
GRPC_TRACE_LOG(orca_client, INFO) << "OrcaProducer " << this <<
": reporting backend metrics to watchers";
GRPC_TRACE_LOG(orca_client, INFO)
<< "OrcaProducer " << this << ": reporting backend metrics to watchers";
MutexLock lock(&mu_);
for (OrcaWatcher* watcher : watchers_) {
watcher->watcher()->OnBackendMetricReport(backend_metric_data);

@ -581,8 +581,8 @@ OutlierDetectionLb::OutlierDetectionLb(Args args)
OutlierDetectionLb::~OutlierDetectionLb() {
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this <<
"] destroying outlier_detection LB policy";
<< "[outlier_detection_lb " << this
<< "] destroying outlier_detection LB policy";
}
void OutlierDetectionLb::ShutdownLocked() {
@ -622,8 +622,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
if (!config_->CountingEnabled()) {
// No need for timer. Cancel the current timer, if any.
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this <<
"] counting disabled, cancelling timer";
<< "[outlier_detection_lb " << this
<< "] counting disabled, cancelling timer";
ejection_timer_.reset();
} else if (ejection_timer_ == nullptr) {
// No timer running. Start it now.
@ -642,8 +642,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
// Note that if the new deadline is in the past, the timer will fire
// immediately.
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this <<
"] interval changed, replacing timer";
<< "[outlier_detection_lb " << this
<< "] interval changed, replacing timer";
ejection_timer_ = MakeOrphanable<EjectionTimer>(
RefAsSubclass<OutlierDetectionLb>(), ejection_timer_->StartTime());
}
@ -661,8 +661,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
auto it = endpoint_state_map_.find(key);
if (it == endpoint_state_map_.end()) {
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this <<
"] adding endpoint entry for " << key.ToString();
<< "[outlier_detection_lb " << this
<< "] adding endpoint entry for " << key.ToString();
// The endpoint is not present in the map, so we'll need to add it.
// Start by getting a pointer to the entry for each address in the
// subchannel map, creating the entry if needed.
@ -719,8 +719,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
it != endpoint_state_map_.end();) {
if (current_endpoints.find(it->first) == current_endpoints.end()) {
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this <<
"] removing endpoint map entry " << it->first.ToString();
<< "[outlier_detection_lb " << this
<< "] removing endpoint map entry " << it->first.ToString();
it = endpoint_state_map_.erase(it);
} else {
++it;
@ -738,8 +738,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
update_args.config = config_->child_policy();
update_args.args = std::move(args.args);
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this <<
"] Updating child policy handler " << child_policy_.get();
<< "[outlier_detection_lb " << this << "] Updating child policy handler "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -769,8 +769,8 @@ OrphanablePtr<LoadBalancingPolicy> OutlierDetectionLb::CreateChildPolicyLocked(
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&outlier_detection_lb_trace);
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this <<
"] Created new child policy handler " << lb_policy.get();
<< "[outlier_detection_lb " << this
<< "] Created new child policy handler " << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -836,8 +836,8 @@ OutlierDetectionLb::EjectionTimer::EjectionTimer(
: parent_(std::move(parent)), start_time_(start_time) {
auto interval = parent_->config_->outlier_detection_config().interval;
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get() <<
"] ejection timer will run in " << interval.ToString();
<< "[outlier_detection_lb " << parent_.get()
<< "] ejection timer will run in " << interval.ToString();
timer_handle_ = parent_->channel_control_helper()->GetEventEngine()->RunAfter(
interval, [self = Ref(DEBUG_LOCATION, "EjectionTimer")]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
@ -861,8 +861,8 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
if (!timer_handle_.has_value()) return;
timer_handle_.reset();
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get() <<
"] ejection timer running";
<< "[outlier_detection_lb " << parent_.get()
<< "] ejection timer running";
std::map<EndpointState*, double> success_rate_ejection_candidates;
std::map<EndpointState*, double> failure_percentage_ejection_candidates;
size_t ejected_host_count = 0;
@ -959,8 +959,8 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
// Eject and record the timestamp for use when ejecting addresses in
// this iteration.
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get() <<
"] ejecting candidate";
<< "[outlier_detection_lb " << parent_.get()
<< "] ejecting candidate";
candidate.first->Eject(time_now);
++ejected_host_count;
}
@ -1005,8 +1005,8 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
// Eject and record the timestamp for use when ejecting addresses in
// this iteration.
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get() <<
"] ejecting candidate";
<< "[outlier_detection_lb " << parent_.get()
<< "] ejecting candidate";
candidate.first->Eject(time_now);
++ejected_host_count;
}

@ -632,8 +632,9 @@ void PickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return;
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] health watch state update: " <<
ConnectivityStateName(new_state) << " (" << status << ")";
<< "[PF " << policy_.get()
<< "] health watch state update: " << ConnectivityStateName(new_state)
<< " (" << status << ")";
switch (new_state) {
case GRPC_CHANNEL_READY:
policy_->channel_control_helper()->UpdateState(
@ -671,9 +672,9 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
: subchannel_data_(subchannel_data),
pick_first_(subchannel_data_->subchannel_list_->policy_),
subchannel_(std::move(subchannel)) {
GRPC_TRACE_LOG(pick_first, INFO) << "[PF " << pick_first_.get()
<< "] subchannel state " << this <<
" (subchannel " << subchannel_.get() << "): starting watch";
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get() << "): starting watch";
auto watcher = std::make_unique<Watcher>(Ref(DEBUG_LOCATION, "Watcher"));
watcher_ = watcher.get();
subchannel_->WatchConnectivityState(std::move(watcher));
@ -694,9 +695,9 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
}
void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << pick_first_.get()
<< " selected subchannel " <<
subchannel_.get();
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << pick_first_.get() << " selected subchannel "
<< subchannel_.get();
CHECK_NE(subchannel_data_, nullptr);
pick_first_->UnsetSelectedSubchannel(); // Cancel health watch, if any.
pick_first_->selected_ = std::move(subchannel_data_->subchannel_state_);
@ -1004,8 +1005,8 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
.Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Creating subchannel list " <<
this << " - channel args: " << args_.ToString();
<< "[PF " << policy_.get() << "] Creating subchannel list " << this
<< " - channel args: " << args_.ToString();
if (addresses == nullptr) return;
// Create a subchannel for each address.
addresses->ForEach([&](const EndpointAddresses& address) {
@ -1034,14 +1035,12 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
PickFirst::SubchannelList::~SubchannelList() {
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Destroying subchannel_list " <<
this;
<< "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
}
void PickFirst::SubchannelList::Orphan() {
GRPC_TRACE_LOG(pick_first, INFO) << "[PF " << policy_.get()
<< "] Shutting down subchannel_list " <<
this;
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
CHECK(!shutting_down_);
shutting_down_ = true;
// Cancel Happy Eyeballs timer, if any.
@ -1083,9 +1082,9 @@ void PickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// We didn't find another subchannel not in state TRANSIENT_FAILURE,
// so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel.
GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << policy_.get()
<< " subchannel list " << this <<
" failed to connect to all subchannels";
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels";
// Re-resolve and report TRANSIENT_FAILURE.
policy_->channel_control_helper()->RequestReresolution();
absl::Status status = absl::UnavailableError(
@ -1558,8 +1557,9 @@ void OldPickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return;
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] health watch state update: " <<
ConnectivityStateName(new_state) << " (" << status << ")";
<< "[PF " << policy_.get()
<< "] health watch state update: " << ConnectivityStateName(new_state)
<< " (" << status << ")";
switch (new_state) {
case GRPC_CHANNEL_READY:
policy_->channel_control_helper()->UpdateState(
@ -1905,8 +1905,7 @@ void OldPickFirst::SubchannelList::SubchannelData::
}
// Cases 1 and 2.
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << p << " selected subchannel " <<
subchannel_.get();
<< "Pick First " << p << " selected subchannel " << subchannel_.get();
p->selected_ = this;
// If health checking is enabled, start the health watch, but don't
// report a new picker -- we want to stay in CONNECTING while we wait
@ -1949,8 +1948,8 @@ OldPickFirst::SubchannelList::SubchannelList(
.Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Creating subchannel list " <<
this << " - channel args: " << args_.ToString();
<< "[PF " << policy_.get() << "] Creating subchannel list " << this
<< " - channel args: " << args_.ToString();
if (addresses == nullptr) return;
// Create a subchannel for each address.
addresses->ForEach([&](const EndpointAddresses& address) {
@ -1978,14 +1977,12 @@ OldPickFirst::SubchannelList::SubchannelList(
OldPickFirst::SubchannelList::~SubchannelList() {
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Destroying subchannel_list " <<
this;
<< "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
}
void OldPickFirst::SubchannelList::Orphan() {
GRPC_TRACE_LOG(pick_first, INFO) << "[PF " << policy_.get()
<< "] Shutting down subchannel_list " <<
this;
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
CHECK(!shutting_down_);
shutting_down_ = true;
for (auto& sd : subchannels_) {
@ -2029,9 +2026,9 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// We didn't find another subchannel not in state TRANSIENT_FAILURE,
// so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel.
GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << policy_.get()
<< " subchannel list " << this <<
" failed to connect to all subchannels";
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels";
// In case 2, swap to the new subchannel list. This means reporting
// TRANSIENT_FAILURE and dropping the existing (working) connection,
// but we can't ignore what the control plane has told us.

@ -403,9 +403,8 @@ void PriorityLb::ChoosePriorityLocked() {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] trying priority "
<< priority <<
", child " << child_name;
<< "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name;
auto& child = children_[child_name];
// Create child if needed.
if (child == nullptr) {
@ -462,9 +461,8 @@ void PriorityLb::ChoosePriorityLocked() {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] trying priority "
<< priority <<
", child " << child_name;
<< "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name;
auto& child = children_[child_name];
CHECK(child != nullptr);
if (child->connectivity_state() == GRPC_CHANNEL_CONNECTING) {
@ -627,16 +625,16 @@ PriorityLb::ChildPriority::ChildPriority(
RefCountedPtr<PriorityLb> priority_policy, std::string name)
: priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() <<
"] creating child " << name_ << " (" << this << ")";
<< "[priority_lb " << priority_policy_.get() << "] creating child "
<< name_ << " (" << this << ")";
// Start the failover timer.
failover_timer_ = MakeOrphanable<FailoverTimer>(Ref());
}
void PriorityLb::ChildPriority::Orphan() {
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] child " <<
name_ << " (" << this << "): orphaned";
<< "[priority_lb " << priority_policy_.get() << "] child " << name_
<< " (" << this << "): orphaned";
failover_timer_.reset();
deactivation_timer_.reset();
// Remove the child policy's interested_parties pollset_set from the
@ -664,8 +662,8 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
bool ignore_reresolution_requests) {
if (priority_policy_->shutting_down_) return absl::OkStatus();
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] child " <<
name_ << " (" << this << "): start update";
<< "[priority_lb " << priority_policy_.get() << "] child " << name_
<< " (" << this << "): start update";
ignore_reresolution_requests_ = ignore_reresolution_requests;
// Create policy if needed.
if (child_policy_ == nullptr) {

@ -656,9 +656,8 @@ absl::Status RingHash::UpdateLocked(UpdateArgs args) {
});
} else {
GRPC_TRACE_LOG(ring_hash_lb, INFO)
<< "[RH " << this
<< "] received update with addresses error: " <<
args.addresses.status();
<< "[RH " << this << "] received update with addresses error: "
<< args.addresses.status();
// If we already have an endpoint list, then keep using the existing
// list, but still report back that the update was not accepted.
if (!endpoints_.empty()) return args.addresses.status();

@ -1040,8 +1040,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
lb_policy_->channel_control_helper()->GetAuthority(),
args.initial_metadata)};
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this <<
": request keys: " << key.ToString();
<< "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": request keys: " << key.ToString();
Timestamp now = Timestamp::Now();
MutexLock lock(&lb_policy_->mu_);
if (lb_policy_->is_shutdown_) {
@ -1077,8 +1077,8 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
// If the entry has non-expired data, use it.
if (entry->data_expiration_time() >= now) {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this <<
": using cache entry " << entry;
<< "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": using cache entry " << entry;
return entry->Pick(args);
}
// If the entry is in backoff, then use the default target if set,
@ -1092,25 +1092,25 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
}
// RLS call pending. Queue the pick.
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this <<
": RLS request pending; queuing pick";
<< "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": RLS request pending; queuing pick";
return PickResult::Queue();
}
LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail(
const char* reason, PickArgs args, absl::Status status) {
if (default_child_policy_ != nullptr) {
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << lb_policy_.get()
<< "] picker=" << this << ": " <<
reason << "; using default target";
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
<< reason << "; using default target";
auto pick_result = default_child_policy_->Pick(args);
lb_policy_->MaybeExportPickCount(kMetricDefaultTargetPicks,
config_->default_target(), pick_result);
return pick_result;
}
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << lb_policy_.get()
<< "] picker=" << this << ": " <<
reason << "; failing pick";
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this << ": " << reason
<< "; failing pick";
auto& stats_plugins =
lb_policy_->channel_control_helper()->GetStatsPluginGroup();
stats_plugins.AddCounter(kMetricFailedPicks, 1,
@ -1200,8 +1200,8 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
void RlsLb::Cache::Entry::Orphan() {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] cache entry=" << this <<
" " << lru_iterator_->ToString() << ": cache entry evicted";
<< "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
<< lru_iterator_->ToString() << ": cache entry evicted";
is_shutdown_ = true;
lb_policy_->cache_.lru_list_.erase(lru_iterator_);
lru_iterator_ = lb_policy_->cache_.lru_list_.end(); // Just in case.
@ -1398,22 +1398,21 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
map_.emplace(key, OrphanablePtr<Entry>(entry));
size_ += entry_size;
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] key=" << key.ToString() <<
": cache entry added, entry=" << entry;
<< "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": cache entry added, entry=" << entry;
return entry;
}
// Entry found, so use it.
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] key=" << key.ToString() <<
": found cache entry " << it->second.get();
<< "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": found cache entry " << it->second.get();
it->second->MarkUsed();
return it->second.get();
}
void RlsLb::Cache::Resize(size_t bytes) {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] resizing cache to " << bytes <<
" bytes";
<< "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << " bytes";
size_limit_ = bytes;
MaybeShrinkSize(size_limit_);
}
@ -1498,9 +1497,9 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
auto map_it = map_.find(*lru_it);
CHECK(map_it != map_.end());
if (!map_it->second->CanEvict()) break;
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << lb_policy_
<< "] LRU eviction: removing entry " <<
map_it->second.get() << " " << lru_it->ToString();
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
<< map_it->second.get() << " " << lru_it->ToString();
size_ -= map_it->second->Size();
map_.erase(map_it);
}
@ -1639,8 +1638,8 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
void RlsLb::RlsChannel::Orphan() {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this <<
", channel=" << channel_.get() << ": shutdown";
<< "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
<< ", channel=" << channel_.get() << ": shutdown";
is_shutdown_ = true;
if (channel_ != nullptr) {
// Remove channelz linkage.
@ -1705,8 +1704,8 @@ RlsLb::RlsRequest::RlsRequest(
reason_(reason),
stale_header_data_(std::move(stale_header_data)) {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] rls_request=" << this <<
": RLS request created for key " << key_.ToString();
<< "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< ": RLS request created for key " << key_.ToString();
GRPC_CLOSURE_INIT(&call_complete_cb_, OnRlsCallComplete, this, nullptr);
ExecCtx::Run(
DEBUG_LOCATION,
@ -1720,8 +1719,8 @@ RlsLb::RlsRequest::~RlsRequest() { CHECK_EQ(call_, nullptr); }
void RlsLb::RlsRequest::Orphan() {
if (call_ != nullptr) {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] rls_request=" << this <<
" " << key_.ToString() << ": cancelling RLS call";
<< "[rlslb " << lb_policy_.get() << "] rls_request=" << this << " "
<< key_.ToString() << ": cancelling RLS call";
grpc_call_cancel_internal(call_);
}
Unref(DEBUG_LOCATION, "Orphan");
@ -2018,8 +2017,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
config_->default_target());
created_default_child = true;
} else {
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this <<
"] using existing child for default target";
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] using existing child for default target";
default_child_policy_ =
it->second->Ref(DEBUG_LOCATION, "DefaultChildPolicy");
}
@ -2048,8 +2047,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
p.second->StartUpdate();
}
} else if (created_default_child) {
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this <<
"] starting default child policy update";
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] starting default child policy update";
default_child_policy_->StartUpdate();
}
}
@ -2067,8 +2066,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
}
}
} else if (created_default_child) {
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this <<
"] finishing default child policy update";
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] finishing default child policy update";
absl::Status status = default_child_policy_->MaybeFinishUpdate();
if (!status.ok()) {
errors.emplace_back(absl::StrCat("target ", config_->default_target(),
@ -2167,9 +2166,9 @@ void RlsLb::UpdatePickerLocked() {
if (is_shutdown_) return;
for (auto& p : child_policy_map_) {
grpc_connectivity_state child_state = p.second->connectivity_state();
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] target "
<< p.second->target() <<
" in state " << ConnectivityStateName(child_state);
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] target " << p.second->target()
<< " in state " << ConnectivityStateName(child_state);
if (child_state == GRPC_CHANNEL_READY) {
state = GRPC_CHANNEL_READY;
break;
@ -2188,9 +2187,8 @@ void RlsLb::UpdatePickerLocked() {
}
}
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] reporting state " <<
ConnectivityStateName(state);
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] reporting state "
<< ConnectivityStateName(state);
absl::Status status;
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
status = absl::UnavailableError("no children available");

@ -247,9 +247,8 @@ absl::Status RoundRobin::UpdateLocked(UpdateArgs args) {
addresses = args.addresses->get();
} else {
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << this
<< "] received update with address error: " <<
args.addresses.status();
<< "[RR " << this
<< "] received update with address error: " << args.addresses.status();
// If we already have a child list, then keep using the existing
// list, but still report back that the update was not accepted.
if (endpoint_list_ != nullptr) return args.addresses.status();
@ -314,8 +313,8 @@ void RoundRobin::RoundRobinEndpointList::RoundRobinEndpoint::OnStateUpdate(
}
if (new_state == GRPC_CHANNEL_IDLE) {
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin << "] child " << this <<
" reported IDLE; requesting connection";
<< "[RR " << round_robin << "] child " << this
<< " reported IDLE; requesting connection";
ExitIdleLocked();
}
// If state changed, update state counters.
@ -396,9 +395,8 @@ void RoundRobin::RoundRobinEndpointList::
// 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
if (num_ready_ > 0) {
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin
<< "] reporting READY with child list " <<
this;
<< "[RR " << round_robin << "] reporting READY with child list "
<< this;
std::vector<RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>> pickers;
for (const auto& endpoint : endpoints()) {
auto state = endpoint->connectivity_state();
@ -411,8 +409,9 @@ void RoundRobin::RoundRobinEndpointList::
GRPC_CHANNEL_READY, absl::OkStatus(),
MakeRefCounted<Picker>(round_robin, std::move(pickers)));
} else if (num_connecting_ > 0) {
GRPC_TRACE_LOG(round_robin, INFO) << "[RR " << round_robin <<
"] reporting CONNECTING with child list " << this;
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin << "] reporting CONNECTING with child list "
<< this;
round_robin->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr));

@ -568,15 +568,13 @@ WeightedRoundRobin::Picker::Picker(RefCountedPtr<WeightedRoundRobin> wrr,
WeightedRoundRobin::Picker::~Picker() {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this <<
"] destroying picker";
<< "[WRR " << wrr_.get() << " picker " << this << "] destroying picker";
}
void WeightedRoundRobin::Picker::Orphaned() {
MutexLock lock(&timer_mu_);
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this <<
"] cancelling timer";
<< "[WRR " << wrr_.get() << " picker " << this << "] cancelling timer";
wrr_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset();
wrr_.reset();
@ -643,8 +641,8 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
{wrr_->channel_control_helper()->GetTarget()},
{wrr_->locality_name_});
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this <<
"] new weights: " << absl::StrJoin(weights, " ");
<< "[WRR " << wrr_.get() << " picker " << this
<< "] new weights: " << absl::StrJoin(weights, " ");
auto scheduler_or = StaticStrideScheduler::Make(
weights, [this]() { return wrr_->scheduler_state_.fetch_add(1); });
std::shared_ptr<StaticStrideScheduler> scheduler;
@ -652,12 +650,12 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
scheduler =
std::make_shared<StaticStrideScheduler>(std::move(*scheduler_or));
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this < < < <
"] new scheduler: " << scheduler.get();
<< "[WRR " << wrr_.get() << " picker " << this
<< "] new scheduler: " << scheduler.get();
} else {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this < < < <
"] no scheduler, falling back to RR";
<< "[WRR " << wrr_.get() << " picker " << this
<< "] no scheduler, falling back to RR";
stats_plugins.AddCounter(kMetricRrFallback, 1,
{wrr_->channel_control_helper()->GetTarget()},
{wrr_->locality_name_});
@ -686,8 +684,8 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
MutexLock lock(&self->timer_mu_);
if (self->timer_handle_.has_value()) {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << self->wrr_.get() << " picker " < < < <
self.get() << "] timer fired";
<< "[WRR " << self->wrr_.get() << " picker " << self.get()
<< "] timer fired";
self->BuildSchedulerAndStartTimerLocked();
}
}
@ -710,8 +708,8 @@ WeightedRoundRobin::WeightedRoundRobin(Args args)
.GetString(GRPC_ARG_LB_WEIGHTED_TARGET_CHILD)
.value_or("")) {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << this << "] Created -- locality_name=\"" < < < <
std::string(locality_name_) << "\"";
<< "[WRR " << this << "] Created -- locality_name=\""
<< std::string(locality_name_) << "\"";
}
WeightedRoundRobin::~WeightedRoundRobin() {
@ -772,9 +770,8 @@ absl::Status WeightedRoundRobin::UpdateLocked(UpdateArgs args) {
ordered_addresses.begin(), ordered_addresses.end()));
} else {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << this
<< "] received update with address error: " < < < <
args.addresses.status().ToString();
<< "[WRR " << this << "] received update with address error: "
<< args.addresses.status().ToString();
// If we already have an endpoint list, then keep using the existing
// list, but still report back that the update was not accepted.
if (endpoint_list_ != nullptr) return args.addresses.status();
@ -887,8 +884,8 @@ void WeightedRoundRobin::WrrEndpointList::WrrEndpoint::OnStateUpdate(
}
if (new_state == GRPC_CHANNEL_IDLE) {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] child " << this < < < <
" reported IDLE; requesting connection";
<< "[WRR " << wrr << "] child " << this
<< " reported IDLE; requesting connection";
ExitIdleLocked();
} else if (new_state == GRPC_CHANNEL_READY) {
// If we transition back to READY state, restart the blackout period.
@ -983,14 +980,14 @@ void WeightedRoundRobin::WrrEndpointList::
// 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
if (num_ready_ > 0) {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] reporting READY with endpoint list " < < < <
this;
<< "[WRR " << wrr << "] reporting READY with endpoint list " << this;
wrr->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, absl::Status(),
MakeRefCounted<Picker>(wrr->RefAsSubclass<WeightedRoundRobin>(), this));
} else if (num_connecting_ > 0) {
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO) << "[WRR " << wrr < < < <
"] reporting CONNECTING with endpoint list " << this;
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] reporting CONNECTING with endpoint list "
<< this;
wrr->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr));

@ -291,8 +291,8 @@ WeightedTargetLb::WeightedTargetLb(Args args)
WeightedTargetLb::~WeightedTargetLb() {
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this < < < <
"] destroying weighted_target LB policy";
<< "[weighted_target_lb " << this
<< "] destroying weighted_target LB policy";
}
void WeightedTargetLb::ShutdownLocked() {
@ -382,8 +382,8 @@ void WeightedTargetLb::UpdateStateLocked() {
// is being propagated to our children.
if (update_in_progress_) return;
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this < < < <
"] scanning children to determine connectivity state";
<< "[weighted_target_lb " << this
<< "] scanning children to determine connectivity state";
// Construct lists of child pickers with associated weights, one for
// children that are in state READY and another for children that are
// in state TRANSIENT_FAILURE. Each child is represented by a portion of
@ -449,9 +449,8 @@ void WeightedTargetLb::UpdateStateLocked() {
connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this
<< "] connectivity changed to " < < < <
ConnectivityStateName(connectivity_state);
<< "[weighted_target_lb " << this << "] connectivity changed to "
<< ConnectivityStateName(connectivity_state);
RefCountedPtr<SubchannelPicker> picker;
absl::Status status;
switch (connectivity_state) {
@ -524,8 +523,8 @@ WeightedTargetLb::WeightedChild::WeightedChild(
name_(name),
picker_(MakeRefCounted<QueuePicker>(nullptr)) {
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get() < < < <
"] created WeightedChild " << this << " for " << name_;
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] created WeightedChild " << this << " for " << name_;
}
WeightedTargetLb::WeightedChild::~WeightedChild() {
@ -655,8 +654,8 @@ void WeightedTargetLb::WeightedChild::DeactivateLocked() {
// If already deactivated, don't do that again.
if (weight_ == 0) return;
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get() < < < <
"] WeightedChild " << this << " " << name_ << ": deactivating";
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_ << ": deactivating";
// Set the child weight to 0 so that future picker won't contain this child.
weight_ = 0;
// Start a timer to delete the child.

@ -663,9 +663,8 @@ Json CdsLb::CreateChildPolicyConfigForLeafCluster(
Json::FromObject(std::move(outlier_detection_config))},
})});
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this
<< "] generated config for child policy: " < < < <
JsonDump(outlier_detection_policy, /*indent=*/1);
<< "[cdslb " << this << "] generated config for child policy: "
<< JsonDump(outlier_detection_policy, /*indent=*/1);
return outlier_detection_policy;
}
@ -696,9 +695,8 @@ Json CdsLb::CreateChildPolicyConfigForAggregateCluster(
})},
})});
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this
<< "] generated config for child policy: " < < < <
JsonDump(json, /*indent=*/1);
<< "[cdslb " << this << "] generated config for child policy: "
<< JsonDump(json, /*indent=*/1);
return json;
}
@ -714,8 +712,8 @@ void CdsLb::ResetState() {
}
void CdsLb::ReportTransientFailure(absl::Status status) {
GRPC_TRACE_LOG(cds_lb, INFO) << "[cdslb " << this < < < <
"] reporting TRANSIENT_FAILURE: " << status;
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this << "] reporting TRANSIENT_FAILURE: " << status;
ResetState();
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status,

@ -405,8 +405,8 @@ XdsClusterImplLb::Picker::Picker(XdsClusterImplLb* xds_cluster_impl_lb,
drop_stats_(xds_cluster_impl_lb->drop_stats_),
picker_(std::move(picker)) {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << xds_cluster_impl_lb < < < <
"] constructed new picker " << this;
<< "[xds_cluster_impl_lb " << xds_cluster_impl_lb
<< "] constructed new picker " << this;
}
LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
@ -500,14 +500,14 @@ XdsClusterImplLb::XdsClusterImplLb(RefCountedPtr<GrpcXdsClient> xds_client,
Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < <
"] created -- using xds client " << xds_client_.get();
<< "[xds_cluster_impl_lb " << this << "] created -- using xds client "
<< xds_client_.get();
}
XdsClusterImplLb::~XdsClusterImplLb() {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < <
"] destroying xds_cluster_impl LB policy";
<< "[xds_cluster_impl_lb " << this
<< "] destroying xds_cluster_impl LB policy";
}
void XdsClusterImplLb::ShutdownLocked() {
@ -535,8 +535,8 @@ void XdsClusterImplLb::ResetState() {
void XdsClusterImplLb::ReportTransientFailure(absl::Status status) {
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < <
"] reporting TRANSIENT_FAILURE: " << status;
<< "[xds_cluster_impl_lb " << this
<< "] reporting TRANSIENT_FAILURE: " << status;
ResetState();
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -766,8 +766,8 @@ OrphanablePtr<LoadBalancingPolicy> XdsClusterImplLb::CreateChildPolicyLocked(
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_cluster_impl_lb_trace);
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < <
"] Created new child policy handler " << lb_policy.get();
<< "[xds_cluster_impl_lb " << this
<< "] Created new child policy handler " << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -792,8 +792,8 @@ absl::Status XdsClusterImplLb::UpdateChildPolicyLocked(
args.Set(GRPC_ARG_XDS_CLUSTER_NAME, config_->cluster_name());
// Update the policy.
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this < < < <
"] Updating child policy handler " << child_policy_.get();
<< "[xds_cluster_impl_lb " << this << "] Updating child policy handler "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}

@ -250,8 +250,8 @@ XdsClusterManagerLb::XdsClusterManagerLb(Args args)
XdsClusterManagerLb::~XdsClusterManagerLb() {
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << this < < < <
"] destroying xds_cluster_manager LB policy";
<< "[xds_cluster_manager_lb " << this
<< "] destroying xds_cluster_manager LB policy";
}
void XdsClusterManagerLb::ShutdownLocked() {
@ -406,16 +406,14 @@ XdsClusterManagerLb::ClusterChild::ClusterChild(
name_(name),
picker_(MakeRefCounted<QueuePicker>(nullptr)) {
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb "
<< xds_cluster_manager_policy_.get() < < < <
"] created ClusterChild " << this << " for " << name_;
<< "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] created ClusterChild " << this << " for " << name_;
}
XdsClusterManagerLb::ClusterChild::~ClusterChild() {
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb "
<< xds_cluster_manager_policy_.get() < < < <
"] ClusterChild " << this << ": destroying child";
<< "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << ": destroying child";
xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild");
}

@ -465,8 +465,8 @@ XdsOverrideHostLb::Picker::Picker(
picker_(std::move(picker)),
override_host_health_status_set_(override_host_health_status_set) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() < < < <
"] constructed new picker " << this;
<< "[xds_override_host_lb " << policy_.get()
<< "] constructed new picker " << this;
}
absl::optional<LoadBalancingPolicy::PickResult>
@ -605,9 +605,8 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
// with lock contention and CPU usage due to sweeps over the map.
duration = std::max(duration, Duration::Seconds(5));
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get()
<< "] idle timer " < < < <
this << ": subchannel cleanup pass will run in " << duration;
<< "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< ": subchannel cleanup pass will run in " << duration;
timer_handle_ = policy_->channel_control_helper()->GetEventEngine()->RunAfter(
duration, [self = RefAsSubclass<IdleTimer>()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
@ -622,9 +621,8 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
void XdsOverrideHostLb::IdleTimer::Orphan() {
if (timer_handle_.has_value()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get()
<< "] idle timer " < < < <
this << ": cancelling";
<< "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< ": cancelling";
policy_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset();
}
@ -635,9 +633,8 @@ void XdsOverrideHostLb::IdleTimer::OnTimerLocked() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get()
<< "] idle timer " < < < <
this << ": timer fired";
<< "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< ": timer fired";
policy_->CleanupSubchannels();
}
}
@ -655,8 +652,8 @@ XdsOverrideHostLb::XdsOverrideHostLb(Args args)
XdsOverrideHostLb::~XdsOverrideHostLb() {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] destroying xds_override_host LB policy";
<< "[xds_override_host_lb " << this
<< "] destroying xds_override_host LB policy";
}
void XdsOverrideHostLb::ShutdownLocked() {
@ -694,8 +691,8 @@ void XdsOverrideHostLb::ResetState() {
void XdsOverrideHostLb::ReportTransientFailure(absl::Status status) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] reporting TRANSIENT_FAILURE: " << status;
<< "[xds_override_host_lb " << this
<< "] reporting TRANSIENT_FAILURE: " << status;
ResetState();
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -788,8 +785,8 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
std::make_shared<ChildEndpointIterator>(std::move(*args.addresses));
} else {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] address error: " << args.addresses.status();
<< "[xds_override_host_lb " << this
<< "] address error: " << args.addresses.status();
}
// Create child policy if needed.
if (child_policy_ == nullptr) {
@ -802,8 +799,8 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
update_args.config = new_config->child_config();
update_args.args = args_;
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] Updating child policy handler " << child_policy_.get();
<< "[xds_override_host_lb " << this << "] Updating child policy handler "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -833,8 +830,8 @@ OrphanablePtr<LoadBalancingPolicy> XdsOverrideHostLb::CreateChildPolicyLocked(
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_override_host_lb_trace);
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] Created new child policy handler " << lb_policy.get();
<< "[xds_override_host_lb " << this
<< "] Created new child policy handler " << lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -872,8 +869,8 @@ void XdsOverrideHostLb::UpdateAddressMap(
auto key = grpc_sockaddr_to_string(&address, /*normalize=*/false);
if (!key.ok()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] no key for endpoint address; not adding to map";
<< "[xds_override_host_lb " << this
<< "] no key for endpoint address; not adding to map";
} else {
addresses.push_back(*std::move(key));
}
@ -901,9 +898,8 @@ void XdsOverrideHostLb::UpdateAddressMap(
for (auto it = subchannel_map_.begin(); it != subchannel_map_.end();) {
if (addresses_for_map.find(it->first) == addresses_for_map.end()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] removing map key " < < < <
it->first;
<< "[xds_override_host_lb " << this << "] removing map key "
<< it->first;
it->second->UnsetSubchannel(&subchannel_refs_to_drop);
it = subchannel_map_.erase(it);
} else {
@ -916,9 +912,8 @@ void XdsOverrideHostLb::UpdateAddressMap(
auto it = subchannel_map_.find(address);
if (it == subchannel_map_.end()) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] adding map key " < < < <
address;
<< "[xds_override_host_lb " << this << "] adding map key "
<< address;
it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>())
.first;
}
@ -967,8 +962,8 @@ XdsOverrideHostLb::AdoptSubchannel(
void XdsOverrideHostLb::CreateSubchannelForAddress(absl::string_view address) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] creating owned subchannel for " << address;
<< "[xds_override_host_lb " << this << "] creating owned subchannel for "
<< address;
auto addr = StringToSockaddr(address);
CHECK(addr.ok());
// Note: We don't currently have any cases where per_address_args need to
@ -1009,8 +1004,8 @@ void XdsOverrideHostLb::CleanupSubchannels() {
auto subchannel = p.second->TakeOwnedSubchannel();
if (subchannel != nullptr) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this < < < <
"] dropping subchannel for " << p.first;
<< "[xds_override_host_lb " << this
<< "] dropping subchannel for " << p.first;
subchannel_refs_to_drop.push_back(std::move(subchannel));
}
} else {
@ -1085,8 +1080,8 @@ void XdsOverrideHostLb::SubchannelWrapper::CancelConnectivityStateWatch(
void XdsOverrideHostLb::SubchannelWrapper::Orphaned() {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() < < < <
"] subchannel wrapper " << this << " orphaned";
<< "[xds_override_host_lb " << policy_.get() << "] subchannel wrapper "
<< this << " orphaned";
if (!IsWorkSerializerDispatchEnabled()) {
wrapped_subchannel()->CancelConnectivityStateWatch(watcher_);
if (subchannel_entry_ != nullptr) {
@ -1197,9 +1192,9 @@ void XdsOverrideHostLb::SubchannelEntry::OnSubchannelWrapperOrphan(
if (subchannel != wrapper) return;
if (last_used_time_ < (Timestamp::Now() - connection_idle_timeout)) {
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb] removing unowned subchannel "
"wrapper " < < < <
subchannel;
<< "[xds_override_host_lb] removing unowned subchannel "
"wrapper "
<< subchannel;
subchannel_ = nullptr;
} else {
// The subchannel is being released by the child policy, but it
@ -1207,9 +1202,8 @@ void XdsOverrideHostLb::SubchannelEntry::OnSubchannelWrapperOrphan(
// the wrapper with the same underlying subchannel, and we hold
// our own ref to it.
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb] subchannel wrapper "
<< subchannel < < < <
": cloning to gain ownership";
<< "[xds_override_host_lb] subchannel wrapper " << subchannel
<< ": cloning to gain ownership";
subchannel_ = wrapper->Clone();
}
}

@ -240,9 +240,8 @@ absl::Status XdsWrrLocalityLb::UpdateLocked(UpdateArgs args) {
update_args.args = std::move(args.args);
// Update the policy.
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this
<< "] updating child policy " < < < <
child_policy_.get();
<< "[xds_wrr_locality_lb " << this << "] updating child policy "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -257,8 +256,8 @@ OrphanablePtr<LoadBalancingPolicy> XdsWrrLocalityLb::CreateChildPolicyLocked(
CoreConfiguration::Get().lb_policy_registry().CreateLoadBalancingPolicy(
"weighted_target_experimental", std::move(lb_policy_args));
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this < < < <
"] created new child policy " << lb_policy.get();
<< "[xds_wrr_locality_lb " << this << "] created new child policy "
<< lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this LB policy, which in turn is tied to the application's call.

Loading…
Cancel
Save