[Gpr_To_Absl_Logging] Using GRPC_TRACE_LOG instead of GRPC_TRACE_FLAG_ENABLED (#37349)

[Gpr_To_Absl_Logging] Using GRPC_TRACE_LOG instead of GRPC_TRACE_FLAG_ENABLED

Used find and replace for this PR

Find String:

```
if \(GRPC_TRACE_FLAG_ENABLED\((.*)\)\) {
(.*)LOG\(INFO\)(.*)
(.*);
(.*)}(.*)
```

Replace String:

`GRPC_TRACE_LOG($1, INFO) $3 $4;`

Closes #37349

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/37349 from tanvi-jagtap:GRPC_TRACE_FLAG_ENABLED_01 70ca4b8dce
PiperOrigin-RevId: 659039786
pull/37396/head
Tanvi Jagtap 4 months ago committed by Copybara-Service
parent cdc0263b1f
commit 523b22d219
  1. 25
      src/core/load_balancing/grpclb/grpclb.cc
  2. 10
      src/core/load_balancing/health_check_client.cc
  3. 6
      src/core/load_balancing/oob_backend_metric.cc
  4. 57
      src/core/load_balancing/outlier_detection/outlier_detection.cc
  5. 80
      src/core/load_balancing/pick_first/pick_first.cc
  6. 31
      src/core/load_balancing/priority/priority.cc
  7. 5
      src/core/load_balancing/ring_hash/ring_hash.cc
  8. 99
      src/core/load_balancing/rls/rls.cc
  9. 24
      src/core/load_balancing/round_robin/round_robin.cc
  10. 62
      src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc
  11. 25
      src/core/load_balancing/weighted_target/weighted_target.cc
  12. 16
      src/core/load_balancing/xds/cds.cc
  13. 34
      src/core/load_balancing/xds/xds_cluster_impl.cc
  14. 15
      src/core/load_balancing/xds/xds_cluster_manager.cc
  15. 98
      src/core/load_balancing/xds/xds_override_host.cc
  16. 12
      src/core/load_balancing/xds/xds_wrr_locality.cc

@ -948,10 +948,9 @@ void GrpcLb::BalancerCallState::Orphan() {
void GrpcLb::BalancerCallState::StartQuery() {
CHECK_NE(lb_call_, nullptr);
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << grpclb_policy_.get() << "] lb_calld=" << this
<< ": Starting LB call " << lb_call_;
}
// Create the ops.
grpc_call_error call_error;
grpc_op ops[3];
@ -1527,10 +1526,9 @@ class GrpcLb::NullLbTokenEndpointIterator final
void ForEach(absl::FunctionRef<void(const EndpointAddresses&)> callback)
const override {
parent_it_->ForEach([&](const EndpointAddresses& endpoint) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this
<< "] fallback address: " << endpoint.ToString();
}
callback(EndpointAddresses(endpoint.addresses(),
endpoint.args().SetObject(empty_token_)));
});
@ -1765,10 +1763,9 @@ OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
std::make_unique<Helper>(RefAsSubclass<GrpcLb>(DEBUG_LOCATION, "Helper"));
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args), &glb_trace);
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this << "] Created new child policy handler ("
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] Created new child policy handler ("
<< lb_policy.get() << ")";
}
// Add the gRPC LB's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
// activity on gRPC LB, which in turn is tied to the application's call.
@ -1819,10 +1816,9 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
child_policy_ = CreateChildPolicyLocked(update_args.args);
}
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this << "] Updating child policy handler "
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] Updating child policy handler "
<< child_policy_.get();
}
// TODO(roth): If we're in fallback mode and the child policy rejects the
// update, we should propagate that failure back to the resolver somehow.
(void)child_policy_->UpdateLocked(std::move(update_args));
@ -1864,10 +1860,9 @@ void GrpcLb::OnSubchannelCacheTimerLocked() {
subchannel_cache_timer_handle_.reset();
auto it = cached_subchannels_.begin();
if (it != cached_subchannels_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this << "] removing " << it->second.size()
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] removing " << it->second.size()
<< " subchannels from cache";
}
cached_subchannels_.erase(it);
}
if (!cached_subchannels_.empty()) {

@ -341,10 +341,9 @@ class HealthProducer::ConnectivityWatcher final
//
void HealthProducer::Start(RefCountedPtr<Subchannel> subchannel) {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthProducer " << this << ": starting with subchannel "
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthProducer " << this << ": starting with subchannel "
<< subchannel.get();
}
subchannel_ = std::move(subchannel);
{
MutexLock lock(&mu_);
@ -498,10 +497,9 @@ MakeHealthCheckWatcher(
health_check_service_name =
args.GetOwnedString(GRPC_ARG_HEALTH_CHECK_SERVICE_NAME);
}
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "creating HealthWatcher -- health_check_service_name=\""
GRPC_TRACE_LOG(health_check_client, INFO)
<< "creating HealthWatcher -- health_check_service_name=\""
<< health_check_service_name.value_or("N/A") << "\"";
}
return std::make_unique<HealthWatcher>(std::move(work_serializer),
std::move(health_check_service_name),
std::move(watcher));

@ -274,10 +274,8 @@ void OrcaProducer::MaybeStartStreamLocked() {
void OrcaProducer::NotifyWatchers(
const BackendMetricData& backend_metric_data) {
if (GRPC_TRACE_FLAG_ENABLED(orca_client)) {
LOG(INFO) << "OrcaProducer " << this
<< ": reporting backend metrics to watchers";
}
GRPC_TRACE_LOG(orca_client, INFO)
<< "OrcaProducer " << this << ": reporting backend metrics to watchers";
MutexLock lock(&mu_);
for (OrcaWatcher* watcher : watchers_) {
watcher->watcher()->OnBackendMetricReport(backend_metric_data);

@ -580,10 +580,9 @@ OutlierDetectionLb::OutlierDetectionLb(Args args)
}
OutlierDetectionLb::~OutlierDetectionLb() {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] destroying outlier_detection LB policy";
}
}
void OutlierDetectionLb::ShutdownLocked() {
@ -622,10 +621,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
// Update outlier detection timer.
if (!config_->CountingEnabled()) {
// No need for timer. Cancel the current timer, if any.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] counting disabled, cancelling timer";
}
ejection_timer_.reset();
} else if (ejection_timer_ == nullptr) {
// No timer running. Start it now.
@ -643,10 +641,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
// with the same start time.
// Note that if the new deadline is in the past, the timer will fire
// immediately.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] interval changed, replacing timer";
}
ejection_timer_ = MakeOrphanable<EjectionTimer>(
RefAsSubclass<OutlierDetectionLb>(), ejection_timer_->StartTime());
}
@ -663,10 +660,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
// Find the entry in the endpoint map.
auto it = endpoint_state_map_.find(key);
if (it == endpoint_state_map_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] adding endpoint entry for " << key.ToString();
}
// The endpoint is not present in the map, so we'll need to add it.
// Start by getting a pointer to the entry for each address in the
// subchannel map, creating the entry if needed.
@ -722,10 +718,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
for (auto it = endpoint_state_map_.begin();
it != endpoint_state_map_.end();) {
if (current_endpoints.find(it->first) == current_endpoints.end()) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] removing endpoint map entry " << it->first.ToString();
}
it = endpoint_state_map_.erase(it);
} else {
++it;
@ -742,10 +737,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note);
update_args.config = config_->child_policy();
update_args.args = std::move(args.args);
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
<< "] Updating child policy handler " << child_policy_.get();
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this << "] Updating child policy handler "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -774,10 +768,9 @@ OrphanablePtr<LoadBalancingPolicy> OutlierDetectionLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&outlier_detection_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] Created new child policy handler " << lb_policy.get();
}
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -842,10 +835,9 @@ OutlierDetectionLb::EjectionTimer::EjectionTimer(
RefCountedPtr<OutlierDetectionLb> parent, Timestamp start_time)
: parent_(std::move(parent)), start_time_(start_time) {
auto interval = parent_->config_->outlier_detection_config().interval;
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] ejection timer will run in " << interval.ToString();
}
timer_handle_ = parent_->channel_control_helper()->GetEventEngine()->RunAfter(
interval, [self = Ref(DEBUG_LOCATION, "EjectionTimer")]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
@ -868,10 +860,9 @@ void OutlierDetectionLb::EjectionTimer::Orphan() {
void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
if (!timer_handle_.has_value()) return;
timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] ejection timer running";
}
std::map<EndpointState*, double> success_rate_ejection_candidates;
std::map<EndpointState*, double> failure_percentage_ejection_candidates;
size_t ejected_host_count = 0;
@ -967,10 +958,9 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
(current_percent < config.max_ejection_percent))) {
// Eject and record the timestamp for use when ejecting addresses in
// this iteration.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] ejecting candidate";
}
candidate.first->Eject(time_now);
++ejected_host_count;
}
@ -1014,10 +1004,9 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
(current_percent < config.max_ejection_percent))) {
// Eject and record the timestamp for use when ejecting addresses in
// this iteration.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] ejecting candidate";
}
candidate.first->Eject(time_now);
++ejected_host_count;
}

@ -631,10 +631,10 @@ void PickFirst::GoIdle() {
void PickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: "
<< ConnectivityStateName(new_state) << " (" << status << ")";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get()
<< "] health watch state update: " << ConnectivityStateName(new_state)
<< " (" << status << ")";
switch (new_state) {
case GRPC_CHANNEL_READY:
policy_->channel_control_helper()->UpdateState(
@ -672,10 +672,9 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
: subchannel_data_(subchannel_data),
pick_first_(subchannel_data_->subchannel_list_->policy_),
subchannel_(std::move(subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get() << "): starting watch";
}
auto watcher = std::make_unique<Watcher>(Ref(DEBUG_LOCATION, "Watcher"));
watcher_ = watcher.get();
subchannel_->WatchConnectivityState(std::move(watcher));
@ -696,10 +695,9 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
}
void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << pick_first_.get() << " selected subchannel "
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << pick_first_.get() << " selected subchannel "
<< subchannel_.get();
}
CHECK_NE(subchannel_data_, nullptr);
pick_first_->UnsetSelectedSubchannel(); // Cancel health watch, if any.
pick_first_->selected_ = std::move(subchannel_data_->subchannel_state_);
@ -1006,10 +1004,9 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING)
.Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list "
<< this << " - channel args: " << args_.ToString();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Creating subchannel list " << this
<< " - channel args: " << args_.ToString();
if (addresses == nullptr) return;
// Create a subchannel for each address.
addresses->ForEach([&](const EndpointAddresses& address) {
@ -1037,17 +1034,13 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
}
PickFirst::SubchannelList::~SubchannelList() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list "
<< this;
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
}
void PickFirst::SubchannelList::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list "
<< this;
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
CHECK(!shutting_down_);
shutting_down_ = true;
// Cancel Happy Eyeballs timer, if any.
@ -1089,10 +1082,9 @@ void PickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// We didn't find another subchannel not in state TRANSIENT_FAILURE,
// so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels";
}
// Re-resolve and report TRANSIENT_FAILURE.
policy_->channel_control_helper()->RequestReresolution();
absl::Status status = absl::UnavailableError(
@ -1564,10 +1556,10 @@ void OldPickFirst::UnsetSelectedSubchannel() {
void OldPickFirst::HealthWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, absl::Status status) {
if (policy_->health_watcher_ != this) return;
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] health watch state update: "
<< ConnectivityStateName(new_state) << " (" << status << ")";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get()
<< "] health watch state update: " << ConnectivityStateName(new_state)
<< " (" << status << ")";
switch (new_state) {
case GRPC_CHANNEL_READY:
policy_->channel_control_helper()->UpdateState(
@ -1912,10 +1904,8 @@ void OldPickFirst::SubchannelList::SubchannelData::
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
}
// Cases 1 and 2.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << p << " selected subchannel "
<< subchannel_.get();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << p << " selected subchannel " << subchannel_.get();
p->selected_ = this;
// If health checking is enabled, start the health watch, but don't
// report a new picker -- we want to stay in CONNECTING while we wait
@ -1957,10 +1947,9 @@ OldPickFirst::SubchannelList::SubchannelList(
args_(args.Remove(GRPC_ARG_INTERNAL_PICK_FIRST_ENABLE_HEALTH_CHECKING)
.Remove(
GRPC_ARG_INTERNAL_PICK_FIRST_OMIT_STATUS_MESSAGE_PREFIX)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] Creating subchannel list "
<< this << " - channel args: " << args_.ToString();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Creating subchannel list " << this
<< " - channel args: " << args_.ToString();
if (addresses == nullptr) return;
// Create a subchannel for each address.
addresses->ForEach([&](const EndpointAddresses& address) {
@ -1987,17 +1976,13 @@ OldPickFirst::SubchannelList::SubchannelList(
}
OldPickFirst::SubchannelList::~SubchannelList() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] Destroying subchannel_list "
<< this;
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Destroying subchannel_list " << this;
}
void OldPickFirst::SubchannelList::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] Shutting down subchannel_list "
<< this;
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] Shutting down subchannel_list " << this;
CHECK(!shutting_down_);
shutting_down_ = true;
for (auto& sd : subchannels_) {
@ -2041,10 +2026,9 @@ void OldPickFirst::SubchannelList::MaybeFinishHappyEyeballsPass() {
// We didn't find another subchannel not in state TRANSIENT_FAILURE,
// so report TRANSIENT_FAILURE and switch to a mode in which we try to
// connect to all addresses in parallel.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << policy_.get() << " subchannel list " << this
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << policy_.get() << " subchannel list " << this
<< " failed to connect to all subchannels";
}
// In case 2, swap to the new subchannel list. This means reporting
// TRANSIENT_FAILURE and dropping the existing (working) connection,
// but we can't ignore what the control plane has told us.

@ -402,10 +402,9 @@ void PriorityLb::ChoosePriorityLocked() {
++priority) {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name;
}
auto& child = children_[child_name];
// Create child if needed.
if (child == nullptr) {
@ -461,10 +460,9 @@ void PriorityLb::ChoosePriorityLocked() {
++priority) {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] trying priority " << priority
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] trying priority " << priority
<< ", child " << child_name;
}
auto& child = children_[child_name];
CHECK(child != nullptr);
if (child->connectivity_state() == GRPC_CHANNEL_CONNECTING) {
@ -626,19 +624,17 @@ void PriorityLb::ChildPriority::FailoverTimer::OnTimerLocked() {
PriorityLb::ChildPriority::ChildPriority(
RefCountedPtr<PriorityLb> priority_policy, std::string name)
: priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << priority_policy_.get()
<< "] creating child " << name_ << " (" << this << ")";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] creating child "
<< name_ << " (" << this << ")";
// Start the failover timer.
failover_timer_ = MakeOrphanable<FailoverTimer>(Ref());
}
void PriorityLb::ChildPriority::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): orphaned";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] child " << name_
<< " (" << this << "): orphaned";
failover_timer_.reset();
deactivation_timer_.reset();
// Remove the child policy's interested_parties pollset_set from the
@ -665,10 +661,9 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
RefCountedPtr<LoadBalancingPolicy::Config> config,
bool ignore_reresolution_requests) {
if (priority_policy_->shutting_down_) return absl::OkStatus();
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): start update";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] child " << name_
<< " (" << this << "): start update";
ignore_reresolution_requests_ = ignore_reresolution_requests;
// Create policy if needed.
if (child_policy_ == nullptr) {

@ -655,10 +655,9 @@ absl::Status RingHash::UpdateLocked(UpdateArgs args) {
}
});
} else {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this << "] received update with addresses error: "
GRPC_TRACE_LOG(ring_hash_lb, INFO)
<< "[RH " << this << "] received update with addresses error: "
<< args.addresses.status();
}
// If we already have an endpoint list, then keep using the existing
// list, but still report back that the update was not accepted.
if (!endpoints_.empty()) return args.addresses.status();

@ -1039,10 +1039,9 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
BuildKeyMap(config_->key_builder_map(), args.path,
lb_policy_->channel_control_helper()->GetAuthority(),
args.initial_metadata)};
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": request keys: " << key.ToString();
}
Timestamp now = Timestamp::Now();
MutexLock lock(&lb_policy_->mu_);
if (lb_policy_->is_shutdown_) {
@ -1077,10 +1076,9 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
if (entry != nullptr) {
// If the entry has non-expired data, use it.
if (entry->data_expiration_time() >= now) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": using cache entry " << entry;
}
return entry->Pick(args);
}
// If the entry is in backoff, then use the default target if set,
@ -1093,29 +1091,26 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
}
}
// RLS call pending. Queue the pick.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this
<< ": RLS request pending; queuing pick";
}
return PickResult::Queue();
}
LoadBalancingPolicy::PickResult RlsLb::Picker::PickFromDefaultTargetOrFail(
const char* reason, PickArgs args, absl::Status status) {
if (default_child_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
<< reason << "; using default target";
}
auto pick_result = default_child_policy_->Pick(args);
lb_policy_->MaybeExportPickCount(kMetricDefaultTargetPicks,
config_->default_target(), pick_result);
return pick_result;
}
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] picker=" << this << ": "
<< reason << "; failing pick";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] picker=" << this << ": " << reason
<< "; failing pick";
auto& stats_plugins =
lb_policy_->channel_control_helper()->GetStatsPluginGroup();
stats_plugins.AddCounter(kMetricFailedPicks, 1,
@ -1204,10 +1199,9 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
lb_policy_->cache_.lru_list_.end(), key)) {}
void RlsLb::Cache::Entry::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] cache entry=" << this
<< " " << lru_iterator_->ToString() << ": cache entry evicted";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
<< lru_iterator_->ToString() << ": cache entry evicted";
is_shutdown_ = true;
lb_policy_->cache_.lru_list_.erase(lru_iterator_);
lru_iterator_ = lb_policy_->cache_.lru_list_.end(); // Just in case.
@ -1403,26 +1397,22 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
lb_policy_->RefAsSubclass<RlsLb>(DEBUG_LOCATION, "CacheEntry"), key);
map_.emplace(key, OrphanablePtr<Entry>(entry));
size_ += entry_size;
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": cache entry added, entry=" << entry;
}
return entry;
}
// Entry found, so use it.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_ << "] key=" << key.ToString()
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] key=" << key.ToString()
<< ": found cache entry " << it->second.get();
}
it->second->MarkUsed();
return it->second.get();
}
void RlsLb::Cache::Resize(size_t bytes) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_ << "] resizing cache to " << bytes
<< " bytes";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << " bytes";
size_limit_ = bytes;
MaybeShrinkSize(size_limit_);
}
@ -1507,10 +1497,9 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
auto map_it = map_.find(*lru_it);
CHECK(map_it != map_.end());
if (!map_it->second->CanEvict()) break;
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
<< map_it->second.get() << " " << lru_it->ToString();
}
size_ -= map_it->second->Size();
map_.erase(map_it);
}
@ -1648,10 +1637,9 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
}
void RlsLb::RlsChannel::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
<< ", channel=" << channel_.get() << ": shutdown";
}
is_shutdown_ = true;
if (channel_ != nullptr) {
// Remove channelz linkage.
@ -1715,10 +1703,9 @@ RlsLb::RlsRequest::RlsRequest(
backoff_state_(std::move(backoff_state)),
reason_(reason),
stale_header_data_(std::move(stale_header_data)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< ": RLS request created for key " << key_.ToString();
}
GRPC_CLOSURE_INIT(&call_complete_cb_, OnRlsCallComplete, this, nullptr);
ExecCtx::Run(
DEBUG_LOCATION,
@ -1731,10 +1718,9 @@ RlsLb::RlsRequest::~RlsRequest() { CHECK_EQ(call_, nullptr); }
void RlsLb::RlsRequest::Orphan() {
if (call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< " " << key_.ToString() << ": cancelling RLS call";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] rls_request=" << this << " "
<< key_.ToString() << ": cancelling RLS call";
grpc_call_cancel_internal(call_);
}
Unref(DEBUG_LOCATION, "Orphan");
@ -2031,10 +2017,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
config_->default_target());
created_default_child = true;
} else {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this
<< "] using existing child for default target";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] using existing child for default target";
default_child_policy_ =
it->second->Ref(DEBUG_LOCATION, "DefaultChildPolicy");
}
@ -2063,10 +2047,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
p.second->StartUpdate();
}
} else if (created_default_child) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this
<< "] starting default child policy update";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] starting default child policy update";
default_child_policy_->StartUpdate();
}
}
@ -2084,10 +2066,8 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
}
}
} else if (created_default_child) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this
<< "] finishing default child policy update";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] finishing default child policy update";
absl::Status status = default_child_policy_->MaybeFinishUpdate();
if (!status.ok()) {
errors.emplace_back(absl::StrCat("target ", config_->default_target(),
@ -2186,10 +2166,9 @@ void RlsLb::UpdatePickerLocked() {
if (is_shutdown_) return;
for (auto& p : child_policy_map_) {
grpc_connectivity_state child_state = p.second->connectivity_state();
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] target " << p.second->target()
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] target " << p.second->target()
<< " in state " << ConnectivityStateName(child_state);
}
if (child_state == GRPC_CHANNEL_READY) {
state = GRPC_CHANNEL_READY;
break;
@ -2208,10 +2187,8 @@ void RlsLb::UpdatePickerLocked() {
}
}
}
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] reporting state "
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] reporting state "
<< ConnectivityStateName(state);
}
absl::Status status;
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
status = absl::UnavailableError("no children available");

@ -246,10 +246,9 @@ absl::Status RoundRobin::UpdateLocked(UpdateArgs args) {
}
addresses = args.addresses->get();
} else {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << this << "] received update with address error: "
<< args.addresses.status();
}
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << this
<< "] received update with address error: " << args.addresses.status();
// If we already have a child list, then keep using the existing
// list, but still report back that the update was not accepted.
if (endpoint_list_ != nullptr) return args.addresses.status();
@ -313,10 +312,9 @@ void RoundRobin::RoundRobinEndpointList::RoundRobinEndpoint::OnStateUpdate(
<< status << ")";
}
if (new_state == GRPC_CHANNEL_IDLE) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << round_robin << "] child " << this
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin << "] child " << this
<< " reported IDLE; requesting connection";
}
ExitIdleLocked();
}
// If state changed, update state counters.
@ -396,10 +394,9 @@ void RoundRobin::RoundRobinEndpointList::
// 2) ANY child is CONNECTING => policy is CONNECTING.
// 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
if (num_ready_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << round_robin << "] reporting READY with child list "
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin << "] reporting READY with child list "
<< this;
}
std::vector<RefCountedPtr<LoadBalancingPolicy::SubchannelPicker>> pickers;
for (const auto& endpoint : endpoints()) {
auto state = endpoint->connectivity_state();
@ -412,10 +409,9 @@ void RoundRobin::RoundRobinEndpointList::
GRPC_CHANNEL_READY, absl::OkStatus(),
MakeRefCounted<Picker>(round_robin, std::move(pickers)));
} else if (num_connecting_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << round_robin
<< "] reporting CONNECTING with child list " << this;
}
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin << "] reporting CONNECTING with child list "
<< this;
round_robin->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr));

@ -567,18 +567,14 @@ WeightedRoundRobin::Picker::Picker(RefCountedPtr<WeightedRoundRobin> wrr,
}
WeightedRoundRobin::Picker::~Picker() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] destroying picker";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this << "] destroying picker";
}
void WeightedRoundRobin::Picker::Orphaned() {
MutexLock lock(&timer_mu_);
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] cancelling timer";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this << "] cancelling timer";
wrr_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset();
wrr_.reset();
@ -644,25 +640,22 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
stats_plugins.AddCounter(kMetricEndpointWeightStale, num_stale,
{wrr_->channel_control_helper()->GetTarget()},
{wrr_->locality_name_});
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this
<< "] new weights: " << absl::StrJoin(weights, " ");
}
auto scheduler_or = StaticStrideScheduler::Make(
weights, [this]() { return wrr_->scheduler_state_.fetch_add(1); });
std::shared_ptr<StaticStrideScheduler> scheduler;
if (scheduler_or.has_value()) {
scheduler =
std::make_shared<StaticStrideScheduler>(std::move(*scheduler_or));
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this
<< "] new scheduler: " << scheduler.get();
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this
<< "] no scheduler, falling back to RR";
}
stats_plugins.AddCounter(kMetricRrFallback, 1,
{wrr_->channel_control_helper()->GetTarget()},
{wrr_->locality_name_});
@ -690,10 +683,9 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
{
MutexLock lock(&self->timer_mu_);
if (self->timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << self->wrr_.get() << " picker "
<< self.get() << "] timer fired";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << self->wrr_.get() << " picker " << self.get()
<< "] timer fired";
self->BuildSchedulerAndStartTimerLocked();
}
}
@ -715,10 +707,9 @@ WeightedRoundRobin::WeightedRoundRobin(Args args)
locality_name_(channel_args()
.GetString(GRPC_ARG_LB_WEIGHTED_TARGET_CHILD)
.value_or("")) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << this << "] Created -- locality_name=\""
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << this << "] Created -- locality_name=\""
<< std::string(locality_name_) << "\"";
}
}
WeightedRoundRobin::~WeightedRoundRobin() {
@ -778,10 +769,9 @@ absl::Status WeightedRoundRobin::UpdateLocked(UpdateArgs args) {
std::make_shared<EndpointAddressesListIterator>(EndpointAddressesList(
ordered_addresses.begin(), ordered_addresses.end()));
} else {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << this << "] received update with address error: "
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << this << "] received update with address error: "
<< args.addresses.status().ToString();
}
// If we already have an endpoint list, then keep using the existing
// list, but still report back that the update was not accepted.
if (endpoint_list_ != nullptr) return args.addresses.status();
@ -893,10 +883,9 @@ void WeightedRoundRobin::WrrEndpointList::WrrEndpoint::OnStateUpdate(
<< status << ")";
}
if (new_state == GRPC_CHANNEL_IDLE) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr << "] child " << this
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] child " << this
<< " reported IDLE; requesting connection";
}
ExitIdleLocked();
} else if (new_state == GRPC_CHANNEL_READY) {
// If we transition back to READY state, restart the blackout period.
@ -990,18 +979,15 @@ void WeightedRoundRobin::WrrEndpointList::
// 2) ANY child is CONNECTING => policy is CONNECTING.
// 3) ALL children are TRANSIENT_FAILURE => policy is TRANSIENT_FAILURE.
if (num_ready_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr << "] reporting READY with endpoint list "
<< this;
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] reporting READY with endpoint list " << this;
wrr->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, absl::Status(),
MakeRefCounted<Picker>(wrr->RefAsSubclass<WeightedRoundRobin>(), this));
} else if (num_connecting_ > 0) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr
<< "] reporting CONNECTING with endpoint list " << this;
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] reporting CONNECTING with endpoint list "
<< this;
wrr->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr));

@ -290,10 +290,9 @@ WeightedTargetLb::WeightedTargetLb(Args args)
}
WeightedTargetLb::~WeightedTargetLb() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << this
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this
<< "] destroying weighted_target LB policy";
}
}
void WeightedTargetLb::ShutdownLocked() {
@ -382,10 +381,9 @@ void WeightedTargetLb::UpdateStateLocked() {
// all children. This avoids unnecessary picker churn while an update
// is being propagated to our children.
if (update_in_progress_) return;
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << this
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this
<< "] scanning children to determine connectivity state";
}
// Construct lists of child pickers with associated weights, one for
// children that are in state READY and another for children that are
// in state TRANSIENT_FAILURE. Each child is represented by a portion of
@ -450,10 +448,9 @@ void WeightedTargetLb::UpdateStateLocked() {
} else {
connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
}
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << this << "] connectivity changed to "
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this << "] connectivity changed to "
<< ConnectivityStateName(connectivity_state);
}
RefCountedPtr<SubchannelPicker> picker;
absl::Status status;
switch (connectivity_state) {
@ -525,10 +522,9 @@ WeightedTargetLb::WeightedChild::WeightedChild(
: weighted_target_policy_(std::move(weighted_target_policy)),
name_(name),
picker_(MakeRefCounted<QueuePicker>(nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] created WeightedChild " << this << " for " << name_;
}
}
WeightedTargetLb::WeightedChild::~WeightedChild() {
@ -657,10 +653,9 @@ void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
void WeightedTargetLb::WeightedChild::DeactivateLocked() {
// If already deactivated, don't do that again.
if (weight_ == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_ << ": deactivating";
}
// Set the child weight to 0 so that future picker won't contain this child.
weight_ = 0;
// Start a timer to delete the child.

@ -662,10 +662,9 @@ Json CdsLb::CreateChildPolicyConfigForLeafCluster(
{"outlier_detection_experimental",
Json::FromObject(std::move(outlier_detection_config))},
})});
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this << "] generated config for child policy: "
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this << "] generated config for child policy: "
<< JsonDump(outlier_detection_policy, /*indent=*/1);
}
return outlier_detection_policy;
}
@ -695,10 +694,9 @@ Json CdsLb::CreateChildPolicyConfigForAggregateCluster(
{"priorities", Json::FromArray(std::move(priority_priorities))},
})},
})});
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this << "] generated config for child policy: "
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this << "] generated config for child policy: "
<< JsonDump(json, /*indent=*/1);
}
return json;
}
@ -714,10 +712,8 @@ void CdsLb::ResetState() {
}
void CdsLb::ReportTransientFailure(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this
<< "] reporting TRANSIENT_FAILURE: " << status;
}
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this << "] reporting TRANSIENT_FAILURE: " << status;
ResetState();
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status,

@ -404,10 +404,9 @@ XdsClusterImplLb::Picker::Picker(XdsClusterImplLb* xds_cluster_impl_lb,
drop_config_(xds_cluster_impl_lb->drop_config_),
drop_stats_(xds_cluster_impl_lb->drop_stats_),
picker_(std::move(picker)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << xds_cluster_impl_lb
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << xds_cluster_impl_lb
<< "] constructed new picker " << this;
}
}
LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
@ -500,17 +499,15 @@ LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
XdsClusterImplLb::XdsClusterImplLb(RefCountedPtr<GrpcXdsClient> xds_client,
Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this
<< "] created -- using xds client " << xds_client_.get();
}
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this << "] created -- using xds client "
<< xds_client_.get();
}
XdsClusterImplLb::~XdsClusterImplLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this
<< "] destroying xds_cluster_impl LB policy";
}
}
void XdsClusterImplLb::ShutdownLocked() {
@ -537,10 +534,9 @@ void XdsClusterImplLb::ResetState() {
}
void XdsClusterImplLb::ReportTransientFailure(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this
<< "] reporting TRANSIENT_FAILURE: " << status;
}
ResetState();
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -769,10 +765,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsClusterImplLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_cluster_impl_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this
<< "] Created new child policy handler " << lb_policy.get();
}
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -796,10 +791,9 @@ absl::Status XdsClusterImplLb::UpdateChildPolicyLocked(
update_args.args =
args.Set(GRPC_ARG_XDS_CLUSTER_NAME, config_->cluster_name());
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this
<< "] Updating child policy handler " << child_policy_.get();
}
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this << "] Updating child policy handler "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}

@ -249,10 +249,9 @@ XdsClusterManagerLb::XdsClusterManagerLb(Args args)
: LoadBalancingPolicy(std::move(args)) {}
XdsClusterManagerLb::~XdsClusterManagerLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << this
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << this
<< "] destroying xds_cluster_manager LB policy";
}
}
void XdsClusterManagerLb::ShutdownLocked() {
@ -406,17 +405,15 @@ XdsClusterManagerLb::ClusterChild::ClusterChild(
: xds_cluster_manager_policy_(std::move(xds_cluster_manager_policy)),
name_(name),
picker_(MakeRefCounted<QueuePicker>(nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] created ClusterChild " << this << " for " << name_;
}
}
XdsClusterManagerLb::ClusterChild::~ClusterChild() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << ": destroying child";
}
xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild");
}

@ -464,10 +464,9 @@ XdsOverrideHostLb::Picker::Picker(
: policy_(std::move(xds_override_host_lb)),
picker_(std::move(picker)),
override_host_health_status_set_(override_host_health_status_set) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << policy_.get()
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get()
<< "] constructed new picker " << this;
}
}
absl::optional<LoadBalancingPolicy::PickResult>
@ -605,10 +604,9 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
// Min time between timer runs is 5s so that we don't kill ourselves
// with lock contention and CPU usage due to sweeps over the map.
duration = std::max(duration, Duration::Seconds(5));
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer "
<< this << ": subchannel cleanup pass will run in " << duration;
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< ": subchannel cleanup pass will run in " << duration;
timer_handle_ = policy_->channel_control_helper()->GetEventEngine()->RunAfter(
duration, [self = RefAsSubclass<IdleTimer>()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
@ -622,10 +620,9 @@ XdsOverrideHostLb::IdleTimer::IdleTimer(RefCountedPtr<XdsOverrideHostLb> policy,
void XdsOverrideHostLb::IdleTimer::Orphan() {
if (timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer "
<< this << ": cancelling";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< ": cancelling";
policy_->channel_control_helper()->GetEventEngine()->Cancel(*timer_handle_);
timer_handle_.reset();
}
@ -635,10 +632,9 @@ void XdsOverrideHostLb::IdleTimer::Orphan() {
void XdsOverrideHostLb::IdleTimer::OnTimerLocked() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << policy_.get() << "] idle timer "
<< this << ": timer fired";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() << "] idle timer " << this
<< ": timer fired";
policy_->CleanupSubchannels();
}
}
@ -655,10 +651,9 @@ XdsOverrideHostLb::XdsOverrideHostLb(Args args)
}
XdsOverrideHostLb::~XdsOverrideHostLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] destroying xds_override_host LB policy";
}
}
void XdsOverrideHostLb::ShutdownLocked() {
@ -695,10 +690,9 @@ void XdsOverrideHostLb::ResetState() {
}
void XdsOverrideHostLb::ReportTransientFailure(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] reporting TRANSIENT_FAILURE: " << status;
}
ResetState();
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, status,
@ -790,11 +784,10 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
args.addresses =
std::make_shared<ChildEndpointIterator>(std::move(*args.addresses));
} else {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] address error: " << args.addresses.status();
}
}
// Create child policy if needed.
if (child_policy_ == nullptr) {
child_policy_ = CreateChildPolicyLocked(args.args);
@ -805,10 +798,9 @@ absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note);
update_args.config = new_config->child_config();
update_args.args = args_;
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
<< "] Updating child policy handler " << child_policy_.get();
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] Updating child policy handler "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -837,10 +829,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsOverrideHostLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_override_host_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] Created new child policy handler " << lb_policy.get();
}
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -877,10 +868,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
for (const auto& address : endpoint.addresses()) {
auto key = grpc_sockaddr_to_string(&address, /*normalize=*/false);
if (!key.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] no key for endpoint address; not adding to map";
}
} else {
addresses.push_back(*std::move(key));
}
@ -907,10 +897,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
MutexLock lock(&mu_);
for (auto it = subchannel_map_.begin(); it != subchannel_map_.end();) {
if (addresses_for_map.find(it->first) == addresses_for_map.end()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] removing map key "
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] removing map key "
<< it->first;
}
it->second->UnsetSubchannel(&subchannel_refs_to_drop);
it = subchannel_map_.erase(it);
} else {
@ -922,10 +911,9 @@ void XdsOverrideHostLb::UpdateAddressMap(
auto& address_info = p.second;
auto it = subchannel_map_.find(address);
if (it == subchannel_map_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] adding map key "
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] adding map key "
<< address;
}
it = subchannel_map_.emplace(address, MakeRefCounted<SubchannelEntry>())
.first;
}
@ -973,10 +961,9 @@ XdsOverrideHostLb::AdoptSubchannel(
}
void XdsOverrideHostLb::CreateSubchannelForAddress(absl::string_view address) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
<< "] creating owned subchannel for " << address;
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] creating owned subchannel for "
<< address;
auto addr = StringToSockaddr(address);
CHECK(addr.ok());
// Note: We don't currently have any cases where per_address_args need to
@ -1016,10 +1003,9 @@ void XdsOverrideHostLb::CleanupSubchannels() {
if (p.second->last_used_time() <= idle_threshold) {
auto subchannel = p.second->TakeOwnedSubchannel();
if (subchannel != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this
<< "] dropping subchannel for " << p.first;
}
subchannel_refs_to_drop.push_back(std::move(subchannel));
}
} else {
@ -1093,10 +1079,9 @@ void XdsOverrideHostLb::SubchannelWrapper::CancelConnectivityStateWatch(
}
void XdsOverrideHostLb::SubchannelWrapper::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << policy_.get()
<< "] subchannel wrapper " << this << " orphaned";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << policy_.get() << "] subchannel wrapper "
<< this << " orphaned";
if (!IsWorkSerializerDispatchEnabled()) {
wrapped_subchannel()->CancelConnectivityStateWatch(watcher_);
if (subchannel_entry_ != nullptr) {
@ -1206,20 +1191,19 @@ void XdsOverrideHostLb::SubchannelEntry::OnSubchannelWrapperOrphan(
auto* subchannel = GetSubchannel();
if (subchannel != wrapper) return;
if (last_used_time_ < (Timestamp::Now() - connection_idle_timeout)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb] removing unowned subchannel wrapper "
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb] removing unowned subchannel "
"wrapper "
<< subchannel;
}
subchannel_ = nullptr;
} else {
// The subchannel is being released by the child policy, but it
// is still within its idle timeout, so we make a new copy of
// the wrapper with the same underlying subchannel, and we hold
// our own ref to it.
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb] subchannel wrapper " << subchannel
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb] subchannel wrapper " << subchannel
<< ": cloning to gain ownership";
}
subchannel_ = wrapper->Clone();
}
}

@ -239,10 +239,9 @@ absl::Status XdsWrrLocalityLb::UpdateLocked(UpdateArgs args) {
update_args.resolution_note = std::move(args.resolution_note);
update_args.args = std::move(args.args);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
LOG(INFO) << "[xds_wrr_locality_lb " << this << "] updating child policy "
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this << "] updating child policy "
<< child_policy_.get();
}
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -256,10 +255,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsWrrLocalityLb::CreateChildPolicyLocked(
auto lb_policy =
CoreConfiguration::Get().lb_policy_registry().CreateLoadBalancingPolicy(
"weighted_target_experimental", std::move(lb_policy_args));
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
LOG(INFO) << "[xds_wrr_locality_lb " << this
<< "] created new child policy " << lb_policy.get();
}
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this << "] created new child policy "
<< lb_policy.get();
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this LB policy, which in turn is tied to the application's call.

Loading…
Cancel
Save