Code review changes.

pull/18096/head
Mark D. Roth 6 years ago
parent 523e537368
commit 148ae20e44
  1. 11
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
  2. 13
      src/core/ext/filters/client_channel/lb_policy/xds/xds.cc

@@ -278,7 +278,7 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args,
// READY, then select it immediately. This can happen when the
// currently selected subchannel is also present in the update. It
// can also happen if one of the subchannels in the update is already
// in the subchannel index because it's in use by another channel.
// in the global subchannel pool because it's in use by another channel.
// TODO(roth): If we're in IDLE state, we should probably defer this
// check and instead do it in ExitIdleLocked().
for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
@@ -376,11 +376,10 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(new_error)));
} else {
if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// If the selected subchannel goes bad, request a re-resolution. We also
// set the channel state to IDLE and reset idle_. The reason
// is that if the new state is TRANSIENT_FAILURE due to a GOAWAY
// reception we don't want to connect to the re-resolved backends until
// we leave the IDLE state.
// If the selected subchannel goes bad, request a re-resolution. We
// also set the channel state to IDLE. The reason is that if the new
// state is TRANSIENT_FAILURE due to a GOAWAY reception we don't want
// to connect to the re-resolved backends until we leave IDLE state.
p->idle_ = true;
p->channel_control_helper()->RequestReresolution();
// In transient failure. Rely on re-resolution to recover.

@@ -1138,11 +1138,7 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
!fallback_timer_callback_pending_) {
grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
// TODO(roth): We currently track this ref manually. Once the
// ClosureRef API is ready, we should pass the RefCountedPtr<> along
// with the callback.
auto self = Ref(DEBUG_LOCATION, "on_fallback_timer");
self.release();
Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Held by closure
GRPC_CLOSURE_INIT(&lb_on_fallback_, &XdsLb::OnFallbackTimerLocked, this,
grpc_combiner_scheduler(combiner()));
fallback_timer_callback_pending_ = true;
@@ -1159,11 +1155,8 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
grpc_channel_get_channel_stack(lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
watching_lb_channel_ = true;
// TODO(roth): We currently track this ref manually. Once the
// ClosureRef API is ready, we should pass the RefCountedPtr<> along
// with the callback.
auto self = Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity");
self.release();
// Ref held by closure.
Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity").release();
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(interested_parties()),

Loading…
Cancel
Save