|
|
|
@@ -105,7 +105,6 @@ namespace {
 class ChannelData {
  public:
   struct QueuedPick {
-    LoadBalancingPolicy::PickArgs pick;
     grpc_call_element* elem;
     QueuedPick* next = nullptr;
   };
@@ -223,7 +222,7 @@ class ChannelData {

   static bool ProcessResolverResultLocked(
       void* arg, Resolver::Result* result, const char** lb_policy_name,
-      RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config,
+      RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config,
       grpc_error** service_config_error);

   grpc_error* DoPingLocked(grpc_transport_op* op);
@@ -236,7 +235,7 @@ class ChannelData {
       const Resolver::Result& resolver_result,
       const internal::ClientChannelGlobalParsedConfig* parsed_service_config,
       UniquePtr<char>* lb_policy_name,
-      RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config);
+      RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config);

   //
   // Fields set at construction and never modified.
@@ -314,6 +313,16 @@ class CallData {
  private:
   class QueuedPickCanceller;

+  class LbCallState : public LoadBalancingPolicy::CallState {
+   public:
+    explicit LbCallState(CallData* calld) : calld_(calld) {}
+
+    void* Alloc(size_t size) override { return calld_->arena_->Alloc(size); }
+
+   private:
+    CallData* calld_;
+  };
+
   // State used for starting a retryable batch on a subchannel call.
   // This provides its own grpc_transport_stream_op_batch and other data
   // structures needed to populate the ops in the batch.
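The new LbCallState class above is the channel's implementation of LoadBalancingPolicy::CallState: it exposes the call's arena to the LB policy, so a policy can attach per-call state whose lifetime exactly matches the call. A minimal sketch of how a policy callback might use it (BackendMetricData and AllocPerCallData are illustrative names, not types from this patch):

#include <new>  // placement new

// Illustrative per-call record a policy might want to keep.
struct BackendMetricData {
  double cpu_utilization = 0.0;
};

BackendMetricData* AllocPerCallData(
    LoadBalancingPolicy::CallState* call_state) {
  // Arena memory is freed in bulk when the call is destroyed, so there is
  // no matching free; keep the object trivially destructible.
  void* mem = call_state->Alloc(sizeof(BackendMetricData));
  return new (mem) BackendMetricData();
}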
|
|
|
|
@@ -449,8 +458,9 @@ class CallData {
       grpc_call_element* elem, SubchannelCallBatchData* batch_data,
       SubchannelCallRetryState* retry_state);

-  static void MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
-      const LoadBalancingPolicy::PickArgs& pick,
+  static void RecvTrailingMetadataReadyForLoadBalancingPolicy(
+      void* arg, grpc_error* error);
+  void MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
       grpc_transport_stream_op_batch* batch);

   // Returns the index into pending_batches_ to be used for batch.
@@ -640,8 +650,19 @@ class CallData {
   bool pick_queued_ = false;
   bool service_config_applied_ = false;
   QueuedPickCanceller* pick_canceller_ = nullptr;
+  LbCallState lb_call_state_;
+  RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
+  void (*lb_recv_trailing_metadata_ready_)(
+      void* user_data, grpc_metadata_batch* recv_trailing_metadata,
+      LoadBalancingPolicy::CallState* call_state) = nullptr;
+  void* lb_recv_trailing_metadata_ready_user_data_ = nullptr;
   grpc_closure pick_closure_;

+  // For intercepting recv_trailing_metadata_ready for the LB policy.
+  grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
+  grpc_closure recv_trailing_metadata_ready_;
+  grpc_closure* original_recv_trailing_metadata_ready_ = nullptr;
+
   grpc_polling_entity* pollent_ = nullptr;

   // Batches are added to this list when received from above.
@@ -1143,7 +1164,7 @@ void ChannelData::ProcessLbPolicy(
     const Resolver::Result& resolver_result,
     const internal::ClientChannelGlobalParsedConfig* parsed_service_config,
     UniquePtr<char>* lb_policy_name,
-    RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config) {
+    RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config) {
   // Prefer the LB policy name found in the service config.
   if (parsed_service_config != nullptr &&
       parsed_service_config->parsed_lb_config() != nullptr) {
@@ -1191,7 +1212,7 @@ void ChannelData::ProcessLbPolicy(
 // resolver result update.
 bool ChannelData::ProcessResolverResultLocked(
     void* arg, Resolver::Result* result, const char** lb_policy_name,
-    RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config,
+    RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config,
     grpc_error** service_config_error) {
   ChannelData* chand = static_cast<ChannelData*>(arg);
   RefCountedPtr<ServiceConfig> service_config;
@@ -1312,19 +1333,18 @@ grpc_error* ChannelData::DoPingLocked(grpc_transport_op* op) {
   if (grpc_connectivity_state_check(&state_tracker_) != GRPC_CHANNEL_READY) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING("channel not connected");
   }
-  LoadBalancingPolicy::PickArgs pick;
-  grpc_error* error = GRPC_ERROR_NONE;
-  picker_->Pick(&pick, &error);
-  if (pick.connected_subchannel != nullptr) {
-    pick.connected_subchannel->Ping(op->send_ping.on_initiate,
-                                    op->send_ping.on_ack);
+  LoadBalancingPolicy::PickResult result =
+      picker_->Pick(LoadBalancingPolicy::PickArgs());
+  if (result.connected_subchannel != nullptr) {
+    result.connected_subchannel->Ping(op->send_ping.on_initiate,
+                                      op->send_ping.on_ack);
   } else {
-    if (error == GRPC_ERROR_NONE) {
-      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+    if (result.error == GRPC_ERROR_NONE) {
+      result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
           "LB policy dropped call on ping");
     }
   }
-  return error;
+  return result.error;
 }

 void ChannelData::StartTransportOpLocked(void* arg, grpc_error* ignored) {
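Taken together with the declarations above, this hunk shows the direction of the API change: Pick() no longer fills in a caller-owned PickArgs plus a grpc_error* out-parameter; it takes PickArgs by value and returns a PickResult struct. A simplified sketch of the picker interface implied by these call sites (reconstructed for orientation, not copied from load_balancing_policy.h, so check that header for the exact definitions):

class LoadBalancingPolicy {
 public:
  // Per-call state exposed to the policy (implemented by LbCallState above).
  class CallState {
   public:
    virtual void* Alloc(size_t size) = 0;
  };

  struct PickArgs {
    grpc_metadata_batch* initial_metadata = nullptr;
    CallState* call_state = nullptr;
  };

  struct PickResult {
    enum ResultType { PICK_COMPLETE, PICK_QUEUE, PICK_TRANSIENT_FAILURE };
    ResultType type;
    // Used only for PICK_COMPLETE; null there means the call was dropped.
    RefCountedPtr<ConnectedSubchannel> connected_subchannel;
    // Used only for PICK_TRANSIENT_FAILURE (and set by the channel on drops).
    grpc_error* error = GRPC_ERROR_NONE;
    // Optional hook letting the policy observe the call's trailing metadata.
    void (*recv_trailing_metadata_ready)(
        void* user_data, grpc_metadata_batch* recv_trailing_metadata,
        CallState* call_state) = nullptr;
    void* recv_trailing_metadata_ready_user_data = nullptr;
  };

  class SubchannelPicker {
   public:
    virtual ~SubchannelPicker() = default;
    virtual PickResult Pick(PickArgs args) = 0;
  };
};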
|
|
|
@@ -1505,6 +1525,7 @@ CallData::CallData(grpc_call_element* elem, const ChannelData& chand,
       owning_call_(args.call_stack),
       call_combiner_(args.call_combiner),
       call_context_(args.context),
+      lb_call_state_(this),
       pending_send_initial_metadata_(false),
       pending_send_message_(false),
       pending_send_trailing_metadata_(false),
@@ -1737,18 +1758,30 @@ void CallData::FreeCachedSendOpDataForCompletedBatch(
 // LB recv_trailing_metadata_ready handling
 //

+void CallData::RecvTrailingMetadataReadyForLoadBalancingPolicy(
+    void* arg, grpc_error* error) {
+  CallData* calld = static_cast<CallData*>(arg);
+  // Invoke callback to LB policy.
+  calld->lb_recv_trailing_metadata_ready_(
+      calld->lb_recv_trailing_metadata_ready_user_data_,
+      calld->recv_trailing_metadata_, &calld->lb_call_state_);
+  // Chain to original callback.
+  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready_,
+                   GRPC_ERROR_REF(error));
+}
+
 void CallData::MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
-    const LoadBalancingPolicy::PickArgs& pick,
     grpc_transport_stream_op_batch* batch) {
-  if (pick.recv_trailing_metadata_ready != nullptr) {
-    *pick.original_recv_trailing_metadata_ready =
+  if (lb_recv_trailing_metadata_ready_ != nullptr) {
+    recv_trailing_metadata_ =
+        batch->payload->recv_trailing_metadata.recv_trailing_metadata;
+    original_recv_trailing_metadata_ready_ =
         batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
+                      RecvTrailingMetadataReadyForLoadBalancingPolicy, this,
+                      grpc_schedule_on_exec_ctx);
     batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
-        pick.recv_trailing_metadata_ready;
-    if (pick.recv_trailing_metadata != nullptr) {
-      *pick.recv_trailing_metadata =
-          batch->payload->recv_trailing_metadata.recv_trailing_metadata;
-    }
+        &recv_trailing_metadata_ready_;
   }
 }

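The two functions above are the consumer side of the new hook: at pick time the channel stashes result.recv_trailing_metadata_ready (see StartPickLocked below), and when a batch carrying recv_trailing_metadata goes down, it splices its own closure in front of the application's so the policy sees the trailing metadata first. A hypothetical picker using the hook, e.g. to count finished calls the way grpclb-style load reporting would (field names per the sketch above; not code from this patch):

#include <atomic>
#include <cstdint>

class CountingPicker : public LoadBalancingPolicy::SubchannelPicker {
 public:
  LoadBalancingPolicy::PickResult Pick(
      LoadBalancingPolicy::PickArgs /*args*/) override {
    LoadBalancingPolicy::PickResult result;
    result.type = LoadBalancingPolicy::PickResult::PICK_COMPLETE;
    result.connected_subchannel = subchannel_;  // assume a single subchannel
    // Ask the channel to invoke our callback with the trailing metadata.
    result.recv_trailing_metadata_ready = OnTrailingMetadata;
    result.recv_trailing_metadata_ready_user_data = this;
    return result;
  }

 private:
  static void OnTrailingMetadata(
      void* user_data, grpc_metadata_batch* /*recv_trailing_metadata*/,
      LoadBalancingPolicy::CallState* /*call_state*/) {
    // Runs before the original recv_trailing_metadata_ready is chained.
    auto* self = static_cast<CountingPicker*>(user_data);
    self->calls_finished_.fetch_add(1, std::memory_order_relaxed);
  }

  RefCountedPtr<ConnectedSubchannel> subchannel_;
  std::atomic<int64_t> calls_finished_{0};
};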
|
|
|
|
|
|
|
|
@@ -1894,8 +1927,7 @@ void CallData::PendingBatchesFail(
     grpc_transport_stream_op_batch* batch = pending->batch;
     if (batch != nullptr) {
       if (batch->recv_trailing_metadata) {
-        MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(pick_.pick,
-                                                                   batch);
+        MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
       }
       batch->handler_private.extra_arg = this;
       GRPC_CLOSURE_INIT(&batch->handler_private.closure,
@@ -1949,8 +1981,7 @@ void CallData::PendingBatchesResume(grpc_call_element* elem) {
     grpc_transport_stream_op_batch* batch = pending->batch;
     if (batch != nullptr) {
       if (batch->recv_trailing_metadata) {
-        MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(pick_.pick,
-                                                                   batch);
+        MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
       }
       batch->handler_private.extra_arg = subchannel_call_.get();
       GRPC_CLOSURE_INIT(&batch->handler_private.closure,
@@ -2011,7 +2042,7 @@ void CallData::DoRetry(grpc_call_element* elem,
   GPR_ASSERT(retry_policy != nullptr);
   // Reset subchannel call and connected subchannel.
   subchannel_call_.reset();
-  pick_.pick.connected_subchannel.reset();
+  connected_subchannel_.reset();
   // Compute backoff delay.
   grpc_millis next_attempt_time;
   if (server_pushback_ms >= 0) {
@@ -2868,7 +2899,7 @@ void CallData::AddRetriableRecvTrailingMetadataOp(
       .recv_trailing_metadata_ready =
       &retry_state->recv_trailing_metadata_ready;
   MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
-      pick_.pick, &batch_data->batch);
+      &batch_data->batch);
 }

 void CallData::StartInternalRecvTrailingMetadata(grpc_call_element* elem) {
@@ -3135,8 +3166,7 @@ void CallData::CreateSubchannelCall(grpc_call_element* elem) {
       // need to use a separate call context for each subchannel call.
       call_context_, call_combiner_, parent_data_size};
   grpc_error* error = GRPC_ERROR_NONE;
-  subchannel_call_ =
-      pick_.pick.connected_subchannel->CreateCall(call_args, &error);
+  subchannel_call_ = connected_subchannel_->CreateCall(call_args, &error);
   if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
     gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
             chand, this, subchannel_call_.get(), grpc_error_string(error));
@@ -3297,13 +3327,14 @@ void CallData::MaybeApplyServiceConfigToCallLocked(grpc_call_element* elem) {
   }
 }

-const char* PickResultName(LoadBalancingPolicy::PickResult result) {
-  switch (result) {
-    case LoadBalancingPolicy::PICK_COMPLETE:
+const char* PickResultTypeName(
+    LoadBalancingPolicy::PickResult::ResultType type) {
+  switch (type) {
+    case LoadBalancingPolicy::PickResult::PICK_COMPLETE:
       return "COMPLETE";
-    case LoadBalancingPolicy::PICK_QUEUE:
+    case LoadBalancingPolicy::PickResult::PICK_QUEUE:
       return "QUEUE";
-    case LoadBalancingPolicy::PICK_TRANSIENT_FAILURE:
+    case LoadBalancingPolicy::PickResult::PICK_TRANSIENT_FAILURE:
       return "TRANSIENT_FAILURE";
   }
   GPR_UNREACHABLE_CODE(return "UNKNOWN");
@@ -3313,8 +3344,10 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
   CallData* calld = static_cast<CallData*>(elem->call_data);
   ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
-  GPR_ASSERT(calld->pick_.pick.connected_subchannel == nullptr);
+  GPR_ASSERT(calld->connected_subchannel_ == nullptr);
   GPR_ASSERT(calld->subchannel_call_ == nullptr);
+  // Apply service config to call if needed.
+  calld->MaybeApplyServiceConfigToCallLocked(elem);
   // If this is a retry, use the send_initial_metadata payload that
   // we've cached; otherwise, use the pending batch.  The
   // send_initial_metadata batch will be the first pending batch in the
@@ -3325,58 +3358,58 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
   // allocate the subchannel batch earlier so that we can give the
   // subchannel's copy of the metadata batch (which is copied for each
   // attempt) to the LB policy instead the one from the parent channel.
-  calld->pick_.pick.initial_metadata =
+  LoadBalancingPolicy::PickArgs pick_args;
+  pick_args.call_state = &calld->lb_call_state_;
+  pick_args.initial_metadata =
       calld->seen_send_initial_metadata_
           ? &calld->send_initial_metadata_
           : calld->pending_batches_[0]
                 .batch->payload->send_initial_metadata.send_initial_metadata;
-  uint32_t* send_initial_metadata_flags =
+  // Grab initial metadata flags so that we can check later if the call has
+  // wait_for_ready enabled.
+  const uint32_t send_initial_metadata_flags =
       calld->seen_send_initial_metadata_
-          ? &calld->send_initial_metadata_flags_
-          : &calld->pending_batches_[0]
-                 .batch->payload->send_initial_metadata
-                 .send_initial_metadata_flags;
-  // Apply service config to call if needed.
-  calld->MaybeApplyServiceConfigToCallLocked(elem);
+          ? calld->send_initial_metadata_flags_
+          : calld->pending_batches_[0]
+                .batch->payload->send_initial_metadata
+                .send_initial_metadata_flags;
   // When done, we schedule this closure to leave the data plane combiner.
   GRPC_CLOSURE_INIT(&calld->pick_closure_, PickDone, elem,
                     grpc_schedule_on_exec_ctx);
   // Attempt pick.
-  error = GRPC_ERROR_NONE;
-  auto pick_result = chand->picker()->Pick(&calld->pick_.pick, &error);
+  auto result = chand->picker()->Pick(pick_args);
   if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
     gpr_log(GPR_INFO,
             "chand=%p calld=%p: LB pick returned %s (connected_subchannel=%p, "
             "error=%s)",
-            chand, calld, PickResultName(pick_result),
-            calld->pick_.pick.connected_subchannel.get(),
-            grpc_error_string(error));
+            chand, calld, PickResultTypeName(result.type),
+            result.connected_subchannel.get(), grpc_error_string(result.error));
   }
-  switch (pick_result) {
-    case LoadBalancingPolicy::PICK_TRANSIENT_FAILURE: {
+  switch (result.type) {
+    case LoadBalancingPolicy::PickResult::PICK_TRANSIENT_FAILURE: {
       // If we're shutting down, fail all RPCs.
       grpc_error* disconnect_error = chand->disconnect_error();
       if (disconnect_error != GRPC_ERROR_NONE) {
-        GRPC_ERROR_UNREF(error);
+        GRPC_ERROR_UNREF(result.error);
         GRPC_CLOSURE_SCHED(&calld->pick_closure_,
                            GRPC_ERROR_REF(disconnect_error));
         break;
       }
       // If wait_for_ready is false, then the error indicates the RPC
       // attempt's final status.
-      if ((*send_initial_metadata_flags &
+      if ((send_initial_metadata_flags &
            GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
         // Retry if appropriate; otherwise, fail.
         grpc_status_code status = GRPC_STATUS_OK;
-        grpc_error_get_status(error, calld->deadline_, &status, nullptr,
+        grpc_error_get_status(result.error, calld->deadline_, &status, nullptr,
                               nullptr, nullptr);
         if (!calld->enable_retries_ ||
             !calld->MaybeRetry(elem, nullptr /* batch_data */, status,
                                nullptr /* server_pushback_md */)) {
           grpc_error* new_error =
               GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                  "Failed to pick subchannel", &error, 1);
-          GRPC_ERROR_UNREF(error);
+                  "Failed to pick subchannel", &result.error, 1);
+          GRPC_ERROR_UNREF(result.error);
           GRPC_CLOSURE_SCHED(&calld->pick_closure_, new_error);
         }
         if (calld->pick_queued_) calld->RemoveCallFromQueuedPicksLocked(elem);
@@ -3384,19 +3417,24 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
       }
       // If wait_for_ready is true, then queue to retry when we get a new
       // picker.
-      GRPC_ERROR_UNREF(error);
+      GRPC_ERROR_UNREF(result.error);
     }
     // Fallthrough
-    case LoadBalancingPolicy::PICK_QUEUE:
+    case LoadBalancingPolicy::PickResult::PICK_QUEUE:
       if (!calld->pick_queued_) calld->AddCallToQueuedPicksLocked(elem);
       break;
     default:  // PICK_COMPLETE
       // Handle drops.
-      if (GPR_UNLIKELY(calld->pick_.pick.connected_subchannel == nullptr)) {
-        error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+      if (GPR_UNLIKELY(result.connected_subchannel == nullptr)) {
+        result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "Call dropped by load balancing policy");
       }
-      GRPC_CLOSURE_SCHED(&calld->pick_closure_, error);
+      calld->connected_subchannel_ = std::move(result.connected_subchannel);
+      calld->lb_recv_trailing_metadata_ready_ =
+          result.recv_trailing_metadata_ready;
+      calld->lb_recv_trailing_metadata_ready_user_data_ =
+          result.recv_trailing_metadata_ready_user_data;
+      GRPC_CLOSURE_SCHED(&calld->pick_closure_, result.error);
       if (calld->pick_queued_) calld->RemoveCallFromQueuedPicksLocked(elem);
   }
 }
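With the plumbing above in place, the switch shows how the channel maps each ResultType: COMPLETE creates a subchannel call (or fails the call as a drop when connected_subchannel is null), QUEUE parks the pick until a new picker is published, and TRANSIENT_FAILURE fails the RPC unless wait_for_ready is set, in which case it is queued instead. A hypothetical picker exercising all three branches (names per the earlier PickResult sketch; not code from this patch):

class StateBasedPicker : public LoadBalancingPolicy::SubchannelPicker {
 public:
  LoadBalancingPolicy::PickResult Pick(
      LoadBalancingPolicy::PickArgs /*args*/) override {
    LoadBalancingPolicy::PickResult result;
    if (state_ == GRPC_CHANNEL_READY) {
      result.type = LoadBalancingPolicy::PickResult::PICK_COMPLETE;
      result.connected_subchannel = subchannel_;
    } else if (state_ == GRPC_CHANNEL_CONNECTING) {
      // StartPickLocked() queues the call; it is re-picked when the policy
      // publishes a new picker.
      result.type = LoadBalancingPolicy::PickResult::PICK_QUEUE;
    } else {
      // Fails the RPC unless it has wait_for_ready set (see switch above).
      result.type = LoadBalancingPolicy::PickResult::PICK_TRANSIENT_FAILURE;
      result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "no subchannel available");
    }
    return result;
  }

 private:
  grpc_connectivity_state state_ = GRPC_CHANNEL_IDLE;
  RefCountedPtr<ConnectedSubchannel> subchannel_;
};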
|
|
|
|