@@ -1356,11 +1356,12 @@ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
     grpc_error_handle state_error =
         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
             "Resolver transient failure", &error, 1);
+    absl::Status status = grpc_error_to_absl_status(state_error);
     {
       MutexLock lock(&resolution_mu_);
       // Update resolver transient failure.
       GRPC_ERROR_UNREF(resolver_transient_failure_error_);
-      resolver_transient_failure_error_ = GRPC_ERROR_REF(state_error);
+      resolver_transient_failure_error_ = state_error;
       // Process calls that were queued waiting for the resolver result.
       for (ResolverQueuedCall* call = resolver_queued_calls_; call != nullptr;
            call = call->next) {
@@ -1374,10 +1375,8 @@ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
     }
     // Update connectivity state.
     UpdateStateAndPickerLocked(
-        GRPC_CHANNEL_TRANSIENT_FAILURE, grpc_error_to_absl_status(state_error),
-        "resolver failure",
-        absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
-            state_error));
+        GRPC_CHANNEL_TRANSIENT_FAILURE, status, "resolver failure",
+        absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(status));
   }
   GRPC_ERROR_UNREF(error);
 }
@@ -1688,6 +1687,40 @@ void ClientChannel::UpdateStateAndPickerLocked(
   pending_subchannel_updates_.clear();
 }
 
+namespace {
+
+// TODO(roth): Remove this in favor of the gprpp Match() function once
+// we can do that without breaking lock annotations.
+template <typename T>
+T HandlePickResult(
+    LoadBalancingPolicy::PickResult* result,
+    std::function<T(LoadBalancingPolicy::PickResult::Complete*)> complete_func,
+    std::function<T(LoadBalancingPolicy::PickResult::Queue*)> queue_func,
+    std::function<T(LoadBalancingPolicy::PickResult::Fail*)> fail_func,
+    std::function<T(LoadBalancingPolicy::PickResult::Drop*)> drop_func) {
+  auto* complete_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Complete>(&result->result);
+  if (complete_pick != nullptr) {
+    return complete_func(complete_pick);
+  }
+  auto* queue_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Queue>(&result->result);
+  if (queue_pick != nullptr) {
+    return queue_func(queue_pick);
+  }
+  auto* fail_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Fail>(&result->result);
+  if (fail_pick != nullptr) {
+    return fail_func(fail_pick);
+  }
+  auto* drop_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Drop>(&result->result);
+  GPR_ASSERT(drop_pick != nullptr);
+  return drop_func(drop_pick);
+}
+
+}  // namespace
+
 grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
   if (state_tracker_.state() != GRPC_CHANNEL_READY) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING("channel not connected");
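Note: the HandlePickResult<> helper added above is a hand-rolled visitor over the new variant-based PickResult: absl::get_if<> probes each alternative in declaration order, the matching callback runs, and Drop is asserted to be the only remaining case. Below is a minimal stand-alone sketch of the same pattern, using std::variant/std::get_if (same API shape as the absl types used here) and stand-in pick types rather than the real gRPC ones:

#include <cassert>
#include <functional>
#include <iostream>
#include <string>
#include <variant>

// Stand-ins for LoadBalancingPolicy::PickResult and its alternatives.
struct PickResult {
  struct Complete { std::string subchannel; };
  struct Queue {};
  struct Fail { std::string status; };
  struct Drop { std::string status; };
  std::variant<Complete, Queue, Fail, Drop> result;
};

// Same shape as the HandlePickResult helper in the patch: probe each
// alternative with get_if<> and run the matching handler.
template <typename T>
T HandlePickResult(PickResult* result,
                   std::function<T(PickResult::Complete*)> complete_func,
                   std::function<T(PickResult::Queue*)> queue_func,
                   std::function<T(PickResult::Fail*)> fail_func,
                   std::function<T(PickResult::Drop*)> drop_func) {
  if (auto* p = std::get_if<PickResult::Complete>(&result->result)) {
    return complete_func(p);
  }
  if (auto* p = std::get_if<PickResult::Queue>(&result->result)) {
    return queue_func(p);
  }
  if (auto* p = std::get_if<PickResult::Fail>(&result->result)) {
    return fail_func(p);
  }
  auto* p = std::get_if<PickResult::Drop>(&result->result);
  assert(p != nullptr);  // Drop is the only alternative left.
  return drop_func(p);
}

int main() {
  PickResult r;
  r.result = PickResult::Fail{"UNAVAILABLE: no ready subchannels"};
  std::string out = HandlePickResult<std::string>(
      &r,
      [](PickResult::Complete* c) { return "picked " + c->subchannel; },
      [](PickResult::Queue*) { return std::string("queued"); },
      [](PickResult::Fail* f) { return "failed: " + f->status; },
      [](PickResult::Drop* d) { return "dropped: " + d->status; });
  std::cout << out << "\n";  // failed: UNAVAILABLE: no ready subchannels
}

The std::function parameters cost one layer of type erasure per handler; per the TODO above, the plan is to replace this helper with gprpp's Match() once that can be done without breaking lock annotations.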
@@ -1697,21 +1730,31 @@ grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
     MutexLock lock(&data_plane_mu_);
     result = picker_->Pick(LoadBalancingPolicy::PickArgs());
   }
-  ConnectedSubchannel* connected_subchannel = nullptr;
-  if (result.subchannel != nullptr) {
-    SubchannelWrapper* subchannel =
-        static_cast<SubchannelWrapper*>(result.subchannel.get());
-    connected_subchannel = subchannel->connected_subchannel();
-  }
-  if (connected_subchannel != nullptr) {
-    connected_subchannel->Ping(op->send_ping.on_initiate, op->send_ping.on_ack);
-  } else {
-    if (result.error == GRPC_ERROR_NONE) {
-      result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-          "LB policy dropped call on ping");
-    }
-  }
-  return result.error;
+  return HandlePickResult<grpc_error_handle>(
+      &result,
+      // Complete pick.
+      [op](LoadBalancingPolicy::PickResult::Complete* complete_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::work_serializer_) {
+            SubchannelWrapper* subchannel = static_cast<SubchannelWrapper*>(
+                complete_pick->subchannel.get());
+            ConnectedSubchannel* connected_subchannel =
+                subchannel->connected_subchannel();
+            connected_subchannel->Ping(op->send_ping.on_initiate,
+                                       op->send_ping.on_ack);
+            return GRPC_ERROR_NONE;
+          },
+      // Queue pick.
+      [](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/) {
+        return GRPC_ERROR_CREATE_FROM_STATIC_STRING("LB picker queued call");
+      },
+      // Fail pick.
+      [](LoadBalancingPolicy::PickResult::Fail* fail_pick) {
+        return absl_status_to_grpc_error(fail_pick->status);
+      },
+      // Drop pick.
+      [](LoadBalancingPolicy::PickResult::Drop* drop_pick) {
+        return absl_status_to_grpc_error(drop_pick->status);
+      });
 }
 
 void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
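For contrast, the Match()-style dispatch that the TODO points at would look roughly like the std::visit sketch below, using the classic "overloaded" idiom (all types and names are stand-ins, and gprpp's Match() is only conceptually similar). Per the TODO, this form is avoided for now because it would break thread-safety annotations such as the ABSL_EXCLUSIVE_LOCKS_REQUIRED attached to the lambdas above:

#include <iostream>
#include <string>
#include <variant>

// Stand-in pick alternatives (not the real gRPC types).
struct Complete { std::string subchannel; };
struct Queue {};
struct Fail { std::string status; };
struct Drop { std::string status; };
using PickVariant = std::variant<Complete, Queue, Fail, Drop>;

// Classic "overloaded" idiom: bundle one call operator per alternative.
template <class... Fs>
struct Overloaded : Fs... {
  using Fs::operator()...;
};
template <class... Fs>
Overloaded(Fs...) -> Overloaded<Fs...>;

int main() {
  PickVariant v = Queue{};
  std::string out = std::visit(
      Overloaded{
          [](const Complete& c) { return "complete: " + c.subchannel; },
          [](const Queue&) { return std::string("queued"); },
          [](const Fail& f) { return "fail: " + f.status; },
          [](const Drop& d) { return "drop: " + d.status; }},
      v);
  std::cout << out << "\n";  // queued
}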
@@ -1766,7 +1809,7 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
       UpdateStateAndPickerLocked(
           GRPC_CHANNEL_SHUTDOWN, absl::Status(), "shutdown from API",
           absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
-              GRPC_ERROR_REF(op->disconnect_with_error)));
+              grpc_error_to_absl_status(op->disconnect_with_error)));
     }
   }
   GRPC_CHANNEL_STACK_UNREF(owning_stack_, "start_transport_op");
@@ -2919,12 +2962,10 @@ void ClientChannel::LoadBalancedCall::RecvTrailingMetadataReady(
     // If the LB policy requested a callback for trailing metadata, invoke
     // the callback.
     if (self->lb_recv_trailing_metadata_ready_ != nullptr) {
-      grpc_error_handle error_for_lb = absl_status_to_grpc_error(status);
       Metadata trailing_metadata(self, self->recv_trailing_metadata_);
       LbCallState lb_call_state(self);
-      self->lb_recv_trailing_metadata_ready_(error_for_lb, &trailing_metadata,
+      self->lb_recv_trailing_metadata_ready_(status, &trailing_metadata,
                                              &lb_call_state);
-      GRPC_ERROR_UNREF(error_for_lb);
     }
   }
   // Chain to original callback.
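The hunk above implies a matching signature change (not shown in this excerpt): lb_recv_trailing_metadata_ready_ now receives the call's absl::Status directly rather than a grpc_error_handle, so the caller no longer builds a temporary error handle and unrefs it after the invocation. A stand-in sketch of the new shape, with placeholder types rather than the real gRPC declarations:

#include <functional>
#include <iostream>
#include <string>

// Placeholder types standing in for the real gRPC ones.
struct Metadata {};
struct LbCallState {};
struct Status {  // stand-in for absl::Status
  std::string message;
};

// Old shape (for comparison): void(grpc_error_handle, Metadata*, LbCallState*),
// which forced the caller to convert the status to an error handle first and
// unref it afterwards. New shape (assumed): the status is passed straight in.
using RecvTrailingMetadataReady =
    std::function<void(const Status&, Metadata*, LbCallState*)>;

int main() {
  RecvTrailingMetadataReady cb = [](const Status& s, Metadata*, LbCallState*) {
    std::cout << "LB saw trailing metadata, status: " << s.message << "\n";
  };
  Metadata trailing_metadata;
  LbCallState lb_call_state;
  cb(Status{"OK"}, &trailing_metadata, &lb_call_state);  // no convert, no unref
}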
@@ -3057,23 +3098,6 @@ void ClientChannel::LoadBalancedCall::PickDone(void* arg,
   self->CreateSubchannelCall();
 }
 
-namespace {
-
-const char* PickResultTypeName(
-    LoadBalancingPolicy::PickResult::ResultType type) {
-  switch (type) {
-    case LoadBalancingPolicy::PickResult::PICK_COMPLETE:
-      return "COMPLETE";
-    case LoadBalancingPolicy::PickResult::PICK_QUEUE:
-      return "QUEUE";
-    case LoadBalancingPolicy::PickResult::PICK_FAILED:
-      return "FAILED";
-  }
-  GPR_UNREACHABLE_CODE(return "UNKNOWN");
-}
-
-}  // namespace
-
 void ClientChannel::LoadBalancedCall::PickSubchannel(void* arg,
                                                      grpc_error_handle error) {
   auto* self = static_cast<LoadBalancedCall*>(arg);
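With PickResult now a variant, the removed PickResultTypeName() helper gets no one-for-one replacement: each handler lambda in the new code logs its own message. If a generic label were still wanted for tracing, it could be derived from the variant's index, as in this purely hypothetical stand-alone sketch:

#include <cstdio>
#include <variant>

struct Complete {};
struct Queue {};
struct Fail {};
struct Drop {};
using PickVariant = std::variant<Complete, Queue, Fail, Drop>;

// Hypothetical helper, not part of the patch: the variant's index() selects
// a human-readable label, replacing the old switch over an enum.
const char* PickResultName(const PickVariant& v) {
  static const char* const kNames[] = {"COMPLETE", "QUEUE", "FAIL", "DROP"};
  return kNames[v.index()];
}

int main() {
  PickVariant v = Drop{};
  std::printf("%s\n", PickResultName(v));  // prints DROP
}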
@@ -3107,64 +3131,82 @@ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
   Metadata initial_metadata(this, initial_metadata_batch);
   pick_args.initial_metadata = &initial_metadata;
   auto result = chand_->picker_->Pick(pick_args);
-  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
-    gpr_log(
-        GPR_INFO,
-        "chand=%p lb_call=%p: LB pick returned %s (subchannel=%p, error=%s)",
-        chand_, this, PickResultTypeName(result.type), result.subchannel.get(),
-        grpc_error_std_string(result.error).c_str());
-  }
-  switch (result.type) {
-    case LoadBalancingPolicy::PickResult::PICK_FAILED: {
-      // If we're shutting down, fail all RPCs.
-      grpc_error_handle disconnect_error = chand_->disconnect_error();
-      if (disconnect_error != GRPC_ERROR_NONE) {
-        GRPC_ERROR_UNREF(result.error);
-        MaybeRemoveCallFromLbQueuedCallsLocked();
-        *error = GRPC_ERROR_REF(disconnect_error);
-        return true;
-      }
-      // If wait_for_ready is false, then the error indicates the RPC
-      // attempt's final status.
-      if ((send_initial_metadata_flags &
-           GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
-        grpc_error_handle new_error =
-            GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                "Failed to pick subchannel", &result.error, 1);
-        GRPC_ERROR_UNREF(result.error);
-        *error = new_error;
-        MaybeRemoveCallFromLbQueuedCallsLocked();
-        return true;
-      }
-      // If wait_for_ready is true, then queue to retry when we get a new
-      // picker.
-      GRPC_ERROR_UNREF(result.error);
-    }
-    // Fallthrough
-    case LoadBalancingPolicy::PickResult::PICK_QUEUE:
-      MaybeAddCallToLbQueuedCallsLocked();
-      return false;
-    default:  // PICK_COMPLETE
-      MaybeRemoveCallFromLbQueuedCallsLocked();
-      // Handle drops.
-      if (GPR_UNLIKELY(result.subchannel == nullptr)) {
-        result.error = grpc_error_set_int(
-            grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                   "Call dropped by load balancing policy"),
-                               GRPC_ERROR_INT_GRPC_STATUS,
-                               GRPC_STATUS_UNAVAILABLE),
-            GRPC_ERROR_INT_LB_POLICY_DROP, 1);
-      } else {
-        // Grab a ref to the connected subchannel while we're still
-        // holding the data plane mutex.
-        connected_subchannel_ =
-            chand_->GetConnectedSubchannelInDataPlane(result.subchannel.get());
-        GPR_ASSERT(connected_subchannel_ != nullptr);
-      }
-      lb_recv_trailing_metadata_ready_ = result.recv_trailing_metadata_ready;
-      *error = result.error;
-      return true;
-  }
+  return HandlePickResult<bool>(
+      &result,
+      // CompletePick
+      [this](LoadBalancingPolicy::PickResult::Complete* complete_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO,
+                      "chand=%p lb_call=%p: LB pick succeeded: subchannel=%p",
+                      chand_, this, complete_pick->subchannel.get());
+            }
+            GPR_ASSERT(complete_pick->subchannel != nullptr);
+            // Grab a ref to the connected subchannel while we're still
+            // holding the data plane mutex.
+            connected_subchannel_ = chand_->GetConnectedSubchannelInDataPlane(
+                complete_pick->subchannel.get());
+            GPR_ASSERT(connected_subchannel_ != nullptr);
+            lb_recv_trailing_metadata_ready_ =
+                std::move(complete_pick->recv_trailing_metadata_ready);
+            MaybeRemoveCallFromLbQueuedCallsLocked();
+            return true;
+          },
+      // QueuePick
+      [this](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick queued", chand_,
+                      this);
+            }
+            MaybeAddCallToLbQueuedCallsLocked();
+            return false;
+          },
+      // FailPick
+      [this, send_initial_metadata_flags,
+       &error](LoadBalancingPolicy::PickResult::Fail* fail_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick failed: %s",
+                      chand_, this, fail_pick->status.ToString().c_str());
+            }
+            // If we're shutting down, fail all RPCs.
+            grpc_error_handle disconnect_error = chand_->disconnect_error();
+            if (disconnect_error != GRPC_ERROR_NONE) {
+              MaybeRemoveCallFromLbQueuedCallsLocked();
+              *error = GRPC_ERROR_REF(disconnect_error);
+              return true;
+            }
+            // If wait_for_ready is false, then the error indicates the RPC
+            // attempt's final status.
+            if ((send_initial_metadata_flags &
+                 GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
+              grpc_error_handle lb_error =
+                  absl_status_to_grpc_error(fail_pick->status);
+              *error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                  "Failed to pick subchannel", &lb_error, 1);
+              GRPC_ERROR_UNREF(lb_error);
+              MaybeRemoveCallFromLbQueuedCallsLocked();
+              return true;
+            }
+            // If wait_for_ready is true, then queue to retry when we get a new
+            // picker.
+            MaybeAddCallToLbQueuedCallsLocked();
+            return false;
+          },
+      // DropPick
+      [this, &error](LoadBalancingPolicy::PickResult::Drop* drop_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick dropped: %s",
+                      chand_, this, drop_pick->status.ToString().c_str());
+            }
+            *error =
+                grpc_error_set_int(absl_status_to_grpc_error(drop_pick->status),
+                                   GRPC_ERROR_INT_LB_POLICY_DROP, 1);
+            MaybeRemoveCallFromLbQueuedCallsLocked();
+            return true;
+          });
 }
 
 }  // namespace grpc_core
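Pulled out of the lambda for clarity, the FailPick arm above encodes a three-way decision on what to do when the picker reports failure. The distillation below is illustrative only; the enum and function names are hypothetical, not from the patch:

#include <cstdio>

// Illustrative distillation of the FailPick lambda's control flow; the enum
// and function names are hypothetical, not from the patch.
enum class FailPickAction {
  kFailWithDisconnectError,  // channel shutting down: fail every RPC
  kFailRpc,                  // wait_for_ready off: the pick failure is final
  kQueue,                    // wait_for_ready on: retry when a new picker arrives
};

FailPickAction OnFailPick(bool channel_disconnecting, bool wait_for_ready) {
  if (channel_disconnecting) return FailPickAction::kFailWithDisconnectError;
  if (!wait_for_ready) return FailPickAction::kFailRpc;
  return FailPickAction::kQueue;
}

int main() {
  // A wait_for_ready call on a healthy channel gets queued (action 2).
  std::printf("%d\n", static_cast<int>(OnFailPick(false, true)));
}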