@@ -174,7 +174,7 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
     }
   }
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
+    gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
            grpc_connectivity_state_name(state));
   }
   grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
@@ -186,7 +186,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
   /* check if the notification is for the latest policy */
   if (w->lb_policy == w->chand->lb_policy.get()) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
+      gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand,
              w->lb_policy, grpc_connectivity_state_name(w->state));
     }
     set_channel_connectivity_state_locked(w->chand, w->state,
@@ -215,7 +215,7 @@ static void watch_lb_policy_locked(channel_data* chand,
 
 static void start_resolving_locked(channel_data* chand) {
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
+    gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand);
   }
   GPR_ASSERT(!chand->started_resolving);
   chand->started_resolving = true;
@@ -297,7 +297,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
     return;
   }
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
+    gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand);
   }
   chand->resolver->RequestReresolutionLocked();
   // Give back the closure to the LB policy.
@@ -311,7 +311,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
 static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
   channel_data* chand = static_cast<channel_data*>(arg);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p: got resolver result: resolver_result=%p error=%s", chand,
            chand->resolver_result, grpc_error_string(error));
   }
@@ -431,7 +431,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
     }
   }
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
            "service_config=\"%s\"",
            chand, lb_policy_name_dup,
@@ -466,7 +466,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
       chand->resolver == nullptr) {
     if (chand->lb_policy != nullptr) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
+        gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand,
                chand->lb_policy.get());
       }
       grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
@@ -480,11 +480,11 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
   // error or shutdown.
   if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
+      gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
     }
     if (chand->resolver != nullptr) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
+        gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand);
       }
       chand->resolver.reset();
     }
@@ -506,7 +506,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
     if (lb_policy_created) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
+        gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand);
       }
       GRPC_ERROR_UNREF(state_error);
       state = chand->lb_policy->CheckConnectivityLocked(&state_error);
@@ -842,10 +842,11 @@ typedef struct {
   bool completed_recv_trailing_metadata : 1;
   // State for callback processing.
   bool retry_dispatched : 1;
-  bool recv_initial_metadata_ready_deferred : 1;
-  bool recv_message_ready_deferred : 1;
+  subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
   grpc_error* recv_initial_metadata_error;
+  subchannel_batch_data* recv_message_ready_deferred_batch;
   grpc_error* recv_message_error;
+  subchannel_batch_data* recv_trailing_metadata_internal_batch;
 } subchannel_call_retry_state;
 
 // Pending batches stored in call data.
@@ -994,6 +995,39 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
   }
 }
 
+// Frees cached send_initial_metadata.
+static void free_cached_send_initial_metadata(channel_data* chand,
+                                              call_data* calld) {
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p: destroying calld->send_initial_metadata", chand,
+            calld);
+  }
+  grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+}
+
+// Frees cached send_message at index idx.
+static void free_cached_send_message(channel_data* chand, call_data* calld,
+                                     size_t idx) {
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
+            chand, calld, idx);
+  }
+  (*calld->send_messages)[idx]->Destroy();
+}
+
+// Frees cached send_trailing_metadata.
+static void free_cached_send_trailing_metadata(channel_data* chand,
+                                               call_data* calld) {
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p: destroying calld->send_trailing_metadata",
+            chand, calld);
+  }
+  grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+}
+
 // Frees cached send ops that have already been completed after
 // committing the call.
 static void free_cached_send_op_data_after_commit(
@@ -1001,19 +1035,13 @@ static void free_cached_send_op_data_after_commit(
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (retry_state->completed_send_initial_metadata) {
-    grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+    free_cached_send_initial_metadata(chand, calld);
   }
   for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
-    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
-              "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
-              "]",
-              chand, calld, i);
-    }
-    (*calld->send_messages)[i]->Destroy();
+    free_cached_send_message(chand, calld, i);
   }
   if (retry_state->completed_send_trailing_metadata) {
-    grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+    free_cached_send_trailing_metadata(chand, calld);
   }
 }
 
@@ -1025,20 +1053,14 @@ static void free_cached_send_op_data_for_completed_batch(
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (batch_data->batch.send_initial_metadata) {
-    grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+    free_cached_send_initial_metadata(chand, calld);
   }
   if (batch_data->batch.send_message) {
-    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
-              "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
-              "]",
-              chand, calld, retry_state->completed_send_message_count - 1);
-    }
-    (*calld->send_messages)[retry_state->completed_send_message_count - 1]
-        ->Destroy();
+    free_cached_send_message(chand, calld,
+                             retry_state->completed_send_message_count - 1);
   }
   if (batch_data->batch.send_trailing_metadata) {
-    grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+    free_cached_send_trailing_metadata(chand, calld);
   }
 }
 
@@ -1066,7 +1088,7 @@ static void pending_batches_add(grpc_call_element* elem,
   call_data* calld = static_cast<call_data*>(elem->call_data);
   const size_t idx = get_batch_index(batch);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
            calld, idx);
   }
@@ -1094,7 +1116,7 @@ static void pending_batches_add(grpc_call_element* elem,
   }
   if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: exceeded retry buffer size, committing",
              chand, calld);
     }
@@ -1109,7 +1131,7 @@ static void pending_batches_add(grpc_call_element* elem,
     // retries are disabled so that we don't bother with retry overhead.
     if (calld->num_attempts_completed == 0) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG,
+        gpr_log(GPR_INFO,
                "chand=%p calld=%p: disabling retries before first attempt",
                chand, calld);
       }
@@ -1156,7 +1178,7 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
     for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
       if (calld->pending_batches[i].batch != nullptr) ++num_batches;
     }
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
            elem->channel_data, calld, num_batches, grpc_error_string(error));
   }
@@ -1218,7 +1240,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
     for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
       if (calld->pending_batches[i].batch != nullptr) ++num_batches;
     }
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: starting %" PRIuPTR
            " pending batches on subchannel_call=%p",
            chand, calld, num_batches, calld->subchannel_call);
@@ -1263,7 +1285,7 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
       (!batch->recv_message ||
        batch->payload->recv_message.recv_message_ready == nullptr)) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
+      gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
              calld);
     }
     pending_batch_clear(calld, pending);
@@ -1353,7 +1375,7 @@ static void retry_commit(grpc_call_element* elem,
   if (calld->retry_committed) return;
   calld->retry_committed = true;
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
+    gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld);
   }
   if (retry_state != nullptr) {
     free_cached_send_op_data_after_commit(elem, retry_state);
@@ -1398,7 +1420,7 @@ static void do_retry(grpc_call_element* elem,
     next_attempt_time = calld->retry_backoff->NextAttemptTime();
   }
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
            calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
   }
@@ -1432,7 +1454,7 @@ static bool maybe_retry(grpc_call_element* elem,
           batch_data->subchannel_call));
   if (retry_state->retry_dispatched) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
+      gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
              calld);
     }
     return true;
@@ -1444,14 +1466,14 @@ static bool maybe_retry(grpc_call_element* elem,
       calld->retry_throttle_data->RecordSuccess();
     }
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
+      gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld);
     }
     return false;
   }
   // Status is not OK.  Check whether the status is retryable.
   if (!retry_policy->retryable_status_codes.Contains(status)) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: status %s not configured as retryable", chand,
              calld, grpc_status_code_to_string(status));
     }
@@ -1467,14 +1489,14 @@ static bool maybe_retry(grpc_call_element* elem,
   if (calld->retry_throttle_data != nullptr &&
       !calld->retry_throttle_data->RecordFailure()) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
+      gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld);
     }
     return false;
   }
   // Check whether the call is committed.
   if (calld->retry_committed) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
+      gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand,
              calld);
     }
     return false;
@@ -1483,7 +1505,7 @@ static bool maybe_retry(grpc_call_element* elem,
   ++calld->num_attempts_completed;
   if (calld->num_attempts_completed >= retry_policy->max_attempts) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
+      gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand,
              calld, retry_policy->max_attempts);
     }
     return false;
@@ -1491,7 +1513,7 @@ static bool maybe_retry(grpc_call_element* elem,
   // If the call was cancelled from the surface, don't retry.
   if (calld->cancel_error != GRPC_ERROR_NONE) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: call cancelled from surface, not retrying",
              chand, calld);
     }
@@ -1504,16 +1526,15 @@ static bool maybe_retry(grpc_call_element* elem,
     uint32_t ms;
     if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG,
+        gpr_log(GPR_INFO,
                "chand=%p calld=%p: not retrying due to server push-back",
                chand, calld);
       }
       return false;
     } else {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG,
-                "chand=%p calld=%p: server push-back: retry in %u ms", chand,
-                calld, ms);
+        gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
+                chand, calld, ms);
       }
       server_pushback_ms = (grpc_millis)ms;
     }
@@ -1586,7 +1607,7 @@ static void invoke_recv_initial_metadata_callback(void* arg,
         batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
             nullptr) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG,
+        gpr_log(GPR_INFO,
                "chand=%p calld=%p: invoking recv_initial_metadata_ready for "
                "pending batch at index %" PRIuPTR,
                chand, calld, i);
@@ -1622,7 +1643,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
            chand, calld, grpc_error_string(error));
   }
@@ -1637,12 +1658,12 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
       !retry_state->completed_recv_trailing_metadata) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: deferring recv_initial_metadata_ready "
              "(Trailers-Only)",
              chand, calld);
     }
-    retry_state->recv_initial_metadata_ready_deferred = true;
+    retry_state->recv_initial_metadata_ready_deferred_batch = batch_data;
     retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
     if (!retry_state->started_recv_trailing_metadata) {
       // recv_trailing_metadata not yet started by application; start it
@@ -1679,7 +1700,7 @@ static void invoke_recv_message_callback(void* arg, grpc_error* error) {
     if (batch != nullptr && batch->recv_message &&
         batch->payload->recv_message.recv_message_ready != nullptr) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG,
+        gpr_log(GPR_INFO,
                "chand=%p calld=%p: invoking recv_message_ready for "
                "pending batch at index %" PRIuPTR,
                chand, calld, i);
@@ -1712,7 +1733,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
+    gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s",
            chand, calld, grpc_error_string(error));
   }
   subchannel_call_retry_state* retry_state =
@@ -1726,12 +1747,12 @@ static void recv_message_ready(void* arg, grpc_error* error) {
   if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
       !retry_state->completed_recv_trailing_metadata) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: deferring recv_message_ready (nullptr "
              "message and recv_trailing_metadata pending)",
              chand, calld);
     }
-    retry_state->recv_message_ready_deferred = true;
+    retry_state->recv_message_ready_deferred_batch = batch_data;
     retry_state->recv_message_error = GRPC_ERROR_REF(error);
     if (!retry_state->started_recv_trailing_metadata) {
       // recv_trailing_metadata not yet started by application; start it
@@ -1749,6 +1770,59 @@ static void recv_message_ready(void* arg, grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
+//
+// list of closures to execute in call combiner
+//
+
+// Represents a closure that needs to run in the call combiner as part of
+// starting or completing a batch.
+typedef struct {
+  grpc_closure* closure;
+  grpc_error* error;
+  const char* reason;
+  bool free_reason = false;
+} closure_to_execute;
+
+static void execute_closures_in_call_combiner(grpc_call_element* elem,
+                                              const char* caller,
+                                              closure_to_execute* closures,
+                                              size_t num_closures) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  // Note that the call combiner will be yielded for each closure that
+  // we schedule.  We're already running in the call combiner, so one of
+  // the closures can be scheduled directly, but the others will
+  // have to re-enter the call combiner.
+  if (num_closures > 0) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: %s starting closure: %s", chand,
+              calld, caller, closures[0].reason);
+    }
+    GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
+    if (closures[0].free_reason) {
+      gpr_free(const_cast<char*>(closures[0].reason));
+    }
+    for (size_t i = 1; i < num_closures; ++i) {
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO,
+                "chand=%p calld=%p: %s starting closure in call combiner: %s",
+                chand, calld, caller, closures[i].reason);
+      }
+      GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
+                               closures[i].error, closures[i].reason);
+      if (closures[i].free_reason) {
+        gpr_free(const_cast<char*>(closures[i].reason));
+      }
+    }
+  } else {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: no closures to run for %s", chand,
+              calld, caller);
+    }
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run");
+  }
+}
+
 //
 // on_complete callback handling
 //
@@ -1777,36 +1851,35 @@ static void update_retry_state_for_completed_batch(
   }
 }
 
-// Represents a closure that needs to run as a result of a completed batch.
-typedef struct {
-  grpc_closure* closure;
-  grpc_error* error;
-  const char* reason;
-} closure_to_execute;
-
 // Adds any necessary closures for deferred recv_initial_metadata and
 // recv_message callbacks to closures, updating *num_closures as needed.
 static void add_closures_for_deferred_recv_callbacks(
     subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
     closure_to_execute* closures, size_t* num_closures) {
-  if (batch_data->batch.recv_trailing_metadata &&
-      retry_state->recv_initial_metadata_ready_deferred) {
-    closure_to_execute* closure = &closures[(*num_closures)++];
-    closure->closure =
-        GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
-                          invoke_recv_initial_metadata_callback, batch_data,
-                          grpc_schedule_on_exec_ctx);
-    closure->error = retry_state->recv_initial_metadata_error;
-    closure->reason = "resuming recv_initial_metadata_ready";
-  }
-  if (batch_data->batch.recv_trailing_metadata &&
-      retry_state->recv_message_ready_deferred) {
-    closure_to_execute* closure = &closures[(*num_closures)++];
-    closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
-                                         invoke_recv_message_callback,
-                                         batch_data, grpc_schedule_on_exec_ctx);
-    closure->error = retry_state->recv_message_error;
-    closure->reason = "resuming recv_message_ready";
+  if (batch_data->batch.recv_trailing_metadata) {
+    // Add closure for deferred recv_initial_metadata_ready.
+    if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
+      closure_to_execute* closure = &closures[(*num_closures)++];
+      closure->closure = GRPC_CLOSURE_INIT(
+          &batch_data->recv_initial_metadata_ready,
+          invoke_recv_initial_metadata_callback,
+          retry_state->recv_initial_metadata_ready_deferred_batch,
+          grpc_schedule_on_exec_ctx);
+      closure->error = retry_state->recv_initial_metadata_error;
+      closure->reason = "resuming recv_initial_metadata_ready";
+      retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
+    }
+    // Add closure for deferred recv_message_ready.
+    if (retry_state->recv_message_ready_deferred_batch != nullptr) {
+      closure_to_execute* closure = &closures[(*num_closures)++];
+      closure->closure = GRPC_CLOSURE_INIT(
+          &batch_data->recv_message_ready, invoke_recv_message_callback,
+          retry_state->recv_message_ready_deferred_batch,
+          grpc_schedule_on_exec_ctx);
+      closure->error = retry_state->recv_message_error;
+      closure->reason = "resuming recv_message_ready";
+      retry_state->recv_message_ready_deferred_batch = nullptr;
+    }
   }
 }
 
@@ -1838,7 +1911,7 @@ static void add_closures_for_replay_or_pending_send_ops(
   }
   if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: starting next batch for pending send op(s)",
              chand, calld);
     }
@@ -1863,7 +1936,7 @@ static void add_closures_for_completed_pending_batches(
     pending_batch* pending = &calld->pending_batches[i];
     if (pending_batch_is_completed(pending, calld, retry_state)) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG,
+        gpr_log(GPR_INFO,
                "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
                chand, calld, i);
       }
@@ -1896,7 +1969,7 @@ static void add_closures_to_fail_unstarted_pending_batches(
     pending_batch* pending = &calld->pending_batches[i];
     if (pending_batch_is_unstarted(pending, calld, retry_state)) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG,
+        gpr_log(GPR_INFO,
                "chand=%p calld=%p: failing unstarted pending batch at index "
                "%" PRIuPTR,
                chand, calld, i);
@@ -1940,7 +2013,7 @@ static void on_complete(void* arg, grpc_error* error) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
+    gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
            chand, calld, grpc_error_string(error), batch_str);
     gpr_free(batch_str);
   }
@@ -1951,11 +2024,13 @@ static void on_complete(void* arg, grpc_error* error) {
   // If we have previously completed recv_trailing_metadata, then the
   // call is finished.
   bool call_finished = retry_state->completed_recv_trailing_metadata;
+  // Record whether we were already committed before receiving this callback.
+  const bool previously_committed = calld->retry_committed;
   // Update bookkeeping in retry_state.
   update_retry_state_for_completed_batch(batch_data, retry_state);
   if (call_finished) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
+      gpr_log(GPR_INFO, "chand=%p calld=%p: call already finished", chand,
              calld);
     }
   } else {
@@ -1979,35 +2054,39 @@ static void on_complete(void* arg, grpc_error* error) {
       if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
         server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
       }
-    } else if (retry_state->completed_recv_trailing_metadata) {
-      call_finished = true;
     }
-  }
-  if (call_finished && grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
-            calld, grpc_status_code_to_string(status));
-  }
-  // If the call is finished, check if we should retry.
-  if (call_finished &&
-      maybe_retry(elem, batch_data, status, server_pushback_md)) {
-    // Unref batch_data for deferred recv_initial_metadata_ready or
-    // recv_message_ready callbacks, if any.
-    if (batch_data->batch.recv_trailing_metadata &&
-        retry_state->recv_initial_metadata_ready_deferred) {
-      batch_data_unref(batch_data);
-      GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
-    }
-    if (batch_data->batch.recv_trailing_metadata &&
-        retry_state->recv_message_ready_deferred) {
-      batch_data_unref(batch_data);
-      GRPC_ERROR_UNREF(retry_state->recv_message_error);
-    }
-    batch_data_unref(batch_data);
-    return;
+    // If the call just finished, check if we should retry.
+    if (call_finished) {
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
+                calld, grpc_status_code_to_string(status));
+      }
+      if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
+        // Unref batch_data for deferred recv_initial_metadata_ready or
+        // recv_message_ready callbacks, if any.
+        if (batch_data->batch.recv_trailing_metadata &&
+            retry_state->recv_initial_metadata_ready_deferred_batch !=
+                nullptr) {
+          batch_data_unref(batch_data);
+          GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
+        }
+        if (batch_data->batch.recv_trailing_metadata &&
+            retry_state->recv_message_ready_deferred_batch != nullptr) {
+          batch_data_unref(batch_data);
+          GRPC_ERROR_UNREF(retry_state->recv_message_error);
+        }
+        batch_data_unref(batch_data);
+        return;
+      }
+      // Not retrying, so commit the call.
+      retry_commit(elem, retry_state);
+    }
   }
-  // If the call is finished or retries are committed, free cached data for
-  // send ops that we've just completed.
-  if (call_finished || calld->retry_committed) {
+  // If we were already committed before receiving this callback, free
+  // cached data for send ops that we've just completed.  (If the call has
+  // just now finished, the call to retry_commit() above will have freed all
+  // cached send ops, so we don't need to do it here.)
+  if (previously_committed) {
     free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
   }
   // Call not being retried.
@@ -2042,20 +2121,8 @@ static void on_complete(void* arg, grpc_error* error) {
   // Don't need batch_data anymore.
   batch_data_unref(batch_data);
   // Schedule all of the closures identified above.
-  // Note that the call combiner will be yielded for each closure that
-  // we schedule.  We're already running in the call combiner, so one of
-  // the closures can be scheduled directly, but the others will
-  // have to re-enter the call combiner.
-  if (num_closures > 0) {
-    GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
-    for (size_t i = 1; i < num_closures; ++i) {
-      GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
-                               closures[i].error, closures[i].reason);
-    }
-  } else {
-    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
-                            "no closures to run for on_complete");
-  }
+  execute_closures_in_call_combiner(elem, "on_complete", closures,
+                                    num_closures);
 }
 
 //
@@ -2072,6 +2139,31 @@ static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
   grpc_subchannel_call_process_op(subchannel_call, batch);
 }
 
+// Adds a closure to closures that will execute batch in the call combiner.
+static void add_closure_for_subchannel_batch(
+    call_data* calld, grpc_transport_stream_op_batch* batch,
+    closure_to_execute* closures, size_t* num_closures) {
+  batch->handler_private.extra_arg = calld->subchannel_call;
+  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+                    start_batch_in_call_combiner, batch,
+                    grpc_schedule_on_exec_ctx);
+  closure_to_execute* closure = &closures[(*num_closures)++];
+  closure->closure = &batch->handler_private.closure;
+  closure->error = GRPC_ERROR_NONE;
+  // If the tracer is enabled, we log a more detailed message, which
+  // requires dynamic allocation.  This will be freed in
+  // start_retriable_subchannel_batches().
+  if (grpc_client_channel_trace.enabled()) {
+    char* batch_str = grpc_transport_stream_op_batch_string(batch);
+    gpr_asprintf(const_cast<char**>(&closure->reason),
+                 "starting batch in call combiner: %s", batch_str);
+    gpr_free(batch_str);
+    closure->free_reason = true;
+  } else {
+    closure->reason = "start_subchannel_batch";
+  }
+}
+
 // Adds retriable send_initial_metadata op to batch_data.
 static void add_retriable_send_initial_metadata_op(
     call_data* calld, subchannel_call_retry_state* retry_state,
@@ -2131,7 +2223,7 @@ static void add_retriable_send_message_op(
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
            chand, calld, retry_state->started_send_message_count);
   }
@@ -2218,7 +2310,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: call failed but recv_trailing_metadata not "
            "started; starting it internally",
            chand, calld);
@@ -2227,8 +2319,12 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
       static_cast<subchannel_call_retry_state*>(
           grpc_connected_subchannel_call_get_parent_data(
               calld->subchannel_call));
-  subchannel_batch_data* batch_data = batch_data_create(elem, 1);
+  // Create batch_data with 2 refs, since this batch will be unreffed twice:
+  // once when the subchannel batch returns, and again when we actually get
+  // a recv_trailing_metadata op from the surface.
+  subchannel_batch_data* batch_data = batch_data_create(elem, 2);
   add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
+  retry_state->recv_trailing_metadata_internal_batch = batch_data;
   // Note: This will release the call combiner.
   grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
 }
@@ -2246,7 +2342,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
       !retry_state->started_send_initial_metadata &&
       !calld->pending_send_initial_metadata) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: replaying previously completed "
              "send_initial_metadata op",
              chand, calld);
@@ -2262,7 +2358,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
           retry_state->completed_send_message_count &&
       !calld->pending_send_message) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: replaying previously completed "
              "send_message op",
              chand, calld);
@@ -2281,7 +2377,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
       !retry_state->started_send_trailing_metadata &&
       !calld->pending_send_trailing_metadata) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: replaying previously completed "
             "send_trailing_metadata op",
             chand, calld);
@@ -2299,7 +2395,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
 // *num_batches as needed.
 static void add_subchannel_batches_for_pending_batches(
     grpc_call_element* elem, subchannel_call_retry_state* retry_state,
-    grpc_transport_stream_op_batch** batches, size_t* num_batches) {
+    closure_to_execute* closures, size_t* num_closures) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
     pending_batch* pending = &calld->pending_batches[i];
@@ -2342,13 +2438,37 @@ static void add_subchannel_batches_for_pending_batches(
     }
     if (batch->recv_trailing_metadata &&
         retry_state->started_recv_trailing_metadata) {
+      // If we previously completed a recv_trailing_metadata op
+      // initiated by start_internal_recv_trailing_metadata(), use the
+      // result of that instead of trying to re-start this op.
+      if (retry_state->recv_trailing_metadata_internal_batch != nullptr) {
+        // If the batch completed, then trigger the completion callback
+        // directly, so that we return the previously returned results to
+        // the application.  Otherwise, just unref the internally
+        // started subchannel batch, since we'll propagate the
+        // completion when it completes.
+        if (retry_state->completed_recv_trailing_metadata) {
+          subchannel_batch_data* batch_data =
+              retry_state->recv_trailing_metadata_internal_batch;
+          closure_to_execute* closure = &closures[(*num_closures)++];
+          closure->closure = &batch_data->on_complete;
+          // Batches containing recv_trailing_metadata always succeed.
+          closure->error = GRPC_ERROR_NONE;
+          closure->reason =
+              "re-executing on_complete for recv_trailing_metadata "
+              "to propagate internally triggered result";
+        } else {
+          batch_data_unref(retry_state->recv_trailing_metadata_internal_batch);
+        }
+        retry_state->recv_trailing_metadata_internal_batch = nullptr;
+      }
       continue;
     }
     // If we're not retrying, just send the batch as-is.
     if (calld->method_params == nullptr ||
         calld->method_params->retry_policy() == nullptr ||
         calld->retry_committed) {
-      batches[(*num_batches)++] = batch;
+      add_closure_for_subchannel_batch(calld, batch, closures, num_closures);
       pending_batch_clear(calld, pending);
       continue;
     }
@@ -2385,7 +2505,8 @@ static void add_subchannel_batches_for_pending_batches(
       GPR_ASSERT(batch->collect_stats);
       add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
     }
-    batches[(*num_batches)++] = &batch_data->batch;
+    add_closure_for_subchannel_batch(calld, &batch_data->batch, closures,
+                                     num_closures);
   }
 }
 
@@ -2396,69 +2517,36 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
+    gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches",
            chand, calld);
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
           grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
+  // Construct list of closures to execute, one for each pending batch.
   // We can start up to 6 batches.
-  grpc_transport_stream_op_batch*
-      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
-  size_t num_batches = 0;
+  closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches)];
+  size_t num_closures = 0;
   // Replay previously-returned send_* ops if needed.
   subchannel_batch_data* replay_batch_data =
       maybe_create_subchannel_batch_for_replay(elem, retry_state);
   if (replay_batch_data != nullptr) {
-    batches[num_batches++] = &replay_batch_data->batch;
+    add_closure_for_subchannel_batch(calld, &replay_batch_data->batch, closures,
+                                     &num_closures);
   }
   // Now add pending batches.
-  add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
-                                             &num_batches);
+  add_subchannel_batches_for_pending_batches(elem, retry_state, closures,
+                                             &num_closures);
   // Start batches on subchannel call.
-  // Note that the call combiner will be yielded for each batch that we
-  // send down.  We're already running in the call combiner, so one of
-  // the batches can be started directly, but the others will have to
-  // re-enter the call combiner.
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG,
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: starting %" PRIuPTR
            " retriable batches on subchannel_call=%p",
-            chand, calld, num_batches, calld->subchannel_call);
+            chand, calld, num_closures, calld->subchannel_call);
   }
-  if (num_batches == 0) {
-    // This should be fairly rare, but it can happen when (e.g.) an
-    // attempt completes before it has finished replaying all
-    // previously sent messages.
-    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
-                            "no retriable subchannel batches to start");
-  } else {
-    for (size_t i = 1; i < num_batches; ++i) {
-      if (grpc_client_channel_trace.enabled()) {
-        char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
-        gpr_log(GPR_DEBUG,
-                "chand=%p calld=%p: starting batch in call combiner: %s", chand,
-                calld, batch_str);
-        gpr_free(batch_str);
-      }
-      batches[i]->handler_private.extra_arg = calld->subchannel_call;
-      GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
-                        start_batch_in_call_combiner, batches[i],
-                        grpc_schedule_on_exec_ctx);
-      GRPC_CALL_COMBINER_START(calld->call_combiner,
-                               &batches[i]->handler_private.closure,
-                               GRPC_ERROR_NONE, "start_subchannel_batch");
-    }
-    if (grpc_client_channel_trace.enabled()) {
-      char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
-              batch_str);
-      gpr_free(batch_str);
-    }
-    // Note: This will release the call combiner.
-    grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
-  }
+  execute_closures_in_call_combiner(elem, "start_retriable_subchannel_batches",
+                                    closures, num_closures);
 }
 
 //
@@ -2483,7 +2571,7 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
   grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
       call_args, &calld->subchannel_call);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
+    gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
   }
   if (new_error != GRPC_ERROR_NONE) {
@@ -2524,7 +2612,7 @@ static void pick_done(void* arg, grpc_error* error) {
             : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                  "Failed to create subchannel", &error, 1);
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: failed to create subchannel: error=%s",
              chand, calld, grpc_error_string(new_error));
     }
@@ -2568,7 +2656,7 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
   // the one we started it on. However, this will just be a no-op.
   if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+      gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling pick from LB policy %p",
              chand, calld, chand->lb_policy.get());
     }
     chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
@ -2583,8 +2671,8 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: pick completed asynchronously " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: pick completed asynchronously " , chand ,
chand , calld ) ;
calld ) ;
}
}
async_pick_done_locked ( elem , GRPC_ERROR_REF ( error ) ) ;
async_pick_done_locked ( elem , GRPC_ERROR_REF ( error ) ) ;
GRPC_CALL_STACK_UNREF ( calld - > owning_call , " pick_callback " ) ;
GRPC_CALL_STACK_UNREF ( calld - > owning_call , " pick_callback " ) ;
@ -2596,7 +2684,7 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: applying service config to call " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: applying service config to call " ,
chand , calld ) ;
chand , calld ) ;
}
}
if ( chand - > retry_throttle_data ! = nullptr ) {
if ( chand - > retry_throttle_data ! = nullptr ) {
@ -2634,8 +2722,8 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: starting pick on lb_policy=%p " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: starting pick on lb_policy=%p " , chand ,
chand , calld , chand - > lb_policy . get ( ) ) ;
calld , chand - > lb_policy . get ( ) ) ;
}
}
// Only get service config data on the first attempt.
// Only get service config data on the first attempt.
if ( calld - > num_attempts_completed = = 0 ) {
if ( calld - > num_attempts_completed = = 0 ) {
@ -2682,7 +2770,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
if ( pick_done ) {
if ( pick_done ) {
// Pick completed synchronously.
// Pick completed synchronously.
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: pick completed synchronously " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: pick completed synchronously " ,
chand , calld ) ;
chand , calld ) ;
}
}
GRPC_CALL_STACK_UNREF ( calld - > owning_call , " pick_callback " ) ;
GRPC_CALL_STACK_UNREF ( calld - > owning_call , " pick_callback " ) ;
@ -2726,7 +2814,7 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG ,
gpr_log ( GPR_INFO ,
" chand=%p calld=%p: cancelling pick waiting for resolver result " ,
" chand=%p calld=%p: cancelling pick waiting for resolver result " ,
chand , calld ) ;
chand , calld ) ;
}
}
@ -2746,7 +2834,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
if ( args - > finished ) {
if ( args - > finished ) {
/* cancelled, do nothing */
/* cancelled, do nothing */
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " call cancelled before resolver result " ) ;
gpr_log ( GPR_INFO , " call cancelled before resolver result " ) ;
}
}
gpr_free ( args ) ;
gpr_free ( args ) ;
return ;
return ;
@ -2757,14 +2845,14 @@ static void pick_after_resolver_result_done_locked(void* arg,
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
if ( error ! = GRPC_ERROR_NONE ) {
if ( error ! = GRPC_ERROR_NONE ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: resolver failed to return data " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: resolver failed to return data " ,
chand , calld ) ;
chand , calld ) ;
}
}
async_pick_done_locked ( elem , GRPC_ERROR_REF ( error ) ) ;
async_pick_done_locked ( elem , GRPC_ERROR_REF ( error ) ) ;
} else if ( chand - > resolver = = nullptr ) {
} else if ( chand - > resolver = = nullptr ) {
// Shutting down.
// Shutting down.
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: resolver disconnected " , chand ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: resolver disconnected " , chand ,
calld ) ;
calld ) ;
}
}
async_pick_done_locked (
async_pick_done_locked (
@ -2780,7 +2868,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
. send_initial_metadata_flags ;
. send_initial_metadata_flags ;
if ( send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY ) {
if ( send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG ,
gpr_log ( GPR_INFO ,
" chand=%p calld=%p: resolver returned but no LB policy; "
" chand=%p calld=%p: resolver returned but no LB policy; "
" wait_for_ready=true; trying again " ,
" wait_for_ready=true; trying again " ,
chand , calld ) ;
chand , calld ) ;
@ -2788,7 +2876,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
pick_after_resolver_result_start_locked ( elem ) ;
pick_after_resolver_result_start_locked ( elem ) ;
} else {
} else {
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG ,
gpr_log ( GPR_INFO ,
" chand=%p calld=%p: resolver returned but no LB policy; "
" chand=%p calld=%p: resolver returned but no LB policy; "
" wait_for_ready=false; failing " ,
" wait_for_ready=false; failing " ,
chand , calld ) ;
chand , calld ) ;
@ -2801,7 +2889,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
}
}
} else {
} else {
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: resolver returned, doing pick " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: resolver returned, doing pick " ,
chand , calld ) ;
chand , calld ) ;
}
}
if ( pick_callback_start_locked ( elem ) ) {
if ( pick_callback_start_locked ( elem ) ) {
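The hunk above also encodes the wait_for_ready decision: when the resolver returns without producing an LB policy, a call whose initial metadata carries GRPC_INITIAL_METADATA_WAIT_FOR_READY re-queues itself for the next resolver result, while any other call fails right away. A small illustrative sketch of that branch follows, assuming invented names (OnResolverResult, PickResult) rather than the real client-channel types.

// Hypothetical sketch: retry the pick when wait_for_ready is set, otherwise
// fail the call as soon as the resolver comes back without an LB policy.
#include <cstdio>

enum class PickResult { kRetry, kFail, kPick };

PickResult OnResolverResult(bool have_lb_policy, bool wait_for_ready) {
  if (!have_lb_policy) {
    // No LB policy yet: the call's metadata flags decide what happens.
    return wait_for_ready ? PickResult::kRetry : PickResult::kFail;
  }
  return PickResult::kPick;  // normal case: run the pick now
}

int main() {
  std::printf("%d\n", static_cast<int>(OnResolverResult(false, true)));   // retry
  std::printf("%d\n", static_cast<int>(OnResolverResult(false, false)));  // fail
  std::printf("%d\n", static_cast<int>(OnResolverResult(true, false)));   // pick
}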
@ -2819,7 +2907,7 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
channel_data * chand = static_cast < channel_data * > ( elem - > channel_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
call_data * calld = static_cast < call_data * > ( elem - > call_data ) ;
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG ,
gpr_log ( GPR_INFO ,
" chand=%p calld=%p: deferring pick pending resolver result " , chand ,
" chand=%p calld=%p: deferring pick pending resolver result " , chand ,
calld ) ;
calld ) ;
}
}
@ -2886,7 +2974,7 @@ static void cc_start_transport_stream_op_batch(
// If we've previously been cancelled, immediately fail any new batches.
// If we've previously been cancelled, immediately fail any new batches.
if ( calld - > cancel_error ! = GRPC_ERROR_NONE ) {
if ( calld - > cancel_error ! = GRPC_ERROR_NONE ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: failing batch with error: %s " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: failing batch with error: %s " ,
chand , calld , grpc_error_string ( calld - > cancel_error ) ) ;
chand , calld , grpc_error_string ( calld - > cancel_error ) ) ;
}
}
// Note: This will release the call combiner.
// Note: This will release the call combiner.
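The two hunks above cover the cancellation fast path: once a cancel_stream batch records calld->cancel_error, every batch that arrives afterwards is failed immediately with a ref of that error instead of being routed any further. Below is a small self-contained sketch of that "record once, fail later arrivals" idea; CallState and Batch are hypothetical types, not the gRPC structures.

// Hypothetical sketch: remember the first cancellation error and fail every
// batch that arrives after it, instead of forwarding it downstream.
#include <cstdio>
#include <optional>
#include <string>

struct Batch {
  bool is_cancel = false;
  std::string cancel_reason;
};

class CallState {
 public:
  void StartBatch(const Batch& batch) {
    if (cancel_error_) {
      // Previously cancelled: fail the new batch right away.
      std::printf("failing batch with error: %s\n", cancel_error_->c_str());
      return;
    }
    if (batch.is_cancel) {
      // Record the error so later batches can be failed with it.
      cancel_error_ = batch.cancel_reason;
      std::printf("recording cancel_error=%s\n", cancel_error_->c_str());
      return;
    }
    std::printf("forwarding batch\n");
  }

 private:
  std::optional<std::string> cancel_error_;
};

int main() {
  CallState call;
  call.StartBatch({});                            // forwarded
  call.StartBatch({true, "deadline exceeded"});   // recorded
  call.StartBatch({});                            // failed immediately
}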
@ -2905,7 +2993,7 @@ static void cc_start_transport_stream_op_batch(
calld - > cancel_error =
calld - > cancel_error =
GRPC_ERROR_REF ( batch - > payload - > cancel_stream . cancel_error ) ;
GRPC_ERROR_REF ( batch - > payload - > cancel_stream . cancel_error ) ;
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: recording cancel_error=%s " , chand ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: recording cancel_error=%s " , chand ,
calld , grpc_error_string ( calld - > cancel_error ) ) ;
calld , grpc_error_string ( calld - > cancel_error ) ) ;
}
}
// If we do not have a subchannel call (i.e., a pick has not yet
// If we do not have a subchannel call (i.e., a pick has not yet
@ -2931,7 +3019,7 @@ static void cc_start_transport_stream_op_batch(
// streaming calls).
// streaming calls).
if ( calld - > subchannel_call ! = nullptr ) {
if ( calld - > subchannel_call ! = nullptr ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG ,
gpr_log ( GPR_INFO ,
" chand=%p calld=%p: starting batch on subchannel_call=%p " , chand ,
" chand=%p calld=%p: starting batch on subchannel_call=%p " , chand ,
calld , calld - > subchannel_call ) ;
calld , calld - > subchannel_call ) ;
}
}
@ -2943,7 +3031,7 @@ static void cc_start_transport_stream_op_batch(
// combiner to start a pick.
// combiner to start a pick.
if ( batch - > send_initial_metadata ) {
if ( batch - > send_initial_metadata ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG , " chand=%p calld=%p: entering client_channel combiner " ,
gpr_log ( GPR_INFO , " chand=%p calld=%p: entering client_channel combiner " ,
chand , calld ) ;
chand , calld ) ;
}
}
GRPC_CLOSURE_SCHED (
GRPC_CLOSURE_SCHED (
@ -2953,7 +3041,7 @@ static void cc_start_transport_stream_op_batch(
} else {
} else {
// For all other batches, release the call combiner.
// For all other batches, release the call combiner.
if ( grpc_client_channel_trace . enabled ( ) ) {
if ( grpc_client_channel_trace . enabled ( ) ) {
gpr_log ( GPR_DEBUG ,
gpr_log ( GPR_INFO ,
" chand=%p calld=%p: saved batch, yeilding call combiner " , chand ,
" chand=%p calld=%p: saved batch, yeilding call combiner " , chand ,
calld ) ;
calld ) ;
}
}
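Taken together, these hunks show the three-way routing in cc_start_transport_stream_op_batch: a batch goes straight to an existing subchannel call if there is one, a send_initial_metadata batch enters the channel combiner to trigger a pick, and anything else is parked until the pick completes. The compact sketch below mimics that routing with invented names (ClientCall, Batch, StartPick) and resolves the pick synchronously purely for illustration.

// Hypothetical sketch of the batch-routing decision: pass through if a
// transport stream exists, start a pick on initial metadata, otherwise park
// the batch until the pick finishes.
#include <cstdio>
#include <vector>

struct Batch { bool send_initial_metadata = false; };

class ClientCall {
 public:
  void StartBatch(const Batch& batch) {
    if (have_stream_) {
      std::printf("starting batch on existing stream\n");
    } else if (batch.send_initial_metadata) {
      std::printf("entering combiner to start a pick\n");
      pending_.push_back(batch);
      StartPick();
    } else {
      std::printf("saving batch, yielding combiner\n");
      pending_.push_back(batch);
    }
  }

 private:
  void StartPick() {
    // In the real code the pick is asynchronous; here it succeeds inline.
    have_stream_ = true;
    std::printf("pick done, replaying %zu pending batch(es)\n",
                pending_.size());
    pending_.clear();
  }

  bool have_stream_ = false;
  std::vector<Batch> pending_;
};

int main() {
  ClientCall call;
  Batch other;                     // e.g. a recv op arriving before metadata
  call.StartBatch(other);          // parked
  Batch initial;
  initial.send_initial_metadata = true;
  call.StartBatch(initial);        // triggers the pick
  call.StartBatch(other);          // now goes straight to the stream
}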
@ -3164,6 +3252,8 @@ static void watch_connectivity_state_locked(void* arg,
external_connectivity_watcher * found = nullptr ;
external_connectivity_watcher * found = nullptr ;
if ( w - > state ! = nullptr ) {
if ( w - > state ! = nullptr ) {
external_connectivity_watcher_list_append ( w - > chand , w ) ;
external_connectivity_watcher_list_append ( w - > chand , w ) ;
// An assumption is being made that the closure is scheduled on the exec ctx
// scheduler and that GRPC_CLOSURE_RUN would run the closure immediately.
GRPC_CLOSURE_RUN ( w - > watcher_timer_init , GRPC_ERROR_NONE ) ;
GRPC_CLOSURE_RUN ( w - > watcher_timer_init , GRPC_ERROR_NONE ) ;
GRPC_CLOSURE_INIT ( & w - > my_closure , on_external_watch_complete_locked , w ,
GRPC_CLOSURE_INIT ( & w - > my_closure , on_external_watch_complete_locked , w ,
grpc_combiner_scheduler ( w - > chand - > combiner ) ) ;
grpc_combiner_scheduler ( w - > chand - > combiner ) ) ;
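The comment added in this last hunk relies on the difference between running a closure immediately and merely scheduling it for a later drain of the exec ctx. The tiny self-contained sketch below contrasts the two, using made-up Run/Schedule/Drain helpers rather than gRPC's closure API.

// Hypothetical sketch: "Run" executes the callback right now on the calling
// thread, while "Schedule" only queues it for a later drain.
#include <cstdio>
#include <deque>
#include <functional>

std::deque<std::function<void()>> g_queue;

void Run(std::function<void()> fn) { fn(); }                 // immediate
void Schedule(std::function<void()> fn) {                    // deferred
  g_queue.push_back(std::move(fn));
}
void Drain() {
  while (!g_queue.empty()) {
    auto fn = std::move(g_queue.front());
    g_queue.pop_front();
    fn();
  }
}

int main() {
  Run([] { std::printf("ran immediately\n"); });
  Schedule([] { std::printf("ran at drain time\n"); });
  std::printf("before drain\n");
  Drain();
}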