Add rich closure debug mode

Branch: pull/10767/head
Author: ncteisen
Parent: 1621f5e051
Commit: 274bbbe6a0
  1. doc/core/grpc-error.md (6 changed lines)
  2. src/core/ext/census/grpc_filter.c (2 changed lines)
  3. src/core/ext/filters/client_channel/channel_connectivity.c (6 changed lines)
  4. src/core/ext/filters/client_channel/client_channel.c (62 changed lines)
  5. src/core/ext/filters/client_channel/http_connect_handshaker.c (10 changed lines)
  6. src/core/ext/filters/client_channel/lb_policy.c (2 changed lines)
  7. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c (8 changed lines)
  8. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (38 changed lines)
  9. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c (14 changed lines)
  10. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (14 changed lines)
  11. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c (8 changed lines)
  12. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c (4 changed lines)
  13. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c (10 changed lines)
  14. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c (8 changed lines)
  15. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c (8 changed lines)
  16. src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c (4 changed lines)
  17. src/core/ext/filters/client_channel/subchannel.c (12 changed lines)
  18. src/core/ext/filters/deadline/deadline_filter.c (14 changed lines)
  19. src/core/ext/filters/http/client/http_client_filter.c (14 changed lines)
  20. src/core/ext/filters/http/message_compress/message_compress_filter.c (4 changed lines)
  21. src/core/ext/filters/http/server/http_server_filter.c (14 changed lines)
  22. src/core/ext/filters/load_reporting/load_reporting_filter.c (2 changed lines)
  23. src/core/ext/filters/max_age/max_age_filter.c (18 changed lines)
  24. src/core/ext/filters/message_size/message_size_filter.c (4 changed lines)
  25. src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c (4 changed lines)
  26. src/core/ext/transport/chttp2/client/chttp2_connector.c (6 changed lines)
  27. src/core/ext/transport/chttp2/server/chttp2_server.c (2 changed lines)
  28. src/core/ext/transport/chttp2/transport/chttp2_transport.c (98 changed lines)
  29. src/core/ext/transport/chttp2/transport/frame_data.c (2 changed lines)
  30. src/core/ext/transport/chttp2/transport/hpack_parser.c (4 changed lines)
  31. src/core/ext/transport/chttp2/transport/writing.c (2 changed lines)
  32. src/core/ext/transport/cronet/transport/cronet_transport.c (34 changed lines)
  33. src/core/lib/channel/channel_stack.h (2 changed lines)
  34. src/core/lib/channel/handshaker.c (8 changed lines)
  35. src/core/lib/http/httpcli.c (10 changed lines)
  36. src/core/lib/http/httpcli_security_connector.c (2 changed lines)
  37. src/core/lib/iomgr/closure.c (54 changed lines)
  38. src/core/lib/iomgr/closure.h (60 changed lines)
  39. src/core/lib/iomgr/combiner.c (12 changed lines)
  40. src/core/lib/iomgr/error.h (2 changed lines)
  41. src/core/lib/iomgr/ev_epoll1_linux.c (4 changed lines)
  42. src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c (4 changed lines)
  43. src/core/lib/iomgr/ev_epoll_thread_pool_linux.c (4 changed lines)
  44. src/core/lib/iomgr/ev_epollex_linux.c (10 changed lines)
  45. src/core/lib/iomgr/ev_epollsig_linux.c (4 changed lines)
  46. src/core/lib/iomgr/ev_poll_posix.c (16 changed lines)
  47. src/core/lib/iomgr/exec_ctx.c (4 changed lines)
  48. src/core/lib/iomgr/executor.c (2 changed lines)
  49. src/core/lib/iomgr/lockfree_event.c (8 changed lines)
  50. src/core/lib/iomgr/pollset_uv.c (2 changed lines)
  51. src/core/lib/iomgr/pollset_windows.c (4 changed lines)
  52. src/core/lib/iomgr/resolve_address_posix.c (6 changed lines)
  53. src/core/lib/iomgr/resolve_address_uv.c (6 changed lines)
  54. src/core/lib/iomgr/resolve_address_windows.c (6 changed lines)
  55. src/core/lib/iomgr/resource_quota.c (56 changed lines)
  56. src/core/lib/iomgr/socket_windows.c (4 changed lines)
  57. src/core/lib/iomgr/tcp_client_posix.c (14 changed lines)
  58. src/core/lib/iomgr/tcp_client_uv.c (4 changed lines)
  59. src/core/lib/iomgr/tcp_client_windows.c (8 changed lines)
  60. src/core/lib/iomgr/tcp_posix.c (14 changed lines)
  61. src/core/lib/iomgr/tcp_server_posix.c (10 changed lines)
  62. src/core/lib/iomgr/tcp_server_uv.c (4 changed lines)
  63. src/core/lib/iomgr/tcp_server_windows.c (8 changed lines)
  64. src/core/lib/iomgr/tcp_uv.c (10 changed lines)
  65. src/core/lib/iomgr/tcp_windows.c (20 changed lines)
  66. src/core/lib/iomgr/timer_generic.c (8 changed lines)
  67. src/core/lib/iomgr/timer_uv.c (6 changed lines)
  68. src/core/lib/iomgr/udp_server.c (12 changed lines)
  69. src/core/lib/security/credentials/fake/fake_credentials.c (4 changed lines)
  70. src/core/lib/security/credentials/google_default/google_default_credentials.c (4 changed lines)
  71. src/core/lib/security/credentials/jwt/jwt_verifier.c (6 changed lines)
  72. src/core/lib/security/credentials/oauth2/oauth2_credentials.c (4 changed lines)
  73. src/core/lib/security/transport/secure_endpoint.c (6 changed lines)
  74. src/core/lib/security/transport/security_connector.c (8 changed lines)
  75. src/core/lib/security/transport/security_handshaker.c (12 changed lines)
  76. src/core/lib/security/transport/server_auth_filter.c (8 changed lines)
  77. src/core/lib/surface/alarm.c (2 changed lines)
  78. src/core/lib/surface/call.c (20 changed lines)
  79. src/core/lib/surface/channel_ping.c (2 changed lines)
  80. src/core/lib/surface/completion_queue.c (6 changed lines)
  81. src/core/lib/surface/lame_client.cc (8 changed lines)
  82. src/core/lib/surface/server.c (38 changed lines)
  83. src/core/lib/transport/connectivity_state.c (10 changed lines)
  84. src/core/lib/transport/transport.c (18 changed lines)
  85. test/core/bad_client/bad_client.c (4 changed lines)
  86. test/core/client_channel/resolvers/dns_resolver_connectivity_test.c (10 changed lines)
  87. test/core/client_channel/resolvers/fake_resolver_test.c (4 changed lines)
  88. test/core/client_channel/resolvers/sockaddr_resolver_test.c (2 changed lines)
  89. test/core/end2end/bad_server_response_test.c (4 changed lines)
  90. test/core/end2end/fixtures/http_proxy_fixture.c (16 changed lines)
  91. test/core/end2end/fuzzers/api_fuzzer.c (16 changed lines)
  92. test/core/end2end/goaway_server_test.c (4 changed lines)
  93. test/core/end2end/tests/filter_causes_close.c (4 changed lines)
  94. test/core/http/httpcli_test.c (6 changed lines)
  95. test/core/http/httpscli_test.c (6 changed lines)
  96. test/core/iomgr/combiner_test.c (20 changed lines)
  97. test/core/iomgr/endpoint_pair_test.c (2 changed lines)
  98. test/core/iomgr/endpoint_tests.c (10 changed lines)
  99. test/core/iomgr/ev_epollsig_linux_test.c (6 changed lines)
  100. test/core/iomgr/fd_posix_test.c (12 changed lines)
  Some files were not shown because too many files have changed in this diff.
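Nearly every hunk below is the same mechanical change: call sites move from the lowercase functions (`grpc_closure_init`, `grpc_closure_create`, `grpc_closure_run`, `grpc_closure_sched`, `grpc_closure_list_sched`) to uppercase macros taking identical arguments. Routing every call through a macro is what makes a "rich closure debug mode" possible: in debug builds the macro can also record where each closure was created or scheduled. The sketch below shows one plausible shape for such a macro layer; it is illustrative only, since the contents of `closure.h` are not shown in this truncated diff, and the extra file/line parameters are an assumption.

```C
/* Illustrative sketch of a closure debug layer; NOT the actual closure.h
 * from this commit. Assumes grpc_closure_init gains __FILE__/__LINE__
 * parameters in debug builds so a closure can remember its creation site. */
#ifndef NDEBUG
#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
  grpc_closure_init(__FILE__, __LINE__, closure, cb, cb_arg, scheduler)
#else
#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
  grpc_closure_init(closure, cb, cb_arg, scheduler)
#endif
```

Because each macro takes exactly the arguments the old function took, the conversion can be applied tree-wide without touching surrounding logic, which is why the diff is large but uniform.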

@@ -83,12 +83,12 @@ c->cb(exec_ctx, c->cb_arg, err);
 The caller is still responsible for unref-ing the error.
 However, the above line is currently being phased out! It is safer to invoke
-callbacks with `grpc_closure_run` and `grpc_closure_sched`. These functions are
+callbacks with `GRPC_CLOSURE_RUN` and `GRPC_CLOSURE_SCHED`. These functions are
 not callbacks, so they will take ownership of the error passed to them.
 ```C
 grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error occured");
-grpc_closure_run(exec_ctx, cb, error);
+GRPC_CLOSURE_RUN(exec_ctx, cb, error);
 // current function no longer has ownership of the error
 ```
@@ -97,7 +97,7 @@ you must explicitly take a reference.
 ```C
 grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error occured");
-grpc_closure_run(exec_ctx, cb, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_RUN(exec_ctx, cb, GRPC_ERROR_REF(error));
 // do some other things with the error
 GRPC_ERROR_UNREF(error);
 ```
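Taken together, the two snippets above describe the ownership contract that this commit preserves under the new names: a closure callback merely borrows the error it is invoked with, while `GRPC_CLOSURE_RUN` and `GRPC_CLOSURE_SCHED` consume the error they are handed. A condensed sketch of that contract follows; the struct, field, and callback names are hypothetical.

```C
#include <stdbool.h>
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"

typedef struct {
  grpc_closure on_done_closure;
  bool saw_error;
} my_state_t; /* hypothetical */

static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  my_state_t *st = (my_state_t *)arg;
  /* Callback convention: we do NOT own "error". Take GRPC_ERROR_REF(error)
   * if we need to keep it beyond this call; otherwise just inspect it. */
  st->saw_error = (error != GRPC_ERROR_NONE);
}

static void kick_off(grpc_exec_ctx *exec_ctx, my_state_t *st) {
  GRPC_CLOSURE_INIT(&st->on_done_closure, on_done, st,
                    grpc_schedule_on_exec_ctx);
  grpc_error *error =
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error occurred");
  /* GRPC_CLOSURE_SCHED takes ownership of "error": no GRPC_ERROR_UNREF here
   * unless we first took our own ref with GRPC_ERROR_REF(error). */
  GRPC_CLOSURE_SCHED(exec_ctx, &st->on_done_closure, error);
}
```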

@@ -141,7 +141,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
 memset(d, 0, sizeof(*d));
 d->start_ts = args->start_time;
 /* TODO(hongyu): call census_tracing_start_op here. */
-grpc_closure_init(&d->finish_recv, server_on_done_recv, elem,
+GRPC_CLOSURE_INIT(&d->finish_recv, server_on_done_recv, elem,
 grpc_schedule_on_exec_ctx);
 return GRPC_ERROR_NONE;
 }

@@ -211,9 +211,9 @@ void grpc_channel_watch_connectivity_state(
 grpc_cq_begin_op(cq, tag);
 gpr_mu_init(&w->mu);
-grpc_closure_init(&w->on_complete, watch_complete, w,
+GRPC_CLOSURE_INIT(&w->on_complete, watch_complete, w,
 grpc_schedule_on_exec_ctx);
-grpc_closure_init(&w->on_timeout, timeout_complete, w,
+GRPC_CLOSURE_INIT(&w->on_timeout, timeout_complete, w,
 grpc_schedule_on_exec_ctx);
 w->phase = WAITING;
 w->state = last_observed_state;
@@ -225,7 +225,7 @@ void grpc_channel_watch_connectivity_state(
 watcher_timer_init_arg *wa = gpr_malloc(sizeof(watcher_timer_init_arg));
 wa->w = w;
 wa->deadline = deadline;
-grpc_closure_init(&w->watcher_timer_init, watcher_timer_init, wa,
+GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
 grpc_schedule_on_exec_ctx);
 if (client_channel_elem->filter == &grpc_client_channel_filter) {

@@ -275,7 +275,7 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
 w->chand = chand;
-grpc_closure_init(&w->on_changed, on_lb_policy_state_changed_locked, w,
+GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
 grpc_combiner_scheduler(chand->combiner));
 w->state = current_state;
 w->lb_policy = lb_policy;
@@ -364,7 +364,7 @@ static void wrapped_on_pick_closure_cb(grpc_exec_ctx *exec_ctx, void *arg,
 GPR_ASSERT(wc_arg != NULL);
 GPR_ASSERT(wc_arg->wrapped_closure != NULL);
 GPR_ASSERT(wc_arg->lb_policy != NULL);
-grpc_closure_run(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_RUN(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
 GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->lb_policy, "pick_subchannel_wrapping");
 gpr_free(wc_arg);
 }
@@ -506,12 +506,12 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
 }
 chand->method_params_table = method_params_table;
 if (lb_policy != NULL) {
-grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &chand->waiting_for_config_closures);
 } else if (chand->resolver == NULL /* disconnected */) {
 grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Channel disconnected", &error, 1));
-grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &chand->waiting_for_config_closures);
 }
 if (!lb_policy_updated && lb_policy != NULL &&
 chand->exit_idle_when_lb_policy_arrives) {
@@ -583,7 +583,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
 if (op->send_ping != NULL) {
 if (chand->lb_policy == NULL) {
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, op->send_ping,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
 } else {
@@ -604,7 +604,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
 if (!chand->started_resolving) {
 grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
 GRPC_ERROR_REF(op->disconnect_with_error));
-grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &chand->waiting_for_config_closures);
 }
 if (chand->lb_policy != NULL) {
 grpc_pollset_set_del_pollset_set(exec_ctx,
@@ -618,7 +618,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
 }
 GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op");
-grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
 }
 static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -634,9 +634,9 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
 op->handler_private.extra_arg = elem;
 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx,
-grpc_closure_init(&op->handler_private.closure, start_transport_op_locked,
+GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked,
 op, grpc_combiner_scheduler(chand->combiner)),
 GRPC_ERROR_NONE);
 }
@@ -677,7 +677,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
 gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
 chand->owning_stack = args->channel_stack;
-grpc_closure_init(&chand->on_resolver_result_changed,
+GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed,
 on_resolver_result_changed_locked, chand,
 grpc_combiner_scheduler(chand->combiner));
 chand->interested_parties = grpc_pollset_set_create();
@@ -737,8 +737,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
 grpc_channel_element *elem) {
 channel_data *chand = elem->channel_data;
 if (chand->resolver != NULL) {
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
-exec_ctx, grpc_closure_create(shutdown_resolver_locked, chand->resolver,
+exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
 grpc_combiner_scheduler(chand->combiner)),
 GRPC_ERROR_NONE);
 }
@@ -1038,13 +1038,13 @@ static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
 if (cpa->connected_subchannel == NULL) {
 /* cancelled, do nothing */
 } else if (error != GRPC_ERROR_NONE) {
-grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_SCHED(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
 } else {
 if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata,
 cpa->initial_metadata_flags,
 cpa->connected_subchannel,
 cpa->subchannel_call_context, cpa->on_ready)) {
-grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
 }
 }
 gpr_free(cpa);
@@ -1064,7 +1064,7 @@ static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
 continue_picking_args *cpa = closure->cb_arg;
 if (cpa->connected_subchannel == &calld->connected_subchannel) {
 cpa->connected_subchannel = NULL;
-grpc_closure_sched(exec_ctx, cpa->on_ready,
+GRPC_CLOSURE_SCHED(exec_ctx, cpa->on_ready,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Pick cancelled", &error, 1));
 }
@@ -1113,7 +1113,7 @@ static bool pick_subchannel_locked(
 // the LB policy for the duration of the pick.
 wrapped_on_pick_closure_arg *w_on_pick_arg =
 gpr_zalloc(sizeof(*w_on_pick_arg));
-grpc_closure_init(&w_on_pick_arg->wrapper_closure,
+GRPC_CLOSURE_INIT(&w_on_pick_arg->wrapper_closure,
 wrapped_on_pick_closure_cb, w_on_pick_arg,
 grpc_schedule_on_exec_ctx);
 w_on_pick_arg->wrapped_closure = on_ready;
@@ -1147,12 +1147,12 @@ static bool pick_subchannel_locked(
 cpa->subchannel_call_context = subchannel_call_context;
 cpa->on_ready = on_ready;
 cpa->elem = elem;
-grpc_closure_init(&cpa->closure, continue_picking_locked, cpa,
+GRPC_CLOSURE_INIT(&cpa->closure, continue_picking_locked, cpa,
 grpc_combiner_scheduler(chand->combiner));
 grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
 GRPC_ERROR_NONE);
 } else {
-grpc_closure_sched(exec_ctx, on_ready,
+GRPC_CLOSURE_SCHED(exec_ctx, on_ready,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
 }
@@ -1202,7 +1202,7 @@ static void start_transport_stream_op_batch_locked_inner(
 if (!calld->pick_pending && calld->connected_subchannel == NULL &&
 op->send_initial_metadata) {
 calld->pick_pending = true;
-grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem,
+GRPC_CLOSURE_INIT(&calld->next_step, subchannel_ready_locked, elem,
 grpc_combiner_scheduler(chand->combiner));
 GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
 /* If a subchannel is not available immediately, the polling entity from
@@ -1275,7 +1275,7 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 calld->retry_throttle_data);
 }
 }
-grpc_closure_run(exec_ctx, calld->original_on_complete,
+GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete,
 GRPC_ERROR_REF(error));
 }
@@ -1291,7 +1291,7 @@ static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
 if (op->recv_trailing_metadata) {
 GPR_ASSERT(op->on_complete != NULL);
 calld->original_on_complete = op->on_complete;
-grpc_closure_init(&calld->on_complete, on_complete, elem,
+GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
 grpc_schedule_on_exec_ctx);
 op->on_complete = &calld->on_complete;
 }
@@ -1340,8 +1340,8 @@ static void cc_start_transport_stream_op_batch(
 /* we failed; lock and figure out what to do */
 GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
 op->handler_private.extra_arg = elem;
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
-exec_ctx, grpc_closure_init(&op->handler_private.closure,
+exec_ctx, GRPC_CLOSURE_INIT(&op->handler_private.closure,
 start_transport_stream_op_batch_locked, op,
 grpc_combiner_scheduler(chand->combiner)),
 GRPC_ERROR_NONE);
@@ -1402,7 +1402,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
 }
 }
 gpr_free(calld->waiting_ops);
-grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
 }
 static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -1456,8 +1456,8 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
 grpc_connectivity_state_check(&chand->state_tracker);
 if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
-exec_ctx, grpc_closure_create(try_to_connect_locked, chand,
+exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
 grpc_combiner_scheduler(chand->combiner)),
 GRPC_ERROR_NONE);
 }
@@ -1547,7 +1547,7 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
 "external_connectivity_watcher");
 external_connectivity_watcher_list_remove(w->chand, w);
 gpr_free(w);
-grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
 }
 static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1556,8 +1556,8 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
 external_connectivity_watcher *found = NULL;
 if (w->state != NULL) {
 external_connectivity_watcher_list_append(w->chand, w);
-grpc_closure_run(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
+GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
-grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
+GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
 grpc_schedule_on_exec_ctx);
 grpc_connectivity_state_notify_on_state_change(
 exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
@@ -1592,9 +1592,9 @@ void grpc_client_channel_watch_connectivity_state(
 chand->interested_parties);
 GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
 "external_connectivity_watcher");
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx,
-grpc_closure_init(&w->my_closure, watch_connectivity_state_locked, w,
+GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w,
 grpc_combiner_scheduler(chand->combiner)),
 GRPC_ERROR_NONE);
 }

@@ -118,7 +118,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
 handshaker->shutdown = true;
 }
 // Invoke callback.
-grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
+GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error);
 }
 // Callback invoked when finished writing HTTP CONNECT request.
@@ -217,7 +217,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
 goto done;
 }
 // Success. Invoke handshake-done callback.
-grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
+GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error);
 done:
 // Set shutdown to true so that subsequent calls to
 // http_connect_handshaker_shutdown() do nothing.
@@ -266,7 +266,7 @@ static void http_connect_handshaker_do_handshake(
 gpr_mu_lock(&handshaker->mu);
 handshaker->shutdown = true;
 gpr_mu_unlock(&handshaker->mu);
-grpc_closure_sched(exec_ctx, on_handshake_done, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done, GRPC_ERROR_NONE);
 return;
 }
 GPR_ASSERT(arg->type == GRPC_ARG_STRING);
@@ -339,9 +339,9 @@ static grpc_handshaker* grpc_http_connect_handshaker_create() {
 gpr_mu_init(&handshaker->mu);
 gpr_ref_init(&handshaker->refcount, 1);
 grpc_slice_buffer_init(&handshaker->write_buffer);
-grpc_closure_init(&handshaker->request_done_closure, on_write_done,
+GRPC_CLOSURE_INIT(&handshaker->request_done_closure, on_write_done,
 handshaker, grpc_schedule_on_exec_ctx);
-grpc_closure_init(&handshaker->response_read_closure, on_read_done,
+GRPC_CLOSURE_INIT(&handshaker->response_read_closure, on_read_done,
 handshaker, grpc_schedule_on_exec_ctx);
 grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
 &handshaker->http_response);

@@ -74,7 +74,7 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
 gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
 gpr_atm check = 1 << WEAK_REF_BITS;
 if ((old_val & mask) == check) {
-grpc_closure_sched(exec_ctx, grpc_closure_create(
+GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
 shutdown_locked, policy,
 grpc_combiner_scheduler(policy->combiner)),
 GRPC_ERROR_NONE);

@@ -53,7 +53,7 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
 if (error == GRPC_ERROR_NONE) {
 calld->send_initial_metadata_succeeded = true;
 }
-grpc_closure_run(exec_ctx, calld->original_on_complete_for_send,
+GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete_for_send,
 GRPC_ERROR_REF(error));
 }
@@ -63,7 +63,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
 if (error == GRPC_ERROR_NONE) {
 calld->recv_initial_metadata_succeeded = true;
 }
-grpc_closure_run(exec_ctx, calld->original_recv_initial_metadata_ready,
+GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready,
 GRPC_ERROR_REF(error));
 }
@@ -104,7 +104,7 @@ static void start_transport_stream_op_batch(
 // Intercept send_initial_metadata.
 if (batch->send_initial_metadata) {
 calld->original_on_complete_for_send = batch->on_complete;
-grpc_closure_init(&calld->on_complete_for_send, on_complete_for_send, calld,
+GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, calld,
 grpc_schedule_on_exec_ctx);
 batch->on_complete = &calld->on_complete_for_send;
 }
@@ -112,7 +112,7 @@ static void start_transport_stream_op_batch(
 if (batch->recv_initial_metadata) {
 calld->original_recv_initial_metadata_ready =
 batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
-grpc_closure_init(&calld->recv_initial_metadata_ready,
+GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
 recv_initial_metadata_ready, calld,
 grpc_schedule_on_exec_ctx);
 batch->payload->recv_initial_metadata.recv_initial_metadata_ready =

@@ -184,7 +184,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
 wrapped_rr_closure_arg *wc_arg = arg;
 GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
 if (wc_arg->rr_policy != NULL) {
 /* if *target is NULL, no pick has been made by the RR policy (eg, all
@@ -256,7 +256,7 @@ static void add_pending_pick(pending_pick **root,
 pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
 pick_args->lb_token_mdelem_storage;
 pp->wrapped_on_complete_arg.free_when_done = pp;
-grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
+GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
 wrapped_rr_closure, &pp->wrapped_on_complete_arg,
 grpc_schedule_on_exec_ctx);
 *root = pp;
@@ -275,7 +275,7 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
 pping->wrapped_notify_arg.wrapped_closure = notify;
 pping->wrapped_notify_arg.free_when_done = pping;
 pping->next = *root;
-grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
+GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
 wrapped_rr_closure, &pping->wrapped_notify_arg,
 grpc_schedule_on_exec_ctx);
 *root = pping;
@@ -635,7 +635,7 @@ static bool pick_from_internal_rr_locked(
 grpc_grpclb_client_stats_unref(wc_arg->client_stats);
 if (force_async) {
 GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
 gpr_free(wc_arg->free_when_done);
 return false;
 }
@@ -663,7 +663,7 @@ static bool pick_from_internal_rr_locked(
 wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
 if (force_async) {
 GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
 gpr_free(wc_arg->free_when_done);
 return false;
 }
@@ -739,7 +739,7 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
 * It'll be deallocated in glb_rr_connectivity_changed() */
 rr_connectivity_data *rr_connectivity =
 gpr_zalloc(sizeof(rr_connectivity_data));
-grpc_closure_init(&rr_connectivity->on_change,
+GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
 glb_rr_connectivity_changed_locked, rr_connectivity,
 grpc_combiner_scheduler(glb_policy->base.combiner));
 rr_connectivity->glb_policy = glb_policy;
@@ -1004,7 +1004,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
 return NULL;
 }
-grpc_closure_init(&glb_policy->lb_channel_on_connectivity_changed,
+GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
 glb_lb_channel_on_connectivity_changed_cb, glb_policy,
 grpc_combiner_scheduler(args->combiner));
 grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
@@ -1078,14 +1078,14 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
 while (pp != NULL) {
 pending_pick *next = pp->next;
 *pp->target = NULL;
-grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
 GRPC_ERROR_NONE);
 pp = next;
 }
 while (pping != NULL) {
 pending_ping *next = pping->next;
-grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
 GRPC_ERROR_NONE);
 pping = next;
 }
@@ -1101,7 +1101,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 pending_pick *next = pp->next;
 if (pp->target == target) {
 *target = NULL;
-grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Pick Cancelled", &error, 1));
 } else {
@@ -1125,7 +1125,7 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
 pending_pick *next = pp->next;
 if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
 initial_metadata_flags_eq) {
-grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Pick Cancelled", &error, 1));
 } else {
@@ -1160,7 +1160,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 grpc_closure *on_complete) {
 if (pick_args->lb_token_mdelem_storage == NULL) {
 *target = NULL;
-grpc_closure_sched(exec_ctx, on_complete,
+GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING(
 "No mdelem storage for the LB token. Load reporting "
 "won't work without it. Failing"));
@@ -1179,7 +1179,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));
-grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
+GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
 grpc_schedule_on_exec_ctx);
 wc_arg->rr_policy = glb_policy->rr_policy;
 wc_arg->target = target;
@@ -1250,7 +1250,7 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
 const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
 const gpr_timespec next_client_load_report_time =
 gpr_time_add(now, glb_policy->client_stats_report_interval);
-grpc_closure_init(&glb_policy->client_load_report_closure,
+GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
 send_client_load_report_locked, glb_policy,
 grpc_combiner_scheduler(glb_policy->base.combiner));
 grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
@@ -1278,7 +1278,7 @@ static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
 memset(&op, 0, sizeof(op));
 op.op = GRPC_OP_SEND_MESSAGE;
 op.data.send_message.send_message = glb_policy->client_load_report_payload;
-grpc_closure_init(&glb_policy->client_load_report_closure,
+GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
 client_load_report_done_locked, glb_policy,
 grpc_combiner_scheduler(glb_policy->base.combiner));
 grpc_call_error call_error = grpc_call_start_batch_and_execute(
@@ -1384,13 +1384,13 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
 grpc_slice_unref_internal(exec_ctx, request_payload_slice);
 grpc_grpclb_request_destroy(request);
-grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
+GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
 lb_on_sent_initial_request_locked, glb_policy,
 grpc_combiner_scheduler(glb_policy->base.combiner));
-grpc_closure_init(&glb_policy->lb_on_server_status_received,
+GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
 lb_on_server_status_received_locked, glb_policy,
 grpc_combiner_scheduler(glb_policy->base.combiner));
-grpc_closure_init(&glb_policy->lb_on_response_received,
+GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
 lb_on_response_received_locked, glb_policy,
 grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1693,7 +1693,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
 }
 }
 GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
-grpc_closure_init(&glb_policy->lb_on_call_retry,
+GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
 lb_call_on_retry_timer_locked, glb_policy,
 grpc_combiner_scheduler(glb_policy->base.combiner));
 glb_policy->retry_timer_active = true;

@@ -118,7 +118,7 @@ static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
 while (pp != NULL) {
 pending_pick *next = pp->next;
 *pp->target = NULL;
-grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
 gpr_free(pp);
 pp = next;
 }
@@ -135,7 +135,7 @@ static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 pending_pick *next = pp->next;
 if (pp->target == target) {
 *target = NULL;
-grpc_closure_sched(exec_ctx, pp->on_complete,
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Pick Cancelled", &error, 1));
 gpr_free(pp);
@@ -160,7 +160,7 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 pending_pick *next = pp->next;
 if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
 initial_metadata_flags_eq) {
-grpc_closure_sched(exec_ctx, pp->on_complete,
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Pick Cancelled", &error, 1));
 gpr_free(pp);
@@ -258,7 +258,7 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 if (p->selected) {
 grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
 } else {
-grpc_closure_sched(exec_ctx, closure,
+GRPC_CLOSURE_SCHED(exec_ctx, closure,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
 }
 }
@@ -557,7 +557,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
 "Servicing pending pick with selected subchannel %p",
 (void *)p->selected);
 }
-grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
 gpr_free(pp);
 }
 grpc_connected_subchannel_notify_on_state_change(
@@ -610,7 +610,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
 while ((pp = p->pending_picks)) {
 p->pending_picks = pp->next;
 *pp->target = NULL;
-grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
 gpr_free(pp);
 }
 GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
@@ -654,7 +654,7 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
 pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
 pf_update_locked(exec_ctx, &p->base, args);
 grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
-grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed_locked, p,
+GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
 grpc_combiner_scheduler(args->combiner));
 return &p->base;
 }

@@ -288,7 +288,7 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
 while ((pp = p->pending_picks)) {
 p->pending_picks = pp->next;
 *pp->target = NULL;
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, pp->on_complete,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
 gpr_free(pp);
@@ -311,7 +311,7 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 pending_pick *next = pp->next;
 if (pp->target == target) {
 *target = NULL;
-grpc_closure_sched(exec_ctx, pp->on_complete,
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Pick cancelled", &error, 1));
 gpr_free(pp);
@@ -336,7 +336,7 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
 initial_metadata_flags_eq) {
 *pp->target = NULL;
-grpc_closure_sched(exec_ctx, pp->on_complete,
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "Pick cancelled", &error, 1));
 gpr_free(pp);
@@ -553,7 +553,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
 while ((pp = p->pending_picks)) {
 p->pending_picks = pp->next;
 *pp->target = NULL;
-grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
 gpr_free(pp);
 }
 }
@@ -614,7 +614,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
 (void *)selected->subchannel,
 (unsigned long)next_ready_index);
 }
-grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
 gpr_free(pp);
 }
 }
@@ -655,7 +655,7 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 grpc_connected_subchannel_ping(exec_ctx, target, closure);
 GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
 } else {
-grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
 "Round Robin not connected"));
 }
 }
@@ -747,7 +747,7 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
 subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++];
 sd->subchannel_list = subchannel_list;
 sd->subchannel = subchannel;
-grpc_closure_init(&sd->connectivity_changed_closure,
+GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
 rr_connectivity_changed_locked, sd,
 grpc_combiner_scheduler(args->combiner));
 /* use some sentinel value outside of the range of

@@ -116,7 +116,7 @@ static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
 }
 if (r->next_completion != NULL) {
 *r->target_result = NULL;
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, r->next_completion,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
 r->next_completion = NULL;
@@ -221,7 +221,7 @@ static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
 ? NULL
 : grpc_channel_args_copy(r->resolved_result);
 gpr_log(GPR_DEBUG, "dns_ares_maybe_finish_next_locked");
-grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
 r->next_completion = NULL;
 r->published_version = r->resolved_version;
 }
@@ -266,10 +266,10 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
 GRPC_DNS_RECONNECT_JITTER,
 GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
 GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
-grpc_closure_init(&r->dns_ares_on_retry_timer_locked,
+GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
 dns_ares_on_retry_timer_locked, r,
 grpc_combiner_scheduler(r->base.combiner));
-grpc_closure_init(&r->dns_ares_on_resolved_locked,
+GRPC_CLOSURE_INIT(&r->dns_ares_on_resolved_locked,
 dns_ares_on_resolved_locked, r,
 grpc_combiner_scheduler(r->base.combiner));
 return &r->base;

@@ -257,9 +257,9 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
 fdn->readable_registered = false;
 fdn->writable_registered = false;
 gpr_mu_init(&fdn->mu);
-grpc_closure_init(&fdn->read_closure, on_readable_cb, fdn,
+GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn,
 grpc_schedule_on_exec_ctx);
-grpc_closure_init(&fdn->write_closure, on_writable_cb, fdn,
+GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn,
 grpc_schedule_on_exec_ctx);
 grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set,
 fdn->grpc_fd);

@@ -107,10 +107,10 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
 acquire locks in on_done. ares_dns_resolver is using combiner to
 protect resources needed by on_done. */
 grpc_exec_ctx new_exec_ctx = GRPC_EXEC_CTX_INIT;
-grpc_closure_sched(&new_exec_ctx, r->on_done, r->error);
+GRPC_CLOSURE_SCHED(&new_exec_ctx, r->on_done, r->error);
 grpc_exec_ctx_finish(&new_exec_ctx);
 } else {
-grpc_closure_sched(exec_ctx, r->on_done, r->error);
+GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, r->error);
 }
 gpr_mu_destroy(&r->mu);
 grpc_ares_ev_driver_destroy(r->ev_driver);
@@ -370,7 +370,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
 return r;
 error_cleanup:
-grpc_closure_sched(exec_ctx, on_done, error);
+GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
 gpr_free(host);
 gpr_free(port);
 return NULL;
@@ -445,7 +445,7 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
 &r->lb_addrs->addresses[i].address, sizeof(grpc_resolved_address));
 }
 }
-grpc_closure_sched(exec_ctx, r->on_resolve_address_done,
+GRPC_CLOSURE_SCHED(exec_ctx, r->on_resolve_address_done,
 GRPC_ERROR_REF(error));
 grpc_lb_addresses_destroy(exec_ctx, r->lb_addrs);
 gpr_free(r);
@@ -461,7 +461,7 @@ static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
 gpr_zalloc(sizeof(grpc_resolve_address_ares_request));
 r->addrs_out = addrs;
 r->on_resolve_address_done = on_done;
-grpc_closure_init(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
+GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
 grpc_schedule_on_exec_ctx);
 grpc_dns_lookup_ares(exec_ctx, NULL /* dns_server */, name, default_port,
 interested_parties, &r->on_dns_lookup_done, &r->lb_addrs,

@@ -99,7 +99,7 @@ static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
 }
 if (r->next_completion != NULL) {
 *r->target_result = NULL;
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, r->next_completion,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
 r->next_completion = NULL;
@@ -178,7 +178,7 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
 } else {
 gpr_log(GPR_DEBUG, "retrying immediately");
 }
-grpc_closure_init(&r->on_retry, dns_on_retry_timer_locked, r,
+GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
 grpc_combiner_scheduler(r->base.combiner));
 grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
 }
@@ -200,7 +200,7 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
 r->addresses = NULL;
 grpc_resolve_address(
 exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
-grpc_closure_create(dns_on_resolved_locked, r,
+GRPC_CLOSURE_CREATE(dns_on_resolved_locked, r,
 grpc_combiner_scheduler(r->base.combiner)),
 &r->addresses);
 }
@@ -212,7 +212,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
 *r->target_result = r->resolved_result == NULL
 ? NULL
 : grpc_channel_args_copy(r->resolved_result);
-grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
 r->next_completion = NULL;
 r->published_version = r->resolved_version;
 }

@@ -74,7 +74,7 @@ static void fake_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
 fake_resolver* r = (fake_resolver*)resolver;
 if (r->next_completion != NULL) {
 *r->target_result = NULL;
-grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
 r->next_completion = NULL;
 }
 }
@@ -85,7 +85,7 @@ static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
 *r->target_result =
 grpc_channel_args_union(r->next_results, r->channel_args);
 grpc_channel_args_destroy(exec_ctx, r->next_results);
-grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
 r->next_completion = NULL;
 r->next_results = NULL;
 }
@@ -157,8 +157,8 @@ void grpc_fake_resolver_response_generator_set_response(
 grpc_channel_args* next_response) {
 GPR_ASSERT(generator->resolver != NULL);
 generator->next_response = grpc_channel_args_copy(next_response);
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
-exec_ctx, grpc_closure_create(set_response_cb, generator,
+exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, generator,
 grpc_combiner_scheduler(
 generator->resolver->base.combiner)),
 GRPC_ERROR_NONE);

@ -73,7 +73,7 @@ static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r = (sockaddr_resolver *)resolver; sockaddr_resolver *r = (sockaddr_resolver *)resolver;
if (r->next_completion != NULL) { if (r->next_completion != NULL) {
*r->target_result = NULL; *r->target_result = NULL;
grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL; r->next_completion = NULL;
} }
} }
@ -103,7 +103,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses); grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
*r->target_result = *r->target_result =
grpc_channel_args_copy_and_add(r->channel_args, &arg, 1); grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL; r->next_completion = NULL;
} }
} }

@ -283,7 +283,7 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_refs; gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF")); old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) { if (old_refs == 1) {
grpc_closure_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c, GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(subchannel_destroy, c,
grpc_schedule_on_exec_ctx), grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} }
@ -333,7 +333,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args); if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev = c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher; &c->root_external_state_watcher;
grpc_closure_init(&c->connected, subchannel_connected, c, GRPC_CLOSURE_INIT(&c->connected, subchannel_connected, c,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE, grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
"subchannel"); "subchannel");
@ -421,7 +421,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&w->subchannel->mu); gpr_mu_unlock(&w->subchannel->mu);
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher"); GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
gpr_free(w); gpr_free(w);
grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error)); GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
} }
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@ -488,7 +488,7 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds", gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
time_til_next.tv_sec, time_til_next.tv_nsec); time_til_next.tv_sec, time_til_next.tv_nsec);
} }
grpc_closure_init(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now); grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now);
} }
} }
@ -514,7 +514,7 @@ void grpc_subchannel_notify_on_state_change(
w->subchannel = c; w->subchannel = c;
w->pollset_set = interested_parties; w->pollset_set = interested_parties;
w->notify = notify; w->notify = notify;
grpc_closure_init(&w->closure, on_external_state_watcher_done, w, GRPC_CLOSURE_INIT(&w->closure, on_external_state_watcher_done, w,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
if (interested_parties != NULL) { if (interested_parties != NULL) {
grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set, grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
@ -635,7 +635,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
sw_subchannel = gpr_malloc(sizeof(*sw_subchannel)); sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c; sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY; sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed, GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
sw_subchannel, grpc_schedule_on_exec_ctx); sw_subchannel, grpc_schedule_on_exec_ctx);
if (c->disconnected) { if (c->disconnected) {

@ -74,7 +74,7 @@ retry:
// If we've already created and destroyed a timer, we always create a // If we've already created and destroyed a timer, we always create a
// new closure: we have no other guarantee that the inlined closure is // new closure: we have no other guarantee that the inlined closure is
// not in use (it may hold a pending call to timer_callback) // not in use (it may hold a pending call to timer_callback)
closure = grpc_closure_create(timer_callback, elem, closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
} else { } else {
goto retry; goto retry;
@ -85,7 +85,7 @@ retry:
GRPC_DEADLINE_STATE_INITIAL, GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING)) { GRPC_DEADLINE_STATE_PENDING)) {
closure = closure =
grpc_closure_init(&deadline_state->timer_callback, timer_callback, GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
elem, grpc_schedule_on_exec_ctx); elem, grpc_schedule_on_exec_ctx);
} else { } else {
goto retry; goto retry;
@ -115,7 +115,7 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_deadline_state* deadline_state = arg; grpc_deadline_state* deadline_state = arg;
cancel_timer_if_needed(exec_ctx, deadline_state); cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback. // Invoke the next callback.
grpc_closure_run(exec_ctx, deadline_state->next_on_complete, GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete,
GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
@ -123,7 +123,7 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
static void inject_on_complete_cb(grpc_deadline_state* deadline_state, static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
grpc_transport_stream_op_batch* op) { grpc_transport_stream_op_batch* op) {
deadline_state->next_on_complete = op->on_complete; deadline_state->next_on_complete = op->on_complete;
grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state, GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
op->on_complete = &deadline_state->on_complete; op->on_complete = &deadline_state->on_complete;
} }
@ -161,9 +161,9 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state)); struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
state->elem = elem; state->elem = elem;
state->deadline = deadline; state->deadline = deadline;
grpc_closure_init(&state->closure, start_timer_after_init, state, GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, &state->closure, GRPC_ERROR_NONE);
} }
} }
@ -281,7 +281,7 @@ static void server_start_transport_stream_op_batch(
op->payload->recv_initial_metadata.recv_initial_metadata_ready; op->payload->recv_initial_metadata.recv_initial_metadata_ready;
calld->recv_initial_metadata = calld->recv_initial_metadata =
op->payload->recv_initial_metadata.recv_initial_metadata; op->payload->recv_initial_metadata.recv_initial_metadata;
grpc_closure_init(&calld->recv_initial_metadata_ready, GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem, recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
op->payload->recv_initial_metadata.recv_initial_metadata_ready = op->payload->recv_initial_metadata.recv_initial_metadata_ready =

@ -158,7 +158,7 @@ static void hc_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
} else { } else {
GRPC_ERROR_REF(error); GRPC_ERROR_REF(error);
} }
grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error); GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error);
} }
static void hc_on_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, static void hc_on_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
@ -171,7 +171,7 @@ static void hc_on_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
} else { } else {
GRPC_ERROR_REF(error); GRPC_ERROR_REF(error);
} }
grpc_closure_run(exec_ctx, calld->on_done_recv_trailing_metadata, error); GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_trailing_metadata, error);
} }
static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data, static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
@ -445,17 +445,17 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
calld->payload_bytes = NULL; calld->payload_bytes = NULL;
calld->send_message_blocked = false; calld->send_message_blocked = false;
grpc_slice_buffer_init(&calld->slices); grpc_slice_buffer_init(&calld->slices);
grpc_closure_init(&calld->hc_on_recv_initial_metadata, GRPC_CLOSURE_INIT(&calld->hc_on_recv_initial_metadata,
hc_on_recv_initial_metadata, elem, hc_on_recv_initial_metadata, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->hc_on_recv_trailing_metadata, GRPC_CLOSURE_INIT(&calld->hc_on_recv_trailing_metadata,
hc_on_recv_trailing_metadata, elem, hc_on_recv_trailing_metadata, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem, GRPC_CLOSURE_INIT(&calld->hc_on_complete, hc_on_complete, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->got_slice, got_slice, elem, GRPC_CLOSURE_INIT(&calld->got_slice, got_slice, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->send_done, send_done, elem, GRPC_CLOSURE_INIT(&calld->send_done, send_done, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }

@ -364,9 +364,9 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */ /* initialize members */
grpc_slice_buffer_init(&calld->slices); grpc_slice_buffer_init(&calld->slices);
grpc_closure_init(&calld->got_slice, got_slice, elem, GRPC_CLOSURE_INIT(&calld->got_slice, got_slice, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->send_done, send_done, elem, GRPC_CLOSURE_INIT(&calld->send_done, send_done, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;

@ -269,7 +269,7 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
} else { } else {
GRPC_ERROR_REF(err); GRPC_ERROR_REF(err);
} }
grpc_closure_run(exec_ctx, calld->on_done_recv, err); GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv, err);
} }
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data, static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
@ -281,11 +281,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered *calld->pp_recv_message = calld->payload_bin_delivered
? NULL ? NULL
: (grpc_byte_stream *)&calld->read_stream; : (grpc_byte_stream *)&calld->read_stream;
grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
calld->recv_message_ready = NULL; calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true; calld->payload_bin_delivered = true;
} }
grpc_closure_run(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err)); GRPC_CLOSURE_RUN(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
} }
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data, static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
@ -296,7 +296,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* do nothing. This is probably a GET request, and payload will be returned /* do nothing. This is probably a GET request, and payload will be returned
in hs_on_complete callback. */ in hs_on_complete callback. */
} else { } else {
grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
} }
} }
@ -383,11 +383,11 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* grab pointers to our data from the call element */ /* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
/* initialize members */ /* initialize members */
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem, GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem, GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem, GRPC_CLOSURE_INIT(&calld->hs_recv_message_ready, hs_recv_message_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&calld->read_slice_buffer); grpc_slice_buffer_init(&calld->read_slice_buffer);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;

@ -90,7 +90,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) { const grpc_call_element_args *args) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
calld->id = (intptr_t)args->call_stack; calld->id = (intptr_t)args->call_stack;
grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem, GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
/* TODO(dgq): do something with the data /* TODO(dgq): do something with the data

@ -329,23 +329,23 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
: gpr_time_from_millis(value, GPR_TIMESPAN); : gpr_time_from_millis(value, GPR_TIMESPAN);
} }
} }
grpc_closure_init(&chand->close_max_idle_channel, close_max_idle_channel, GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel,
chand, grpc_schedule_on_exec_ctx); chand, grpc_schedule_on_exec_ctx);
grpc_closure_init(&chand->close_max_age_channel, close_max_age_channel, chand, GRPC_CLOSURE_INIT(&chand->close_max_age_channel, close_max_age_channel, chand,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&chand->force_close_max_age_channel, GRPC_CLOSURE_INIT(&chand->force_close_max_age_channel,
force_close_max_age_channel, chand, force_close_max_age_channel, chand,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&chand->start_max_idle_timer_after_init, GRPC_CLOSURE_INIT(&chand->start_max_idle_timer_after_init,
start_max_idle_timer_after_init, chand, start_max_idle_timer_after_init, chand,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&chand->start_max_age_timer_after_init, GRPC_CLOSURE_INIT(&chand->start_max_age_timer_after_init,
start_max_age_timer_after_init, chand, start_max_age_timer_after_init, chand,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&chand->start_max_age_grace_timer_after_goaway_op, GRPC_CLOSURE_INIT(&chand->start_max_age_grace_timer_after_goaway_op,
start_max_age_grace_timer_after_goaway_op, chand, start_max_age_grace_timer_after_goaway_op, chand,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&chand->channel_connectivity_changed, GRPC_CLOSURE_INIT(&chand->channel_connectivity_changed,
channel_connectivity_changed, chand, channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
@ -360,7 +360,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
initialization is done. */ initialization is done. */
GRPC_CHANNEL_STACK_REF(chand->channel_stack, GRPC_CHANNEL_STACK_REF(chand->channel_stack,
"max_age start_max_age_timer_after_init"); "max_age start_max_age_timer_after_init");
grpc_closure_sched(exec_ctx, &chand->start_max_age_timer_after_init, GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_age_timer_after_init,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} }
@ -371,7 +371,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
0) { 0) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack, GRPC_CHANNEL_STACK_REF(chand->channel_stack,
"max_age start_max_idle_timer_after_init"); "max_age start_max_idle_timer_after_init");
grpc_closure_sched(exec_ctx, &chand->start_max_idle_timer_after_init, GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} }
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;

@ -110,7 +110,7 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
GRPC_ERROR_REF(error); GRPC_ERROR_REF(error);
} }
// Invoke the next callback. // Invoke the next callback.
grpc_closure_run(exec_ctx, calld->next_recv_message_ready, error); GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_message_ready, error);
} }
// Start transport stream op. // Start transport stream op.
@ -152,7 +152,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
channel_data* chand = elem->channel_data; channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data; call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL; calld->next_recv_message_ready = NULL;
grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem, GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
// Get max sizes from channel data, then merge in per-method config values. // Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we // Note: Per-method config is only available on the client, so we

@ -67,7 +67,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
} }
// Invoke the next callback. // Invoke the next callback.
grpc_closure_run(exec_ctx, calld->next_recv_initial_metadata_ready, GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_initial_metadata_ready,
GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
@ -106,7 +106,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
call_data* calld = elem->call_data; call_data* calld = elem->call_data;
calld->next_recv_initial_metadata_ready = NULL; calld->next_recv_initial_metadata_ready = NULL;
calld->workaround_active = false; calld->workaround_active = false;
grpc_closure_init(&calld->recv_initial_metadata_ready, GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem, recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;

@ -124,7 +124,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
} }
grpc_closure *notify = c->notify; grpc_closure *notify = c->notify;
c->notify = NULL; c->notify = NULL;
grpc_closure_sched(exec_ctx, notify, error); GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr); grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
c->handshake_mgr = NULL; c->handshake_mgr = NULL;
gpr_mu_unlock(&c->mu); gpr_mu_unlock(&c->mu);
@ -156,7 +156,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
memset(c->result, 0, sizeof(*c->result)); memset(c->result, 0, sizeof(*c->result));
grpc_closure *notify = c->notify; grpc_closure *notify = c->notify;
c->notify = NULL; c->notify = NULL;
grpc_closure_sched(exec_ctx, notify, error); GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
if (c->endpoint != NULL) { if (c->endpoint != NULL) {
grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error)); grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
} }
@ -184,7 +184,7 @@ static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
c->result = result; c->result = result;
GPR_ASSERT(c->endpoint == NULL); GPR_ASSERT(c->endpoint == NULL);
chttp2_connector_ref(con); // Ref taken for callback. chttp2_connector_ref(con); // Ref taken for callback.
grpc_closure_init(&c->connected, connected, c, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
GPR_ASSERT(!c->connecting); GPR_ASSERT(!c->connecting);
c->connecting = true; c->connecting = true;
grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint, grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,

@ -209,7 +209,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
goto error; goto error;
} }
state = gpr_zalloc(sizeof(*state)); state = gpr_zalloc(sizeof(*state));
grpc_closure_init(&state->tcp_server_shutdown_complete, GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state, tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete, err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete,

@ -269,30 +269,30 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_slice_buffer_init(&t->outbuf); grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor); grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
grpc_closure_init(&t->write_action, write_action, t, GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&t->read_action_locked, read_action_locked, t, GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t, GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->destructive_reclaimer_locked, GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked,
destructive_reclaimer_locked, t, destructive_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->retry_initiate_ping_locked, retry_initiate_ping_locked, GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked, retry_initiate_ping_locked,
t, grpc_combiner_scheduler(t->combiner)); t, grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->start_bdp_ping_locked, start_bdp_ping_locked, t, GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked, start_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t, GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->init_keepalive_ping_locked, init_keepalive_ping_locked, GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
t, grpc_combiner_scheduler(t->combiner)); t, grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->start_keepalive_ping_locked, GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t, start_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->finish_keepalive_ping_locked, GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t, finish_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->keepalive_watchdog_fired_locked, GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
keepalive_watchdog_fired_locked, t, keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
@ -567,8 +567,8 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) { static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_closure_sched(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
grpc_closure_create(destroy_transport_locked, t, GRPC_CLOSURE_CREATE(destroy_transport_locked, t,
grpc_combiner_scheduler(t->combiner)), grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} }
@ -656,12 +656,12 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_data_parser_init(&s->data_parser); grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer); grpc_slice_buffer_init(&s->flow_controlled_buffer);
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s, GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer); grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_init(&s->frame_storage); grpc_slice_buffer_init(&s->frame_storage);
s->pending_byte_stream = false; s->pending_byte_stream = false;
grpc_closure_init(&s->reset_byte_stream, reset_byte_stream, s, GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
grpc_combiner_scheduler(t->combiner)); grpc_combiner_scheduler(t->combiner));
GRPC_CHTTP2_REF_TRANSPORT(t, "stream"); GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
@ -733,7 +733,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GPR_TIMER_END("destroy_stream", 0); GPR_TIMER_END("destroy_stream", 0);
grpc_closure_sched(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE);
} }
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@ -744,8 +744,8 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
s->destroy_stream_arg = then_schedule_closure; s->destroy_stream_arg = then_schedule_closure;
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s, exec_ctx, GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
grpc_combiner_scheduler(t->combiner)), grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
GPR_TIMER_END("destroy_stream", 0); GPR_TIMER_END("destroy_stream", 0);
@ -796,7 +796,7 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
write_state_name(st), reason)); write_state_name(st), reason));
t->write_state = st; t->write_state = st;
if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) { if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
grpc_closure_list_sched(exec_ctx, &t->run_after_write); GRPC_CLOSURE_LIST_SCHED(exec_ctx, &t->run_after_write);
if (t->close_transport_on_writes_finished != NULL) { if (t->close_transport_on_writes_finished != NULL) {
grpc_error *err = t->close_transport_on_writes_finished; grpc_error *err = t->close_transport_on_writes_finished;
t->close_transport_on_writes_finished = NULL; t->close_transport_on_writes_finished = NULL;
@ -813,9 +813,9 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_WRITE_STATE_IDLE: case GRPC_CHTTP2_WRITE_STATE_IDLE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason); set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
GRPC_CHTTP2_REF_TRANSPORT(t, "writing"); GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
grpc_closure_init(&t->write_action_begin_locked, GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
write_action_begin_locked, t, write_action_begin_locked, t,
grpc_combiner_finally_scheduler(t->combiner)), grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -863,12 +863,12 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
case GRPC_CHTTP2_PARTIAL_WRITE: case GRPC_CHTTP2_PARTIAL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE, set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
"begin writing partial"); "begin writing partial");
grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break; break;
case GRPC_CHTTP2_FULL_WRITE: case GRPC_CHTTP2_FULL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"begin writing"); "begin writing");
grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break; break;
} }
GPR_TIMER_END("write_action_begin_locked", 0); GPR_TIMER_END("write_action_begin_locked", 0);
@ -879,7 +879,7 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
GPR_TIMER_BEGIN("write_action", 0); GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write( grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf, exec_ctx, t->ep, &t->outbuf,
grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t, GRPC_CLOSURE_INIT(&t->write_action_end_locked, write_action_end_locked, t,
grpc_combiner_scheduler(t->combiner))); grpc_combiner_scheduler(t->combiner)));
GPR_TIMER_END("write_action", 0); GPR_TIMER_END("write_action", 0);
} }
@ -914,9 +914,9 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [!covered]"); "continue writing [!covered]");
GRPC_CHTTP2_REF_TRANSPORT(t, "writing"); GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
grpc_closure_run( GRPC_CLOSURE_RUN(
exec_ctx, exec_ctx,
grpc_closure_init(&t->write_action_begin_locked, GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
write_action_begin_locked, t, write_action_begin_locked, t,
grpc_combiner_finally_scheduler(t->combiner)), grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -1046,7 +1046,7 @@ static void null_then_run_closure(grpc_exec_ctx *exec_ctx,
grpc_closure **closure, grpc_error *error) { grpc_closure **closure, grpc_error *error) {
grpc_closure *c = *closure; grpc_closure *c = *closure;
*closure = NULL; *closure = NULL;
grpc_closure_run(exec_ctx, c, error); GRPC_CLOSURE_RUN(exec_ctx, c, error);
} }
void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
@ -1088,7 +1088,7 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
} }
if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) || if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) ||
!(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) { !(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) {
grpc_closure_run(exec_ctx, closure, closure->error_data.error); GRPC_CLOSURE_RUN(exec_ctx, closure, closure->error_data.error);
} else { } else {
grpc_closure_list_append(&t->run_after_write, closure, grpc_closure_list_append(&t->run_after_write, closure,
closure->error_data.error); closure->error_data.error);
@ -1224,7 +1224,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_closure *on_complete = op->on_complete; grpc_closure *on_complete = op->on_complete;
if (on_complete == NULL) { if (on_complete == NULL) {
on_complete = on_complete =
grpc_closure_create(do_nothing, NULL, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_CREATE(do_nothing, NULL, grpc_schedule_on_exec_ctx);
} }
/* use final_data as a barrier until enqueue time; the inital counter is /* use final_data as a barrier until enqueue time; the inital counter is
@ -1456,9 +1456,9 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
op->handler_private.extra_arg = gs; op->handler_private.extra_arg = gs;
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op"); GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
grpc_closure_init(&op->handler_private.closure, perform_stream_op_locked, GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_stream_op_locked,
op, grpc_combiner_scheduler(t->combiner)), op, grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
GPR_TIMER_END("perform_stream_op", 0); GPR_TIMER_END("perform_stream_op", 0);
@ -1472,7 +1472,7 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_ping_queue *pq = &t->ping_queues[i]; grpc_chttp2_ping_queue *pq = &t->ping_queues[i];
for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) { for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error)); grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
grpc_closure_list_sched(exec_ctx, &pq->lists[j]); GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
} }
} }
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
@ -1507,7 +1507,7 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_free(from); gpr_free(from);
return; return;
} }
grpc_closure_list_sched(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]); GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) { if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
grpc_chttp2_initiate_write(exec_ctx, t, "continue_pings"); grpc_chttp2_initiate_write(exec_ctx, t, "continue_pings");
} }
@ -1581,7 +1581,7 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
close_transport_locked(exec_ctx, t, close_transport); close_transport_locked(exec_ctx, t, close_transport);
} }
grpc_closure_run(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); GRPC_CLOSURE_RUN(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op"); GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op");
} }
@ -1593,8 +1593,8 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_free(msg); gpr_free(msg);
op->handler_private.extra_arg = gt; op->handler_private.extra_arg = gt;
GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op"); GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
grpc_closure_sched(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
grpc_closure_init(&op->handler_private.closure, GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_transport_op_locked, op, perform_transport_op_locked, op,
grpc_combiner_scheduler(t->combiner)), grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -2472,7 +2472,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, s->t, s); grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, s->t, s);
} else { } else {
GPR_ASSERT(error != GRPC_ERROR_NONE); GPR_ASSERT(error != GRPC_ERROR_NONE);
grpc_closure_sched(exec_ctx, s->on_next, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
s->on_next = NULL; s->on_next = NULL;
GRPC_ERROR_UNREF(s->byte_stream_error); GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE; s->byte_stream_error = GRPC_ERROR_NONE;
@ -2551,9 +2551,9 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
if (s->frame_storage.length > 0) { if (s->frame_storage.length > 0) {
grpc_slice_buffer_swap(&s->frame_storage, grpc_slice_buffer_swap(&s->frame_storage,
&s->unprocessed_incoming_frames_buffer); &s->unprocessed_incoming_frames_buffer);
grpc_closure_sched(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE);
} else if (s->byte_stream_error != GRPC_ERROR_NONE) { } else if (s->byte_stream_error != GRPC_ERROR_NONE) {
grpc_closure_sched(exec_ctx, bs->next_action.on_complete, GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete,
GRPC_ERROR_REF(s->byte_stream_error)); GRPC_ERROR_REF(s->byte_stream_error));
if (s->data_parser.parsing_frame != NULL) { if (s->data_parser.parsing_frame != NULL) {
incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame); incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame);
@ -2563,7 +2563,7 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
if (bs->remaining_bytes != 0) { if (bs->remaining_bytes != 0) {
s->byte_stream_error = s->byte_stream_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message"); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
grpc_closure_sched(exec_ctx, bs->next_action.on_complete, GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete,
GRPC_ERROR_REF(s->byte_stream_error)); GRPC_ERROR_REF(s->byte_stream_error));
if (s->data_parser.parsing_frame != NULL) { if (s->data_parser.parsing_frame != NULL) {
incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame); incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame);
@ -2594,9 +2594,9 @@ static bool incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
gpr_ref(&bs->refs); gpr_ref(&bs->refs);
bs->next_action.max_size_hint = max_size_hint; bs->next_action.max_size_hint = max_size_hint;
bs->next_action.on_complete = on_complete; bs->next_action.on_complete = on_complete;
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
grpc_closure_init(&bs->next_action.closure, GRPC_CLOSURE_INIT(&bs->next_action.closure,
incoming_byte_stream_next_locked, bs, incoming_byte_stream_next_locked, bs,
grpc_combiner_scheduler(bs->transport->combiner)), grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -2623,7 +2623,7 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
} else { } else {
grpc_error *error = grpc_error *error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message"); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
grpc_closure_sched(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
return error; return error;
} }
GPR_TIMER_END("incoming_byte_stream_pull", 0); GPR_TIMER_END("incoming_byte_stream_pull", 0);
@ -2652,8 +2652,8 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0); GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0);
grpc_chttp2_incoming_byte_stream *bs = grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)byte_stream; (grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, grpc_closure_init( exec_ctx, GRPC_CLOSURE_INIT(
&bs->destroy_action, incoming_byte_stream_destroy_locked, &bs->destroy_action, incoming_byte_stream_destroy_locked,
bs, grpc_combiner_scheduler(bs->transport->combiner)), bs, grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -2666,7 +2666,7 @@ static void incoming_byte_stream_publish_error(
grpc_chttp2_stream *s = bs->stream; grpc_chttp2_stream *s = bs->stream;
GPR_ASSERT(error != GRPC_ERROR_NONE); GPR_ASSERT(error != GRPC_ERROR_NONE);
grpc_closure_sched(exec_ctx, s->on_next, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
s->on_next = NULL; s->on_next = NULL;
GRPC_ERROR_UNREF(s->byte_stream_error); GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_REF(error); s->byte_stream_error = GRPC_ERROR_REF(error);
@ -2683,7 +2683,7 @@ grpc_error *grpc_chttp2_incoming_byte_stream_push(
grpc_error *error = grpc_error *error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream"); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream");
grpc_closure_sched(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
grpc_slice_unref_internal(exec_ctx, slice); grpc_slice_unref_internal(exec_ctx, slice);
return error; return error;
} else { } else {
@ -2706,7 +2706,7 @@ grpc_error *grpc_chttp2_incoming_byte_stream_finished(
} }
} }
if (error != GRPC_ERROR_NONE && reset_on_error) { if (error != GRPC_ERROR_NONE && reset_on_error) {
grpc_closure_sched(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
} }
incoming_byte_stream_unref(exec_ctx, bs); incoming_byte_stream_unref(exec_ctx, bs);
return error; return error;
@ -2940,5 +2940,5 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_move_into(read_buffer, &t->read_buffer); grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer); gpr_free(read_buffer);
} }
grpc_closure_sched(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE);
} }

@ -295,7 +295,7 @@ grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
GPR_ASSERT(s->frame_storage.length == 0); GPR_ASSERT(s->frame_storage.length == 0);
grpc_slice_ref_internal(slice); grpc_slice_ref_internal(slice);
grpc_slice_buffer_add(&s->unprocessed_incoming_frames_buffer, slice); grpc_slice_buffer_add(&s->unprocessed_incoming_frames_buffer, slice);
grpc_closure_sched(exec_ctx, s->on_next, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_NONE);
s->on_next = NULL; s->on_next = NULL;
} else { } else {
grpc_slice_ref_internal(slice); grpc_slice_ref_internal(slice);

@ -1696,9 +1696,9 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
however -- it might be that we receive a RST_STREAM following this however -- it might be that we receive a RST_STREAM following this
and can avoid the extra write */ and can avoid the extra write */
GRPC_CHTTP2_STREAM_REF(s, "final_rst"); GRPC_CHTTP2_STREAM_REF(s, "final_rst");
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
grpc_closure_create(force_client_rst_stream, s, GRPC_CLOSURE_CREATE(force_client_rst_stream, s,
grpc_combiner_finally_scheduler(t->combiner)), grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} }

@ -111,7 +111,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
} }
pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type; pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type;
t->ping_ctr++; t->ping_ctr++;
grpc_closure_list_sched(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]); GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT], grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]); &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
grpc_slice_buffer_add(&t->outbuf, grpc_slice_buffer_add(&t->outbuf,

@ -1033,17 +1033,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
OP_RECV_INITIAL_METADATA)) { OP_RECV_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas); CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) { if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready, stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} else if (stream_state->state_callback_received[OP_FAILED]) { } else if (stream_state->state_callback_received[OP_FAILED]) {
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready, stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} else if (stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) { } else if (stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready, stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -1051,7 +1051,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_metadata_buffer_publish( grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &oas->s->state.rs.initial_metadata, exec_ctx, &oas->s->state.rs.initial_metadata,
stream_op->payload->recv_initial_metadata.recv_initial_metadata); stream_op->payload->recv_initial_metadata.recv_initial_metadata);
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready, stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -1063,14 +1063,14 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas); CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) { if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
CRONET_LOG(GPR_DEBUG, "Stream is cancelled."); CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
grpc_closure_sched(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true; stream_state->state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK; result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->state_callback_received[OP_FAILED]) { } else if (stream_state->state_callback_received[OP_FAILED]) {
CRONET_LOG(GPR_DEBUG, "Stream failed."); CRONET_LOG(GPR_DEBUG, "Stream failed.");
grpc_closure_sched(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true; stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@ -1078,7 +1078,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else if (stream_state->rs.read_stream_closed == true) { } else if (stream_state->rs.read_stream_closed == true) {
/* No more data will be received */ /* No more data will be received */
CRONET_LOG(GPR_DEBUG, "read stream closed"); CRONET_LOG(GPR_DEBUG, "read stream closed");
grpc_closure_sched(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true; stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@ -1086,7 +1086,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
result = ACTION_TAKEN_NO_CALLBACK; result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->flush_read) { } else if (stream_state->flush_read) {
CRONET_LOG(GPR_DEBUG, "flush read"); CRONET_LOG(GPR_DEBUG, "flush read");
grpc_closure_sched(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true; stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@ -1127,7 +1127,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
*((grpc_byte_buffer **) *((grpc_byte_buffer **)
stream_op->payload->recv_message.recv_message) = stream_op->payload->recv_message.recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs; (grpc_byte_buffer *)&stream_state->rs.sbs;
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, stream_op->payload->recv_message.recv_message_ready, exec_ctx, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true; stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@ -1181,7 +1181,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} }
*((grpc_byte_buffer **)stream_op->payload->recv_message.recv_message) = *((grpc_byte_buffer **)stream_op->payload->recv_message.recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs; (grpc_byte_buffer *)&stream_state->rs.sbs;
grpc_closure_sched(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true; stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@ -1230,17 +1230,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
op_can_be_run(stream_op, s, &oas->state, OP_ON_COMPLETE)) { op_can_be_run(stream_op, s, &oas->state, OP_ON_COMPLETE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas); CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) { if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
grpc_closure_sched(exec_ctx, stream_op->on_complete, GRPC_CLOSURE_SCHED(exec_ctx, stream_op->on_complete,
GRPC_ERROR_REF(stream_state->cancel_error)); GRPC_ERROR_REF(stream_state->cancel_error));
} else if (stream_state->state_callback_received[OP_FAILED]) { } else if (stream_state->state_callback_received[OP_FAILED]) {
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, stream_op->on_complete, exec_ctx, stream_op->on_complete,
make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable.")); make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
} else { } else {
/* All actions in this stream_op are complete. Call the on_complete /* All actions in this stream_op are complete. Call the on_complete
* callback * callback
*/ */
grpc_closure_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE);
} }
oas->state.state_op_done[OP_ON_COMPLETE] = true; oas->state.state_op_done[OP_ON_COMPLETE] = true;
oas->done = true; oas->done = true;
@ -1312,16 +1312,16 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
/* Cronet does not support :authority header field. We cancel the call when /* Cronet does not support :authority header field. We cancel the call when
this field is present in metadata */ this field is present in metadata */
if (op->recv_initial_metadata) { if (op->recv_initial_metadata) {
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx,
op->payload->recv_initial_metadata.recv_initial_metadata_ready, op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_CANCELLED); GRPC_ERROR_CANCELLED);
} }
if (op->recv_message) { if (op->recv_message) {
grpc_closure_sched(exec_ctx, op->payload->recv_message.recv_message_ready, GRPC_CLOSURE_SCHED(exec_ctx, op->payload->recv_message.recv_message_ready,
GRPC_ERROR_CANCELLED); GRPC_ERROR_CANCELLED);
} }
grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED); GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED);
return; return;
} }
stream_obj *s = (stream_obj *)gs; stream_obj *s = (stream_obj *)gs;
@ -1335,7 +1335,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
stream_obj *s = (stream_obj *)gs; stream_obj *s = (stream_obj *)gs;
null_and_maybe_free_read_buffer(s); null_and_maybe_free_read_buffer(s);
GRPC_ERROR_UNREF(s->state.cancel_error); GRPC_ERROR_UNREF(s->state.cancel_error);
grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
} }
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {} static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}

@@ -126,7 +126,7 @@ typedef struct {
   /* Destroy per call data.
      The filter does not need to do any chaining.
      The bottom filter of a stack will be passed a non-NULL pointer to
-     \a then_schedule_closure that should be passed to grpc_closure_sched when
+     \a then_schedule_closure that should be passed to GRPC_CLOSURE_SCHED when
      destruction is complete. \a final_info contains data about the completed
      call, mainly for reporting purposes. */
   void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
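For reference, a minimal sketch (not part of this commit) of how a bottom-of-stack filter might satisfy the contract in the comment above. The per-call struct and its cleanup are hypothetical; the only point illustrated is handing then_schedule_closure to GRPC_CLOSURE_SCHED once per-call teardown is finished.

/* Hypothetical bottom-of-stack filter: "example_call_data" and its buffer
   are made up for illustration; final_info is ignored here. */
typedef struct {
  grpc_slice_buffer buffer;
} example_call_data;

static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                              const grpc_call_final_info *final_info,
                              grpc_closure *then_schedule_closure) {
  example_call_data *calld = elem->call_data;
  /* release per-call resources first ... */
  grpc_slice_buffer_destroy_internal(exec_ctx, &calld->buffer);
  /* ... then tell the call stack this element is fully torn down */
  GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}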

@ -190,7 +190,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// Cancel deadline timer, since we're invoking the on_handshake_done // Cancel deadline timer, since we're invoking the on_handshake_done
// callback now. // callback now.
grpc_timer_cancel(exec_ctx, &mgr->deadline_timer); grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
grpc_closure_sched(exec_ctx, &mgr->on_handshake_done, error); GRPC_CLOSURE_SCHED(exec_ctx, &mgr->on_handshake_done, error);
mgr->shutdown = true; mgr->shutdown = true;
} else { } else {
grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index], grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
@ -245,13 +245,13 @@ void grpc_handshake_manager_do_handshake(
grpc_slice_buffer_init(mgr->args.read_buffer); grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers. // Initialize state needed for calling handshakers.
mgr->acceptor = acceptor; mgr->acceptor = acceptor;
grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr, GRPC_CLOSURE_INIT(&mgr->call_next_handshaker, call_next_handshaker, mgr,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args, GRPC_CLOSURE_INIT(&mgr->on_handshake_done, on_handshake_done, &mgr->args,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
// Start deadline timer, which owns a ref. // Start deadline timer, which owns a ref.
gpr_ref(&mgr->refs); gpr_ref(&mgr->refs);
grpc_closure_init(&mgr->on_timeout, on_timeout, mgr, GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &mgr->deadline_timer, grpc_timer_init(exec_ctx, &mgr->deadline_timer,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),

@@ -90,7 +90,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
 grpc_error *error) {
 grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
 req->context->pollset_set);
-grpc_closure_sched(exec_ctx, req->on_done, error);
+GRPC_CLOSURE_SCHED(exec_ctx, req->on_done, error);
 grpc_http_parser_destroy(&req->parser);
 if (req->addresses != NULL) {
 grpc_resolved_addresses_destroy(req->addresses);
@@ -213,7 +213,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
 return;
 }
 addr = &req->addresses->addrs[req->next_address++];
-grpc_closure_init(&req->connected, on_connected, req,
+GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
 grpc_schedule_on_exec_ctx);
 grpc_arg arg;
 arg.key = GRPC_ARG_RESOURCE_QUOTA;
@@ -256,8 +256,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
 req->pollent = pollent;
 req->overall_error = GRPC_ERROR_NONE;
 req->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
-grpc_closure_init(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
+GRPC_CLOSURE_INIT(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
-grpc_closure_init(&req->done_write, done_write, req,
+GRPC_CLOSURE_INIT(&req->done_write, done_write, req,
 grpc_schedule_on_exec_ctx);
 grpc_slice_buffer_init(&req->incoming);
 grpc_slice_buffer_init(&req->outgoing);
@@ -271,7 +271,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
 grpc_resolve_address(
 exec_ctx, request->host, req->handshaker->default_port,
 req->context->pollset_set,
-grpc_closure_create(on_resolved, req, grpc_schedule_on_exec_ctx),
+GRPC_CLOSURE_CREATE(on_resolved, req, grpc_schedule_on_exec_ctx),
 &req->addresses);
 }

@@ -85,7 +85,7 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
 error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
 gpr_free(msg);
 }
-grpc_closure_sched(exec_ctx, on_peer_checked, error);
+GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
 tsi_peer_destruct(&peer);
 }

@@ -24,14 +24,26 @@
 #include "src/core/lib/profiling/timers.h"
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+grpc_closure *grpc_closure_init(const char *file, int line,
+grpc_closure *closure, grpc_iomgr_cb_func cb,
+void *cb_arg,
+grpc_closure_scheduler *scheduler) {
+#else
 grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
 void *cb_arg,
 grpc_closure_scheduler *scheduler) {
+#endif
 closure->cb = cb;
 closure->cb_arg = cb_arg;
 closure->scheduler = scheduler;
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 closure->scheduled = false;
+closure->file_initiated = NULL;
+closure->line_initiated = 0;
+closure->run = false;
+closure->file_created = file;
+closure->line_created = line;
 #endif
 return closure;
 }
@@ -100,19 +112,39 @@ static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
 cb(exec_ctx, cb_arg, error);
 }
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+grpc_closure *grpc_closure_create(const char *file, int line,
+grpc_iomgr_cb_func cb, void *cb_arg,
+grpc_closure_scheduler *scheduler) {
+#else
 grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
 grpc_closure_scheduler *scheduler) {
+#endif
 wrapped_closure *wc = gpr_malloc(sizeof(*wc));
 wc->cb = cb;
 wc->cb_arg = cb_arg;
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+grpc_closure_init(file, line, &wc->wrapper, closure_wrapper, wc, scheduler);
+#else
 grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler);
+#endif
 return &wc->wrapper;
 }
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
+grpc_closure *c, grpc_error *error) {
+#else
 void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
 grpc_error *error) {
+#endif
 GPR_TIMER_BEGIN("grpc_closure_run", 0);
 if (c != NULL) {
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+c->file_initiated = file;
+c->line_initiated = line;
+c->run = true;
+#endif
 assert(c->cb);
 c->scheduler->vtable->run(exec_ctx, c, error);
 } else {
@@ -121,13 +153,21 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
 GPR_TIMER_END("grpc_closure_run", 0);
 }
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
+grpc_closure *c, grpc_error *error) {
+#else
 void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
 grpc_error *error) {
+#endif
 GPR_TIMER_BEGIN("grpc_closure_sched", 0);
 if (c != NULL) {
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 GPR_ASSERT(!c->scheduled);
 c->scheduled = true;
+c->file_initiated = file;
+c->line_initiated = line;
+c->run = false;
 #endif
 assert(c->cb);
 c->scheduler->vtable->sched(exec_ctx, c, error);
@@ -137,13 +177,21 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
 GPR_TIMER_END("grpc_closure_sched", 0);
 }
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+void grpc_closure_list_sched(const char *file, int line,
+grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+#else
 void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+#endif
 grpc_closure *c = list->head;
 while (c != NULL) {
 grpc_closure *next = c->next_data.next;
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 GPR_ASSERT(!c->scheduled);
 c->scheduled = true;
+c->file_initiated = file;
+c->line_initiated = line;
+c->run = false;
 #endif
 assert(c->cb);
 c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
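With GRPC_CLOSURE_RICH_DEBUG enabled, every closure now carries where it was created and where it was last scheduled or run. A minimal sketch of a debugger-style dump of those fields follows; grpc_closure_debug_string is a hypothetical helper written for illustration only, it is not part of this patch.

/* Illustrative sketch only: grpc_closure_debug_string is a hypothetical
 * helper, not part of this patch. It assumes GRPC_CLOSURE_RICH_DEBUG is
 * defined so the extra grpc_closure fields exist. */
#include <stdio.h>

static void grpc_closure_debug_string(const grpc_closure *c, char *buf,
                                      size_t len) {
  snprintf(buf, len, "closure %p: created at %s:%d, last %s from %s:%d",
           (const void *)c, c->file_created, c->line_created,
           c->run ? "run" : "scheduled",
           c->file_initiated ? c->file_initiated : "<never>",
           c->line_initiated);
}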

@@ -59,6 +59,8 @@ struct grpc_closure_scheduler {
 const grpc_closure_scheduler_vtable *vtable;
 };
+// #define GRPC_CLOSURE_RICH_DEBUG
 /** A closure over a grpc_iomgr_cb_func. */
 struct grpc_closure {
 /** Once queued, next indicates the next queued closure; before then, scratch
@@ -85,19 +87,47 @@ struct grpc_closure {
 uintptr_t scratch;
 } error_data;
-#ifndef NDEBUG
+// extra tracing and debugging for grpc_closure. This incurs a decent amount of
+// overhead per closure, so it must be enabled at compile time.
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 bool scheduled;
+bool run;  // true = run, false = scheduled
+const char *file_created;
+int line_created;
+const char *file_initiated;
+int line_initiated;
 #endif
 };
 /** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+grpc_closure *grpc_closure_init(const char *file, int line,
+grpc_closure *closure, grpc_iomgr_cb_func cb,
+void *cb_arg,
+grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
+grpc_closure_init(__FILE__, __LINE__, closure, cb, cb_arg, scheduler)
+#else
 grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
 void *cb_arg,
 grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
+grpc_closure_init(closure, cb, cb_arg, scheduler)
+#endif
 /* Create a heap allocated closure: try to avoid except for very rare events */
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+grpc_closure *grpc_closure_create(const char *file, int line,
+grpc_iomgr_cb_func cb, void *cb_arg,
+grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \
+grpc_closure_create(__FILE__, __LINE__, cb, cb_arg, scheduler)
+#else
 grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
 grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \
+grpc_closure_create(cb, cb_arg, scheduler)
+#endif
 #define GRPC_CLOSURE_LIST_INIT \
 { NULL, NULL }
@@ -123,16 +153,44 @@ bool grpc_closure_list_empty(grpc_closure_list list);
 /** Run a closure directly. Caller ensures that no locks are being held above.
 * Note that calling this at the end of a closure callback function itself is
 * by definition safe. */
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
+grpc_closure *closure, grpc_error *error);
+#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
+grpc_closure_run(__FILE__, __LINE__, exec_ctx, closure, error)
+#else
 void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
 grpc_error *error);
+#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
+grpc_closure_run(exec_ctx, closure, error)
+#endif
 /** Schedule a closure to be run. Does not need to be run from a safe point. */
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
+grpc_closure *closure, grpc_error *error);
+#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
+grpc_closure_sched(__FILE__, __LINE__, exec_ctx, closure, error)
+#else
 void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
 grpc_error *error);
+#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
+grpc_closure_sched(exec_ctx, closure, error)
+#endif
 /** Schedule all closures in a list to be run. Does not need to be run from a
 * safe point. */
+#ifdef GRPC_CLOSURE_RICH_DEBUG
+void grpc_closure_list_sched(const char *file, int line,
+grpc_exec_ctx *exec_ctx,
+grpc_closure_list *closure_list);
+#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
+grpc_closure_list_sched(__FILE__, __LINE__, exec_ctx, closure_list)
+#else
 void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx,
 grpc_closure_list *closure_list);
+#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
+grpc_closure_list_sched(exec_ctx, closure_list)
+#endif
 #endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
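Call sites switch from the lower-case functions to the upper-case macros so that __FILE__ and __LINE__ are captured automatically when rich debug is compiled in, and compile down to the plain calls otherwise. A minimal usage sketch follows; my_state and on_done_cb are made-up names used only for illustration.

/* Usage sketch (illustrative; my_state and on_done_cb are made-up names). */
typedef struct {
  grpc_closure on_done;
} my_state;

static void on_done_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  my_state *st = (my_state *)arg;
  (void)st; /* ... consume the completion ... */
}

static void start(grpc_exec_ctx *exec_ctx, my_state *st) {
  /* Under GRPC_CLOSURE_RICH_DEBUG, INIT records file_created/line_created and
     SCHED records file_initiated/line_initiated, both pointing at these lines. */
  GRPC_CLOSURE_INIT(&st->on_done, on_done_cb, st, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(exec_ctx, &st->on_done, GRPC_ERROR_NONE);
}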

@@ -81,7 +81,7 @@ grpc_combiner *grpc_combiner_create(void) {
 gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
 gpr_mpscq_init(&lock->queue);
 grpc_closure_list_init(&lock->final_list);
-grpc_closure_init(&lock->offload, offload, lock, grpc_executor_scheduler);
+GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
 GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
 return lock;
 }
@@ -196,7 +196,7 @@ static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
 move_next(exec_ctx);
 GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
-grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
 }
 bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
@@ -247,7 +247,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
 GPR_TIMER_BEGIN("combiner.exec1", 0);
 grpc_closure *cl = (grpc_closure *)n;
 grpc_error *cl_err = cl->error_data.error;
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 cl->scheduled = false;
 #endif
 cl->cb(exec_ctx, cl->cb_arg, cl_err);
@@ -264,7 +264,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
 gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
 grpc_closure *next = c->next_data.next;
 grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 c->scheduled = false;
 #endif
 c->cb(exec_ctx, c->cb_arg, error);
@@ -332,8 +332,8 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
 GPR_TIMER_BEGIN("combiner.execute_finally", 0);
 if (exec_ctx->active_combiner != lock) {
 GPR_TIMER_MARK("slowpath", 0);
-grpc_closure_sched(exec_ctx,
+GRPC_CLOSURE_SCHED(exec_ctx,
-grpc_closure_create(enqueue_finally, closure,
+GRPC_CLOSURE_CREATE(enqueue_finally, closure,
 grpc_combiner_scheduler(lock)),
 error);
 GPR_TIMER_END("combiner.execute_finally", 0);

@@ -149,7 +149,7 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
 grpc_error_create(__FILE__, __LINE__, grpc_slice_from_copied_string(desc), \
 errs, count)
-//#define GRPC_ERROR_REFCOUNT_DEBUG
+// #define GRPC_ERROR_REFCOUNT_DEBUG
 #ifdef GRPC_ERROR_REFCOUNT_DEBUG
 grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
 const char *func);

@@ -237,7 +237,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 close(fd->fd);
 }
-grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
 grpc_iomgr_unregister_object(&fd->iomgr_object);
 grpc_lfev_destroy(&fd->read_closure);
@@ -427,7 +427,7 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
 grpc_pollset *pollset) {
 if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
 pollset->begin_refs == 0) {
-grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
 pollset->shutdown_closure = NULL;
 }
 }

@@ -965,7 +965,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 fd->po.pi = NULL;
 }
-grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
 gpr_mu_unlock(&fd->po.mu);
 UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1250,7 +1250,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
 /* Release the ref and set pollset->po.pi to NULL */
 pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
-grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 /* pollset->po.mu lock must be held by the caller before calling this */

@@ -515,7 +515,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 fd->eps = NULL;
 }
-grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
 gpr_mu_unlock(&fd->mu);
@@ -716,7 +716,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
 /* Release the ref and set pollset->eps to NULL */
 pollset_release_epoll_set(exec_ctx, pollset, "ps_shutdown");
-grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 /* pollset->mu lock must be held by the caller before calling this */

@@ -269,7 +269,7 @@ static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
 #endif
 old = gpr_atm_full_fetch_add(&fd->refst, -n);
 if (old == n) {
-grpc_closure_sched(exec_ctx, grpc_closure_create(fd_destroy, fd,
+GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
 grpc_schedule_on_exec_ctx),
 GRPC_ERROR_NONE);
 } else {
@@ -367,7 +367,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 to be alive (and not added to freelist) until the end of this function */
 REF_BY(fd, 1, reason);
-grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
 gpr_mu_unlock(&fd->orphaned_mu);
 gpr_mu_unlock(&fd->pollable.po.mu);
@@ -667,7 +667,7 @@ static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
 static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
 grpc_pollset *pollset) {
 if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
-grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
 pollset->shutdown_closure = NULL;
 }
 }
@@ -966,8 +966,8 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
 pollable_add_fd(&pollset->pollable, had_fd);
 pollable_add_fd(&pollset->pollable, fd);
 }
-grpc_closure_sched(exec_ctx,
+GRPC_CLOSURE_SCHED(exec_ctx,
-grpc_closure_create(unref_fd_no_longer_poller, had_fd,
+GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd,
 grpc_schedule_on_exec_ctx),
 GRPC_ERROR_NONE);
 }

@@ -893,7 +893,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 fd->po.pi = NULL;
 }
-grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
 gpr_mu_unlock(&fd->po.mu);
 UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1147,7 +1147,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
 /* Release the ref and set pollset->po.pi to NULL */
 pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
-grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 /* pollset->po.mu lock must be held by the caller before calling this */

@@ -386,7 +386,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
 if (!fd->released) {
 close(fd->fd);
 }
-grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
 }
 static int fd_wrapped_fd(grpc_fd *fd) {
@@ -445,7 +445,7 @@ static grpc_error *fd_shutdown_error(grpc_fd *fd) {
 static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 grpc_closure **st, grpc_closure *closure) {
 if (fd->shutdown) {
-grpc_closure_sched(exec_ctx, closure,
+GRPC_CLOSURE_SCHED(exec_ctx, closure,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
 } else if (*st == CLOSURE_NOT_READY) {
 /* not ready ==> switch to a waiting state by setting the closure */
@@ -453,7 +453,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 } else if (*st == CLOSURE_READY) {
 /* already ready ==> queue the closure to run immediately */
 *st = CLOSURE_NOT_READY;
-grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd));
+GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
 maybe_wake_one_watcher_locked(fd);
 } else {
 /* upcallptr was set to a different closure. This is an error! */
@@ -476,7 +476,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
 return 0;
 } else {
 /* waiting ==> queue closure */
-grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd));
+GRPC_CLOSURE_SCHED(exec_ctx, *st, fd_shutdown_error(fd));
 *st = CLOSURE_NOT_READY;
 return 1;
 }
@@ -834,7 +834,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
 GRPC_FD_UNREF(pollset->fds[i], "multipoller");
 }
 pollset->fd_count = 0;
-grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 static void work_combine_error(grpc_error **composite, grpc_error *error) {
@@ -883,7 +883,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 if (!pollset_has_workers(pollset) &&
 !grpc_closure_list_empty(pollset->idle_jobs)) {
 GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
-grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
 goto done;
 }
 /* If we're shutting down then we don't execute any extended work */
@@ -1056,7 +1056,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
 gpr_mu_lock(&pollset->mu);
 } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
-grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
 gpr_mu_unlock(&pollset->mu);
 grpc_exec_ctx_flush(exec_ctx);
 gpr_mu_lock(&pollset->mu);
@@ -1075,7 +1075,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 pollset->shutdown_done = closure;
 pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
 if (!pollset_has_workers(pollset)) {
-grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
 }
 if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
 pollset->called_shutdown = 1;
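GRPC_CLOSURE_LIST_SCHED takes the whole list, so under rich debug every idle job flushed here is attributed to this single call site rather than to wherever it was appended. A small sketch of the pattern follows; it assumes the existing grpc_closure_list_append helper and uses made-up function names.

/* Sketch (illustrative): build a list of ready closures and flush it. */
static void flush_jobs(grpc_exec_ctx *exec_ctx, grpc_closure *a,
                       grpc_closure *b) {
  grpc_closure_list jobs = GRPC_CLOSURE_LIST_INIT;
  grpc_closure_list_append(&jobs, a, GRPC_ERROR_NONE);
  grpc_closure_list_append(&jobs, b, GRPC_ERROR_NONE);
  /* With GRPC_CLOSURE_RICH_DEBUG, every element records
     file_initiated/line_initiated pointing at this call. */
  GRPC_CLOSURE_LIST_SCHED(exec_ctx, &jobs);
}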

@@ -62,7 +62,7 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
 grpc_closure *next = c->next_data.next;
 grpc_error *error = c->error_data.error;
 did_something = true;
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 c->scheduled = false;
 #endif
 c->cb(exec_ctx, c->cb_arg, error);
@@ -85,7 +85,7 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
 static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
 grpc_error *error) {
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 closure->scheduled = false;
 #endif
 closure->cb(exec_ctx, closure->cb_arg, error);
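Because exec_ctx_run and grpc_exec_ctx_flush clear the scheduled flag before invoking the callback, a closure may legally re-schedule itself from inside its own callback; only overlapping schedules trip the assertion. A minimal self-rearming sketch with made-up names:

/* Illustrative sketch only; ticker, tick_cb, and ticker_start are made up. */
typedef struct {
  grpc_closure tick;
  int remaining;
} ticker;

static void tick_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  ticker *t = (ticker *)arg;
  if (error == GRPC_ERROR_NONE && t->remaining-- > 0) {
    /* Legal: 'scheduled' was cleared before tick_cb was invoked. */
    GRPC_CLOSURE_SCHED(exec_ctx, &t->tick, GRPC_ERROR_NONE);
  }
}

static void ticker_start(grpc_exec_ctx *exec_ctx, ticker *t, int n) {
  t->remaining = n;
  GRPC_CLOSURE_INIT(&t->tick, tick_cb, t, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(exec_ctx, &t->tick, GRPC_ERROR_NONE);
}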

@@ -58,7 +58,7 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
 while (c != NULL) {
 grpc_closure *next = c->next_data.next;
 grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
+#ifdef GRPC_CLOSURE_RICH_DEBUG
 c->scheduled = false;
 #endif
 c->cb(exec_ctx, c->cb_arg, error);

@@ -112,7 +112,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
 closure when transitioning out of CLOSURE_NO_READY state (i.e there
 is no other code that needs to 'happen-after' this) */
 if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
-grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
 return; /* Successful. Return */
 }
@@ -125,7 +125,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
 schedule the closure with the shutdown error */
 if ((curr & FD_SHUTDOWN_BIT) > 0) {
 grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
-grpc_closure_sched(exec_ctx, closure,
+GRPC_CLOSURE_SCHED(exec_ctx, closure,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "FD Shutdown", &shutdown_err, 1));
 return;
@@ -177,7 +177,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
 happens-after on that edge), and a release to pair with anything
 loading the shutdown state. */
 if (gpr_atm_full_cas(state, curr, new_state)) {
-grpc_closure_sched(exec_ctx, (grpc_closure *)curr,
+GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr,
 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
 "FD Shutdown", &shutdown_err, 1));
 return true;
@@ -226,7 +226,7 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) {
 spurious set_ready; release pairs with this or the acquire in
 notify_on (or set_shutdown) */
 else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
-grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
 return;
 }
 /* else the state changed again (only possible by either a racing

@@ -88,7 +88,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 // kick the loop once
 uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
 }
-grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
 }
 void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {

@@ -95,7 +95,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 pollset->shutting_down = 1;
 grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
 if (!pollset->is_iocp_worker) {
-grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
 } else {
 pollset->on_shutdown = closure;
 }
@@ -143,7 +143,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 }
 if (pollset->shutting_down && pollset->on_shutdown != NULL) {
-grpc_closure_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
 pollset->on_shutdown = NULL;
 }
 goto done;

@@ -154,7 +154,7 @@ typedef struct {
 static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
 grpc_error *error) {
 request *r = rp;
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, r->on_done,
 grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
 gpr_free(r->name);
@@ -175,13 +175,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 grpc_closure *on_done,
 grpc_resolved_addresses **addrs) {
 request *r = gpr_malloc(sizeof(request));
-grpc_closure_init(&r->request_closure, do_request_thread, r,
+GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
 grpc_executor_scheduler);
 r->name = gpr_strdup(name);
 r->default_port = gpr_strdup(default_port);
 r->on_done = on_done;
 r->addrs_out = addrs;
-grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
 }
 void (*grpc_resolve_address)(

@@ -124,7 +124,7 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
 /* Either no retry was attempted, or the retry failed. Either way, the
 original error probably has more interesting information */
 error = handle_addrinfo_result(status, res, r->addresses);
-grpc_closure_sched(&exec_ctx, r->on_done, error);
+GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error);
 grpc_exec_ctx_finish(&exec_ctx);
 gpr_free(r->hints);
 gpr_free(r);
@@ -225,7 +225,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 int s;
 err = try_split_host_port(name, default_port, &host, &port);
 if (err != GRPC_ERROR_NONE) {
-grpc_closure_sched(exec_ctx, on_done, err);
+GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
 return;
 }
 r = gpr_malloc(sizeof(request));
@@ -252,7 +252,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
 err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR,
 grpc_slice_from_static_string(uv_strerror(s)));
-grpc_closure_sched(exec_ctx, on_done, err);
+GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
 gpr_free(r);
 gpr_free(req);
 gpr_free(hints);

@@ -139,7 +139,7 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
 } else {
 GRPC_ERROR_REF(error);
 }
-grpc_closure_sched(exec_ctx, r->on_done, error);
+GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, error);
 gpr_free(r->name);
 gpr_free(r->default_port);
 gpr_free(r);
@@ -158,13 +158,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 grpc_closure *on_done,
 grpc_resolved_addresses **addresses) {
 request *r = gpr_malloc(sizeof(request));
-grpc_closure_init(&r->request_closure, do_request_thread, r,
+GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
 grpc_executor_scheduler);
 r->name = gpr_strdup(name);
 r->default_port = gpr_strdup(default_port);
 r->on_done = on_done;
 r->addresses = addresses;
-grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
 }
 void (*grpc_resolve_address)(

@@ -259,7 +259,7 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx,
 if (resource_quota->step_scheduled) return;
 resource_quota->step_scheduled = true;
 grpc_resource_quota_ref_internal(resource_quota);
-grpc_closure_sched(exec_ctx, &resource_quota->rq_step_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &resource_quota->rq_step_closure,
 GRPC_ERROR_NONE);
 }
@@ -305,7 +305,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
 }
 if (resource_user->free_pool >= 0) {
 resource_user->allocating = false;
-grpc_closure_list_sched(exec_ctx, &resource_user->on_allocated);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
 gpr_mu_unlock(&resource_user->mu);
 } else {
 rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
@@ -363,7 +363,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
 resource_quota->debug_only_last_reclaimer_resource_user = resource_user;
 resource_quota->debug_only_last_initiated_reclaimer = c;
 resource_user->reclaimers[destructive] = NULL;
-grpc_closure_run(exec_ctx, c, GRPC_ERROR_NONE);
+GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_NONE);
 return true;
 }
@@ -444,7 +444,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
 resource_user->new_reclaimers[destructive] = NULL;
 GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
 if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
-grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CANCELLED);
 return false;
 }
 resource_user->reclaimers[destructive] = closure;
@@ -485,9 +485,9 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
 static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
 grpc_resource_user *resource_user = ru;
-grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
 GRPC_ERROR_CANCELLED);
-grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
 GRPC_ERROR_CANCELLED);
 resource_user->reclaimers[0] = NULL;
 resource_user->reclaimers[1] = NULL;
@@ -501,9 +501,9 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
 for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
 rulist_remove(resource_user, (grpc_rulist)i);
 }
-grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
 GRPC_ERROR_CANCELLED);
-grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
 GRPC_ERROR_CANCELLED);
 if (resource_user->free_pool != 0) {
 resource_user->resource_quota->free_pool += resource_user->free_pool;
@@ -525,7 +525,7 @@ static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
 slice_allocator->length));
 }
 }
-grpc_closure_run(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_RUN(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
 }
 /*******************************************************************************
@@ -579,9 +579,9 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
 gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
 (intptr_t)resource_quota);
 }
-grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota,
+GRPC_CLOSURE_INIT(&resource_quota->rq_step_closure, rq_step, resource_quota,
 grpc_combiner_finally_scheduler(resource_quota->combiner));
-grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
+GRPC_CLOSURE_INIT(&resource_quota->rq_reclamation_done_closure,
 rq_reclamation_done, resource_quota,
 grpc_combiner_scheduler(resource_quota->combiner));
 for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
@@ -633,8 +633,8 @@ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
 a->size = (int64_t)size;
 gpr_atm_no_barrier_store(&resource_quota->last_size,
 (gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size));
-grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
+GRPC_CLOSURE_INIT(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
-grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
 grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -686,19 +686,19 @@ grpc_resource_user *grpc_resource_user_create(
 grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
 resource_user->resource_quota =
 grpc_resource_quota_ref_internal(resource_quota);
-grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
+GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
 resource_user,
 grpc_combiner_scheduler(resource_quota->combiner));
-grpc_closure_init(&resource_user->add_to_free_pool_closure,
+GRPC_CLOSURE_INIT(&resource_user->add_to_free_pool_closure,
 &ru_add_to_free_pool, resource_user,
 grpc_combiner_scheduler(resource_quota->combiner));
-grpc_closure_init(&resource_user->post_reclaimer_closure[0],
+GRPC_CLOSURE_INIT(&resource_user->post_reclaimer_closure[0],
 &ru_post_benign_reclaimer, resource_user,
 grpc_combiner_scheduler(resource_quota->combiner));
-grpc_closure_init(&resource_user->post_reclaimer_closure[1],
+GRPC_CLOSURE_INIT(&resource_user->post_reclaimer_closure[1],
 &ru_post_destructive_reclaimer, resource_user,
 grpc_combiner_scheduler(resource_quota->combiner));
-grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user,
+GRPC_CLOSURE_INIT(&resource_user->destroy_closure, &ru_destroy, resource_user,
 grpc_combiner_scheduler(resource_quota->combiner));
 gpr_mu_init(&resource_user->mu);
 gpr_atm_rel_store(&resource_user->refs, 1);
@@ -739,7 +739,7 @@ static void ru_unref_by(grpc_exec_ctx *exec_ctx,
 gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
 GPR_ASSERT(old >= amount);
 if (old == amount) {
-grpc_closure_sched(exec_ctx, &resource_user->destroy_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->destroy_closure,
 GRPC_ERROR_NONE);
 }
 }
@@ -756,9 +756,9 @@ void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
 void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
 grpc_resource_user *resource_user) {
 if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx,
-grpc_closure_create(
+GRPC_CLOSURE_CREATE(
 ru_shutdown, resource_user,
 grpc_combiner_scheduler(resource_user->resource_quota->combiner)),
 GRPC_ERROR_NONE);
@@ -781,11 +781,11 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
 GRPC_ERROR_NONE);
 if (!resource_user->allocating) {
 resource_user->allocating = true;
-grpc_closure_sched(exec_ctx, &resource_user->allocate_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->allocate_closure,
 GRPC_ERROR_NONE);
 }
 } else {
-grpc_closure_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
 }
 gpr_mu_unlock(&resource_user->mu);
 }
@@ -804,7 +804,7 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
 if (is_bigger_than_zero && was_zero_or_negative &&
 !resource_user->added_to_free_pool) {
 resource_user->added_to_free_pool = true;
-grpc_closure_sched(exec_ctx, &resource_user->add_to_free_pool_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->add_to_free_pool_closure,
 GRPC_ERROR_NONE);
 }
 gpr_mu_unlock(&resource_user->mu);
@@ -817,7 +817,7 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
 grpc_closure *closure) {
 GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
 resource_user->new_reclaimers[destructive] = closure;
-grpc_closure_sched(exec_ctx,
+GRPC_CLOSURE_SCHED(exec_ctx,
 &resource_user->post_reclaimer_closure[destructive],
 GRPC_ERROR_NONE);
 }
@@ -828,7 +828,7 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
 gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
 resource_user->resource_quota->name, resource_user->name);
 }
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
 GRPC_ERROR_NONE);
 }
@@ -836,9 +836,9 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
 void grpc_resource_user_slice_allocator_init(
 grpc_resource_user_slice_allocator *slice_allocator,
 grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
-grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
+GRPC_CLOSURE_INIT(&slice_allocator->on_allocated, ru_allocated_slices,
 slice_allocator, grpc_schedule_on_exec_ctx);
-grpc_closure_init(&slice_allocator->on_done, cb, p,
+GRPC_CLOSURE_INIT(&slice_allocator->on_done, cb, p,
 grpc_schedule_on_exec_ctx);
 slice_allocator->resource_user = resource_user;
 }

@@ -116,7 +116,7 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
 gpr_mu_lock(&socket->state_mu);
 if (info->has_pending_iocp) {
 info->has_pending_iocp = 0;
-grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
 } else {
 info->closure = closure;
 }
@@ -139,7 +139,7 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
 GPR_ASSERT(!info->has_pending_iocp);
 gpr_mu_lock(&socket->state_mu);
 if (info->closure) {
-grpc_closure_sched(exec_ctx, info->closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, info->closure, GRPC_ERROR_NONE);
 info->closure = NULL;
 } else {
 info->has_pending_iocp = 1;

@@ -234,7 +234,7 @@ finish:
 grpc_channel_args_destroy(exec_ctx, ac->channel_args);
 gpr_free(ac);
 }
-grpc_closure_sched(exec_ctx, closure, error);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
 }
 static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
@@ -263,7 +263,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
 if (error != GRPC_ERROR_NONE) {
-grpc_closure_sched(exec_ctx, closure, error);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
 return;
 }
 if (dsmode == GRPC_DSMODE_IPV4) {
@@ -272,7 +272,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 addr = &addr4_copy;
 }
 if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
-grpc_closure_sched(exec_ctx, closure, error);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
 return;
 }
@@ -290,13 +290,13 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 if (err >= 0) {
 *ep =
 grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
-grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
 goto done;
 }
 if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
 grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
-grpc_closure_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
+GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
 goto done;
 }
@@ -311,7 +311,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 addr_str = NULL;
 gpr_mu_init(&ac->mu);
 ac->refs = 2;
-grpc_closure_init(&ac->write_closure, on_writable, ac,
+GRPC_CLOSURE_INIT(&ac->write_closure, on_writable, ac,
 grpc_schedule_on_exec_ctx);
 ac->channel_args = grpc_channel_args_copy(channel_args);
@@ -321,7 +321,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 }
 gpr_mu_lock(&ac->mu);
-grpc_closure_init(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
+GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
 grpc_timer_init(exec_ctx, &ac->alarm,
 gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
 &ac->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));

@@ -107,7 +107,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
 if (done) {
 uv_tcp_connect_cleanup(&exec_ctx, connect);
 }
-grpc_closure_sched(&exec_ctx, closure, error);
+GRPC_CLOSURE_SCHED(&exec_ctx, closure, error);
 grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -150,7 +150,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
 (const struct sockaddr *)resolved_addr->addr,
 uv_tc_on_connect);
-grpc_closure_init(&connect->on_alarm, uv_tc_on_alarm, connect,
+GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
 grpc_schedule_on_exec_ctx);
 grpc_timer_init(exec_ctx, &connect->alarm,
 gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),

@@ -116,7 +116,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
 async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
 /* If the connection was aborted, the callback was already called when
 the deadline was met. */
-grpc_closure_sched(exec_ctx, on_done, error);
+GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
 }
 /* Tries to issue one async connection, then schedules both an IOCP
@@ -201,9 +201,9 @@ static void tcp_client_connect_impl(
 ac->addr_name = grpc_sockaddr_to_uri(addr);
 ac->endpoint = endpoint;
 ac->channel_args = grpc_channel_args_copy(channel_args);
-grpc_closure_init(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
+GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
-grpc_closure_init(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
+GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
 grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm,
 gpr_now(GPR_CLOCK_MONOTONIC));
 grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
@@ -222,7 +222,7 @@ failure:
 } else if (sock != INVALID_SOCKET) {
 closesocket(sock);
 }
-grpc_closure_sched(exec_ctx, on_done, final_error);
+GRPC_CLOSURE_SCHED(exec_ctx, on_done, final_error);
 }
 // overridden by api_fuzzer.c

@@ -221,7 +221,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
 tcp->read_cb = NULL;
 tcp->incoming_buffer = NULL;
-grpc_closure_run(exec_ctx, cb, error);
+GRPC_CLOSURE_RUN(exec_ctx, cb, error);
 }
 #define MAX_READ_IOVEC 4
@@ -348,7 +348,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 tcp->finished_edge = false;
 grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
 } else {
-grpc_closure_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
 }
 }
@@ -465,7 +465,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
 gpr_log(GPR_DEBUG, "write: %s", str);
 }
-grpc_closure_run(exec_ctx, cb, error);
+GRPC_CLOSURE_RUN(exec_ctx, cb, error);
 TCP_UNREF(exec_ctx, tcp, "write");
 }
 }
@@ -491,7 +491,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 if (buf->length == 0) {
 GPR_TIMER_END("tcp_write", 0);
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, cb,
 grpc_fd_is_shutdown(tcp->em_fd)
 ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"),
@@ -515,7 +515,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 const char *str = grpc_error_string(error);
 gpr_log(GPR_DEBUG, "write: %s", str);
 }
-grpc_closure_sched(exec_ctx, cb, error);
+GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
 }
 GPR_TIMER_END("tcp_write", 0);
@@ -616,9 +616,9 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
 gpr_ref_init(&tcp->refcount, 1);
 gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
 tcp->em_fd = em_fd;
-grpc_closure_init(&tcp->read_closure, tcp_handle_read, tcp,
+GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
 grpc_schedule_on_exec_ctx);
-grpc_closure_init(&tcp->write_closure, tcp_handle_write, tcp,
+GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
 grpc_schedule_on_exec_ctx);
 grpc_slice_buffer_init(&tcp->last_read_buffer);
 tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);

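The tcp_posix hunks all share one shape: the grpc_closure storage is embedded in the owning grpc_tcp, GRPC_CLOSURE_INIT binds it to its callback once in grpc_tcp_create, and the poller re-arms it through grpc_fd_notify_on_read / _on_write. A stripped-down sketch of that shape, with a made-up struct standing in for grpc_tcp:

  typedef struct my_tcp {
    grpc_fd *em_fd;
    grpc_closure read_closure; /* storage owned by the endpoint itself */
  } my_tcp;

  static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
    my_tcp *tcp = (my_tcp *)arg;
    if (error != GRPC_ERROR_NONE) return; /* GRPC_ERROR_REF it if it must outlive this frame */
    /* ...consume the bytes, then re-arm for the next edge... */
    grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
  }

  static void my_tcp_start(grpc_exec_ctx *exec_ctx, my_tcp *tcp) {
    GRPC_CLOSURE_INIT(&tcp->read_closure, handle_read, tcp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
  }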
@@ -121,7 +121,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 GPR_ASSERT(s->shutdown);
 gpr_mu_unlock(&s->mu);
 if (s->shutdown_complete != NULL) {
-grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
 }
 gpr_mu_destroy(&s->mu);
@@ -163,7 +163,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 grpc_tcp_listener *sp;
 for (sp = s->head; sp; sp = sp->next) {
 grpc_unlink_if_unix_domain_socket(&sp->addr);
-grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
 grpc_schedule_on_exec_ctx);
 grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
 "tcp_listener_shutdown");
@@ -503,7 +503,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
 "clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
 for (i = 0; i < pollset_count; i++) {
 grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
-grpc_closure_init(&sp->read_closure, on_read, sp,
+GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
 grpc_schedule_on_exec_ctx);
 grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
 s->active_ports++;
@@ -513,7 +513,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
 for (i = 0; i < pollset_count; i++) {
 grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
 }
-grpc_closure_init(&sp->read_closure, on_read, sp,
+GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
 grpc_schedule_on_exec_ctx);
 grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
 s->active_ports++;
@@ -540,7 +540,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 if (gpr_unref(&s->refs)) {
 grpc_tcp_server_shutdown_listeners(exec_ctx, s);
 gpr_mu_lock(&s->mu);
-grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
 gpr_mu_unlock(&s->mu);
 tcp_server_destroy(exec_ctx, s);
 }

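grpc_tcp_server_unref above drains s->shutdown_starting with GRPC_CLOSURE_LIST_SCHED: every closure previously appended to that list is scheduled in one shot under the server lock before the server is destroyed. A reduced sketch of that accumulate-then-flush pattern follows; the initializer macro and the surrounding names are assumptions, only the append/sched calls mirror the diff:

  grpc_closure_list shutdown_starting = GRPC_CLOSURE_LIST_INIT; /* assumed initializer */

  /* registration: callers park a closure to be told when shutdown starts */
  grpc_closure_list_append(&shutdown_starting, cb, GRPC_ERROR_NONE);

  /* teardown: schedule everything that accumulated, then tear the server down */
  gpr_mu_lock(&mu);
  GRPC_CLOSURE_LIST_SCHED(exec_ctx, &shutdown_starting);
  gpr_mu_unlock(&mu);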
@@ -117,7 +117,7 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
 static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 GPR_ASSERT(s->shutdown);
 if (s->shutdown_complete != NULL) {
-grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
 }
 while (s->head) {
@@ -171,7 +171,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 if (gpr_unref(&s->refs)) {
 /* Complete shutdown_starting work before destroying. */
 grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
-grpc_closure_list_sched(&local_exec_ctx, &s->shutdown_starting);
+GRPC_CLOSURE_LIST_SCHED(&local_exec_ctx, &s->shutdown_starting);
 if (exec_ctx == NULL) {
 grpc_exec_ctx_flush(&local_exec_ctx);
 tcp_server_destroy(&local_exec_ctx, s);

@@ -134,10 +134,10 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
 static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
 grpc_tcp_server *s) {
 if (s->shutdown_complete != NULL) {
-grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
 }
-grpc_closure_sched(exec_ctx, grpc_closure_create(destroy_server, s,
+GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(destroy_server, s,
 grpc_schedule_on_exec_ctx),
 GRPC_ERROR_NONE);
 }
@@ -176,7 +176,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 if (gpr_unref(&s->refs)) {
 grpc_tcp_server_shutdown_listeners(exec_ctx, s);
 gpr_mu_lock(&s->mu);
-grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
+GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
 gpr_mu_unlock(&s->mu);
 tcp_server_destroy(exec_ctx, s);
 }
@@ -437,7 +437,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
 sp->new_socket = INVALID_SOCKET;
 sp->port = port;
 sp->port_index = port_index;
-grpc_closure_init(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx);
+GRPC_CLOSURE_INIT(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx);
 GPR_ASSERT(sp->socket);
 gpr_mu_unlock(&s->mu);
 *listener = sp;

@ -161,7 +161,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// nread < 0: Error // nread < 0: Error
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"); error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
} }
grpc_closure_sched(&exec_ctx, cb, error); GRPC_CLOSURE_SCHED(&exec_ctx, cb, error);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
} }
@ -183,7 +183,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
error = error =
grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(status))); grpc_slice_from_static_string(uv_strerror(status)));
grpc_closure_sched(exec_ctx, cb, error); GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
} }
if (GRPC_TRACER_ON(grpc_tcp_trace)) { if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error); const char *str = grpc_error_string(error);
@ -210,7 +210,7 @@ static void write_callback(uv_write_t *req, int status) {
gpr_free(tcp->write_buffers); gpr_free(tcp->write_buffers);
grpc_resource_user_free(&exec_ctx, tcp->resource_user, grpc_resource_user_free(&exec_ctx, tcp->resource_user,
sizeof(uv_buf_t) * tcp->write_slices->count); sizeof(uv_buf_t) * tcp->write_slices->count);
grpc_closure_sched(&exec_ctx, cb, error); GRPC_CLOSURE_SCHED(&exec_ctx, cb, error);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
} }
@ -236,7 +236,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
} }
if (tcp->shutting_down) { if (tcp->shutting_down) {
grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"TCP socket is shutting down")); "TCP socket is shutting down"));
return; return;
} }
@ -247,7 +247,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (tcp->write_slices->count == 0) { if (tcp->write_slices->count == 0) {
// No slices means we don't have to do anything, // No slices means we don't have to do anything,
// and libuv doesn't like empty writes // and libuv doesn't like empty writes
grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
return; return;
} }

@ -179,7 +179,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
tcp->read_cb = NULL; tcp->read_cb = NULL;
TCP_UNREF(exec_ctx, tcp, "read"); TCP_UNREF(exec_ctx, tcp, "read");
grpc_closure_sched(exec_ctx, cb, error); GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
} }
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@ -193,7 +193,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF buffer; WSABUF buffer;
if (tcp->shutting_down) { if (tcp->shutting_down) {
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, cb, exec_ctx, cb,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"TCP socket is shutting down", &tcp->shutdown_error, 1)); "TCP socket is shutting down", &tcp->shutdown_error, 1));
@ -220,7 +220,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* Did we get data immediately ? Yay. */ /* Did we get data immediately ? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) { if (info->wsa_error != WSAEWOULDBLOCK) {
info->bytes_transfered = bytes_read; info->bytes_transfered = bytes_read;
grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
return; return;
} }
@ -233,7 +233,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError(); int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) { if (wsa_error != WSA_IO_PENDING) {
info->wsa_error = wsa_error; info->wsa_error = wsa_error;
grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read,
GRPC_WSA_ERROR(info->wsa_error, "WSARecv")); GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
return; return;
} }
@ -265,7 +265,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
} }
TCP_UNREF(exec_ctx, tcp, "write"); TCP_UNREF(exec_ctx, tcp, "write");
grpc_closure_sched(exec_ctx, cb, error); GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
} }
/* Initiates a write. */ /* Initiates a write. */
@ -283,7 +283,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
size_t len; size_t len;
if (tcp->shutting_down) { if (tcp->shutting_down) {
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, cb, exec_ctx, cb,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"TCP socket is shutting down", &tcp->shutdown_error, 1)); "TCP socket is shutting down", &tcp->shutdown_error, 1));
@ -317,7 +317,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_error *error = status == 0 grpc_error *error = status == 0
? GRPC_ERROR_NONE ? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(info->wsa_error, "WSASend"); : GRPC_WSA_ERROR(info->wsa_error, "WSASend");
grpc_closure_sched(exec_ctx, cb, error); GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
if (allocated) gpr_free(allocated); if (allocated) gpr_free(allocated);
return; return;
} }
@ -335,7 +335,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError(); int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) { if (wsa_error != WSA_IO_PENDING) {
TCP_UNREF(exec_ctx, tcp, "write"); TCP_UNREF(exec_ctx, tcp, "write");
grpc_closure_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend")); GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
return; return;
} }
} }
@ -426,8 +426,8 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
tcp->socket = socket; tcp->socket = socket;
gpr_mu_init(&tcp->mu); gpr_mu_init(&tcp->mu);
gpr_ref_init(&tcp->refcount, 1); gpr_ref_init(&tcp->refcount, 1);
grpc_closure_init(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
grpc_closure_init(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
tcp->peer_string = gpr_strdup(peer_string); tcp->peer_string = gpr_strdup(peer_string);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */ /* Tell network status tracking code about the new endpoint */

@ -230,7 +230,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
if (!g_shared_mutables.initialized) { if (!g_shared_mutables.initialized) {
timer->pending = false; timer->pending = false;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_CLOSURE_SCHED(exec_ctx, timer->closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Attempt to create timer before initialization")); "Attempt to create timer before initialization"));
return; return;
@ -240,7 +240,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->pending = true; timer->pending = true;
if (gpr_time_cmp(deadline, now) <= 0) { if (gpr_time_cmp(deadline, now) <= 0) {
timer->pending = false; timer->pending = false;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&shard->mu); gpr_mu_unlock(&shard->mu);
/* early out */ /* early out */
return; return;
@ -310,7 +310,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
timer->pending ? "true" : "false"); timer->pending ? "true" : "false");
} }
if (timer->pending) { if (timer->pending) {
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false; timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) { if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer); list_remove(timer);
@ -400,7 +400,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
grpc_timer *timer; grpc_timer *timer;
gpr_mu_lock(&shard->mu); gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) { while ((timer = pop_one(shard, now))) {
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
n++; n++;
} }
*new_min_deadline = compute_min_deadline(shard); *new_min_deadline = compute_min_deadline(shard);

@ -44,7 +44,7 @@ void run_expired_timer(uv_timer_t *handle) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(timer->pending); GPR_ASSERT(timer->pending);
timer->pending = 0; timer->pending = 0;
grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle); stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
} }
@ -57,7 +57,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->closure = closure; timer->closure = closure;
if (gpr_time_cmp(deadline, now) <= 0) { if (gpr_time_cmp(deadline, now) <= 0) {
timer->pending = 0; timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return; return;
} }
timer->pending = 1; timer->pending = 1;
@ -76,7 +76,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
if (timer->pending) { if (timer->pending) {
timer->pending = 0; timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer); stop_uv_timer((uv_timer_t *)timer->uv_timer);
} }
} }

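The timer hunks use both entry points: GRPC_CLOSURE_SCHED when the callback should be queued on the exec_ctx and run at the next flush (timer fires, cancels, early-outs), and GRPC_CLOSURE_RUN when the caller wants the closure's scheduler to execute it right away (call_read_cb and tcp_handle_write earlier). The difference, as a sketch that mirrors the usage in the diff rather than quoting the implementation:

  /* either: deferred; appended to the exec_ctx and executed when the
     caller flushes or finishes it */
  GRPC_CLOSURE_SCHED(exec_ctx, done, GRPC_ERROR_NONE);

  /* or: immediate for the on-exec-ctx scheduler; the callback runs before
     this statement returns, so do not hold locks the callback needs */
  GRPC_CLOSURE_RUN(exec_ctx, done, GRPC_ERROR_NONE);

  /* never both for the same pending closure instance */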
@ -156,7 +156,7 @@ static void dummy_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
if (s->shutdown_complete != NULL) { if (s->shutdown_complete != NULL) {
grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
} }
gpr_mu_destroy(&s->mu); gpr_mu_destroy(&s->mu);
@ -201,13 +201,13 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
for (sp = s->head; sp; sp = sp->next) { for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr); grpc_unlink_if_unix_domain_socket(&sp->addr);
grpc_closure_init(&sp->destroyed_closure, destroyed_port, s, GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
if (!sp->orphan_notified) { if (!sp->orphan_notified) {
/* Call the orphan_cb to signal that the FD is about to be closed and /* Call the orphan_cb to signal that the FD is about to be closed and
* should no longer be used. Because at this point, all listening ports * should no longer be used. Because at this point, all listening ports
* have been shutdown already, no need to shutdown again.*/ * have been shutdown already, no need to shutdown again.*/
grpc_closure_init(&sp->orphan_fd_closure, dummy_cb, sp->emfd, GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp->emfd,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
GPR_ASSERT(sp->orphan_cb); GPR_ASSERT(sp->orphan_cb);
sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure, sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
@ -240,7 +240,7 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
struct shutdown_fd_args *args = gpr_malloc(sizeof(*args)); struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
args->fd = sp->emfd; args->fd = sp->emfd;
args->server_mu = &s->mu; args->server_mu = &s->mu;
grpc_closure_init(&sp->orphan_fd_closure, shutdown_fd, args, GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure, sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
sp->server->user_data); sp->server->user_data);
@ -525,11 +525,11 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
for (i = 0; i < pollset_count; i++) { for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
} }
grpc_closure_init(&sp->read_closure, on_read, sp, GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
grpc_closure_init(&sp->write_closure, on_write, sp, GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);

@@ -123,8 +123,8 @@ static void md_only_test_get_request_metadata(
 if (c->is_async) {
 grpc_credentials_metadata_request *cb_arg =
 grpc_credentials_metadata_request_create(creds, cb, user_data);
-grpc_closure_sched(exec_ctx,
-grpc_closure_create(on_simulated_token_fetch_done,
+GRPC_CLOSURE_SCHED(exec_ctx,
+GRPC_CLOSURE_CREATE(on_simulated_token_fetch_done,
 cb_arg, grpc_executor_scheduler),
 GRPC_ERROR_NONE);
 } else {

@ -115,7 +115,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
grpc_httpcli_get( grpc_httpcli_get(
exec_ctx, &context, &detector.pollent, resource_quota, &request, exec_ctx, &context, &detector.pollent, resource_quota, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay), gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
grpc_closure_create(on_compute_engine_detection_http_response, &detector, GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector,
grpc_schedule_on_exec_ctx), grpc_schedule_on_exec_ctx),
&detector.response); &detector.response);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota); grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@ -140,7 +140,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
gpr_mu_unlock(g_polling_mu); gpr_mu_unlock(g_polling_mu);
grpc_httpcli_context_destroy(exec_ctx, &context); grpc_httpcli_context_destroy(exec_ctx, &context);
grpc_closure_init(&destroy_closure, destroy_pollset, GRPC_CLOSURE_INIT(&destroy_closure, destroy_pollset,
grpc_polling_entity_pollset(&detector.pollent), grpc_polling_entity_pollset(&detector.pollent),
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(exec_ctx, grpc_pollset_shutdown(exec_ctx,

@@ -668,7 +668,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
 grpc_httpcli_get(
 exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
 gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
-grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
+GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
 &ctx->responses[HTTP_RESPONSE_KEYS]);
 grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
 grpc_json_destroy(json);
@@ -771,7 +771,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
 gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss);
 }
 http_cb =
-grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);
+GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);
 rsp_idx = HTTP_RESPONSE_KEYS;
 } else {
 req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss);
@@ -783,7 +783,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
 gpr_asprintf(&req.http.path, "/%s%s", path_prefix,
 GRPC_OPENID_CONFIG_URL_SUFFIX);
 }
-http_cb = grpc_closure_create(on_openid_config_retrieved, ctx,
+http_cb = GRPC_CLOSURE_CREATE(on_openid_config_retrieved, ctx,
 grpc_schedule_on_exec_ctx);
 rsp_idx = HTTP_RESPONSE_OPENID;
 }

@ -300,7 +300,7 @@ static void compute_engine_fetch_oauth2(
grpc_resource_quota_create("oauth2_credentials"); grpc_resource_quota_create("oauth2_credentials");
grpc_httpcli_get( grpc_httpcli_get(
exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline, exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline,
grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx), GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response); &metadata_req->response);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota); grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@ -360,7 +360,7 @@ static void refresh_token_fetch_oauth2(
grpc_httpcli_post( grpc_httpcli_post(
exec_ctx, httpcli_context, pollent, resource_quota, &request, body, exec_ctx, httpcli_context, pollent, resource_quota, &request, body,
strlen(body), deadline, strlen(body), deadline,
grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx), GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response); &metadata_req->response);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota); grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
gpr_free(body); gpr_free(body);

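Two allocation styles show up in these credentials/HTTP hunks. GRPC_CLOSURE_INIT fills in a closure embedded in caller-owned storage, while GRPC_CLOSURE_CREATE heap-allocates a one-shot closure that is released after it runs, which is why the grpc_httpcli_get/post callbacks above can simply be handed off. Illustrative fragment only (ep and on_read stand in for any embedded case):

  /* one-shot, heap-allocated: fine to create inline and forget */
  GRPC_CLOSURE_SCHED(exec_ctx,
                     GRPC_CLOSURE_CREATE(response_cb, metadata_req,
                                         grpc_schedule_on_exec_ctx),
                     GRPC_ERROR_NONE);

  /* embedded, reusable: lifetime tied to the struct that contains it */
  GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);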
@ -132,7 +132,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
} }
} }
ep->read_buffer = NULL; ep->read_buffer = NULL;
grpc_closure_sched(exec_ctx, ep->read_cb, error); GRPC_CLOSURE_SCHED(exec_ctx, ep->read_cb, error);
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read"); SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
} }
@ -317,7 +317,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
if (result != TSI_OK) { if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */ /* TODO(yangg) do different things according to the error type? */
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer); grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);
grpc_closure_sched( GRPC_CLOSURE_SCHED(
exec_ctx, cb, exec_ctx, cb,
grpc_set_tsi_error_result( grpc_set_tsi_error_result(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result)); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result));
@ -399,7 +399,7 @@ grpc_endpoint *grpc_secure_endpoint_create(
grpc_slice_buffer_init(&ep->output_buffer); grpc_slice_buffer_init(&ep->output_buffer);
grpc_slice_buffer_init(&ep->source_buffer); grpc_slice_buffer_init(&ep->source_buffer);
ep->read_buffer = NULL; ep->read_buffer = NULL;
grpc_closure_init(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
gpr_mu_init(&ep->protector_mu); gpr_mu_init(&ep->protector_mu);
gpr_ref_init(&ep->ref, 1); gpr_ref_init(&ep->ref, 1);
return &ep->base; return &ep->base;

@ -122,7 +122,7 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_auth_context **auth_context, grpc_auth_context **auth_context,
grpc_closure *on_peer_checked) { grpc_closure *on_peer_checked) {
if (sc == NULL) { if (sc == NULL) {
grpc_closure_sched(exec_ctx, on_peer_checked, GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked,
GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"cannot check peer -- no security connector")); "cannot check peer -- no security connector"));
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
@ -340,7 +340,7 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx,
*auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, *auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_FAKE_TRANSPORT_SECURITY_TYPE); GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
end: end:
grpc_closure_sched(exec_ctx, on_peer_checked, error); GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
} }
@ -602,7 +602,7 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
? c->overridden_target_name ? c->overridden_target_name
: c->target_name, : c->target_name,
&peer, auth_context); &peer, auth_context);
grpc_closure_sched(exec_ctx, on_peer_checked, error); GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
} }
@ -612,7 +612,7 @@ static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
grpc_closure *on_peer_checked) { grpc_closure *on_peer_checked) {
grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context); grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context);
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
grpc_closure_sched(exec_ctx, on_peer_checked, error); GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
} }
static void add_shallow_auth_property_to_peer(tsi_peer *peer, static void add_shallow_auth_property_to_peer(tsi_peer *peer,

@@ -124,7 +124,7 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
 h->shutdown = true;
 }
 // Invoke callback.
-grpc_closure_sched(exec_ctx, h->on_handshake_done, error);
+GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
 }
 static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -173,7 +173,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
 grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
 grpc_channel_args_destroy(exec_ctx, tmp_args);
 // Invoke callback.
-grpc_closure_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
 // Set shutdown to true so that subsequent calls to
 // security_handshaker_shutdown() do nothing.
 h->shutdown = true;
@@ -408,13 +408,13 @@ static grpc_handshaker *security_handshaker_create(
 gpr_ref_init(&h->refs, 1);
 h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
 h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
-grpc_closure_init(&h->on_handshake_data_sent_to_peer,
+GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
 on_handshake_data_sent_to_peer, h,
 grpc_schedule_on_exec_ctx);
-grpc_closure_init(&h->on_handshake_data_received_from_peer,
+GRPC_CLOSURE_INIT(&h->on_handshake_data_received_from_peer,
 on_handshake_data_received_from_peer, h,
 grpc_schedule_on_exec_ctx);
-grpc_closure_init(&h->on_peer_checked, on_peer_checked, h,
+GRPC_CLOSURE_INIT(&h->on_peer_checked, on_peer_checked, h,
 grpc_schedule_on_exec_ctx);
 grpc_slice_buffer_init(&h->outgoing);
 return &h->base;
@@ -440,7 +440,7 @@ static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
 grpc_tcp_server_acceptor *acceptor,
 grpc_closure *on_handshake_done,
 grpc_handshaker_args *args) {
-grpc_closure_sched(exec_ctx, on_handshake_done,
+GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING(
 "Failed to create security handshaker"));
 }

@ -113,7 +113,7 @@ static void on_md_processing_done(
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value); grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
} }
grpc_metadata_array_destroy(&calld->md); grpc_metadata_array_destroy(&calld->md);
grpc_closure_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
} else { } else {
for (size_t i = 0; i < calld->md.count; i++) { for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key); grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
@ -128,7 +128,7 @@ static void on_md_processing_done(
&exec_ctx, calld->transport_op->payload->send_message.send_message); &exec_ctx, calld->transport_op->payload->send_message.send_message);
calld->transport_op->payload->send_message.send_message = NULL; calld->transport_op->payload->send_message.send_message = NULL;
} }
grpc_closure_sched( GRPC_CLOSURE_SCHED(
&exec_ctx, calld->on_done_recv, &exec_ctx, calld->on_done_recv,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details), grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
GRPC_ERROR_INT_GRPC_STATUS, status)); GRPC_ERROR_INT_GRPC_STATUS, status));
@ -151,7 +151,7 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
return; return;
} }
} }
grpc_closure_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error));
} }
static void set_recv_ops_md_callbacks(grpc_call_element *elem, static void set_recv_ops_md_callbacks(grpc_call_element *elem,
@ -193,7 +193,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */ /* initialize members */
memset(calld, 0, sizeof(*calld)); memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem, GRPC_CLOSURE_INIT(&calld->auth_on_recv, auth_on_recv, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) { if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {

@ -50,7 +50,7 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
alarm->tag = tag; alarm->tag = tag;
grpc_cq_begin_op(cq, tag); grpc_cq_begin_op(cq, tag);
grpc_closure_init(&alarm->on_alarm, alarm_cb, alarm, GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_timer_init(&exec_ctx, &alarm->alarm, grpc_timer_init(&exec_ctx, &alarm->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),

@ -520,7 +520,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
} }
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info,
grpc_closure_init(&c->release_call, release_call, c, GRPC_CLOSURE_INIT(&c->release_call, release_call, c,
grpc_schedule_on_exec_ctx)); grpc_schedule_on_exec_ctx));
GPR_TIMER_END("destroy_call", 0); GPR_TIMER_END("destroy_call", 0);
} }
@ -634,7 +634,7 @@ static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
GRPC_CALL_INTERNAL_REF(c, "termination"); GRPC_CALL_INTERNAL_REF(c, "termination");
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error)); set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op( grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
grpc_closure_create(done_termination, c, grpc_schedule_on_exec_ctx)); GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx));
op->cancel_stream = true; op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error; op->payload->cancel_stream.cancel_error = error;
execute_op(exec_ctx, c, op); execute_op(exec_ctx, c, op);
@ -1170,7 +1170,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->completion_data.notify_tag.is_closure) { if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */ /* unrefs bctl->error */
bctl->call = NULL; bctl->call = NULL;
grpc_closure_run(exec_ctx, bctl->completion_data.notify_tag.tag, error); GRPC_CLOSURE_RUN(exec_ctx, bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion"); GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else { } else {
/* unrefs bctl->error */ /* unrefs bctl->error */
@ -1275,7 +1275,7 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
} else { } else {
*call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0); *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
} }
grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, bctl, GRPC_CLOSURE_INIT(&call->receiving_slice_ready, receiving_slice_ready, bctl,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
continue_receiving_slices(exec_ctx, bctl); continue_receiving_slices(exec_ctx, bctl);
} }
@@ -1390,11 +1390,11 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
 call->has_initial_md_been_received = true;
 if (call->saved_receiving_stream_ready_bctlp != NULL) {
-grpc_closure *saved_rsr_closure = grpc_closure_create(
+grpc_closure *saved_rsr_closure = GRPC_CLOSURE_CREATE(
 receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
 grpc_schedule_on_exec_ctx);
 call->saved_receiving_stream_ready_bctlp = NULL;
-grpc_closure_run(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
+GRPC_CLOSURE_RUN(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
 }
 finish_batch_step(exec_ctx, bctl);
@ -1436,7 +1436,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
free_no_op_completion, NULL, free_no_op_completion, NULL,
gpr_malloc(sizeof(grpc_cq_completion))); gpr_malloc(sizeof(grpc_cq_completion)));
} else { } else {
grpc_closure_sched(exec_ctx, notify_tag, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, notify_tag, GRPC_ERROR_NONE);
} }
error = GRPC_CALL_OK; error = GRPC_CALL_OK;
goto done; goto done;
@ -1644,7 +1644,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->received_initial_metadata = true; call->received_initial_metadata = true;
call->buffered_metadata[0] = call->buffered_metadata[0] =
op->data.recv_initial_metadata.recv_initial_metadata; op->data.recv_initial_metadata.recv_initial_metadata;
grpc_closure_init(&call->receiving_initial_metadata_ready, GRPC_CLOSURE_INIT(&call->receiving_initial_metadata_ready,
receiving_initial_metadata_ready, bctl, receiving_initial_metadata_ready, bctl,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
stream_op->recv_initial_metadata = true; stream_op->recv_initial_metadata = true;
@ -1668,7 +1668,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->recv_message = true; stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message; call->receiving_buffer = op->data.recv_message.recv_message;
stream_op_payload->recv_message.recv_message = &call->receiving_stream; stream_op_payload->recv_message.recv_message = &call->receiving_stream;
grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready, GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready,
bctl, grpc_schedule_on_exec_ctx); bctl, grpc_schedule_on_exec_ctx);
stream_op_payload->recv_message.recv_message_ready = stream_op_payload->recv_message.recv_message_ready =
&call->receiving_stream_ready; &call->receiving_stream_ready;
@ -1734,7 +1734,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
} }
gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed); gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
grpc_closure_init(&bctl->finish_batch, finish_batch, bctl, GRPC_CLOSURE_INIT(&bctl->finish_batch, finish_batch, bctl,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
stream_op->on_complete = &bctl->finish_batch; stream_op->on_complete = &bctl->finish_batch;
gpr_atm_rel_store(&call->any_ops_sent_atm, 1); gpr_atm_rel_store(&call->any_ops_sent_atm, 1);

@ -56,7 +56,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
GPR_ASSERT(reserved == NULL); GPR_ASSERT(reserved == NULL);
pr->tag = tag; pr->tag = tag;
pr->cq = cq; pr->cq = cq;
grpc_closure_init(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
op->send_ping = &pr->closure; op->send_ping = &pr->closure;
op->bind_pollset = grpc_cq_pollset(cq); op->bind_pollset = grpc_cq_pollset(cq);
grpc_cq_begin_op(cq, tag); grpc_cq_begin_op(cq, tag);

@ -113,7 +113,7 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
npp->root = w.next; npp->root = w.next;
if (&w == npp->root) { if (&w == npp->root) {
if (npp->shutdown) { if (npp->shutdown) {
grpc_closure_sched(exec_ctx, npp->shutdown, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
} }
npp->root = NULL; npp->root = NULL;
} }
@ -146,7 +146,7 @@ static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(closure != NULL); GPR_ASSERT(closure != NULL);
p->shutdown = closure; p->shutdown = closure;
if (p->root == NULL) { if (p->root == NULL) {
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else { } else {
non_polling_worker *w = p->root; non_polling_worker *w = p->root;
do { do {
@ -417,7 +417,7 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
cqd->outstanding_tag_count = 0; cqd->outstanding_tag_count = 0;
#endif #endif
cq_event_queue_init(&cqd->queue); cq_event_queue_init(&cqd->queue);
grpc_closure_init(&cqd->pollset_shutdown_done, on_pollset_shutdown_done, cc, GRPC_CLOSURE_INIT(&cqd->pollset_shutdown_done, on_pollset_shutdown_done, cc,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
GPR_TIMER_END("grpc_completion_queue_create_internal", 0); GPR_TIMER_END("grpc_completion_queue_create_internal", 0);

@@ -105,17 +105,17 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
 if (op->on_connectivity_state_change) {
 GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
 *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
-grpc_closure_sched(exec_ctx, op->on_connectivity_state_change,
+GRPC_CLOSURE_SCHED(exec_ctx, op->on_connectivity_state_change,
 GRPC_ERROR_NONE);
 }
 if (op->send_ping != NULL) {
-grpc_closure_sched(
+GRPC_CLOSURE_SCHED(
 exec_ctx, op->send_ping,
 GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
 }
 GRPC_ERROR_UNREF(op->disconnect_with_error);
 if (op->on_consumed != NULL) {
-grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
 }
 }
@@ -128,7 +128,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
 static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
 const grpc_call_final_info *final_info,
 grpc_closure *then_schedule_closure) {
-grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
 }
 static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,

@ -269,7 +269,7 @@ static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel, static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
bool send_goaway, grpc_error *send_disconnect) { bool send_goaway, grpc_error *send_disconnect) {
struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc)); struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
grpc_closure_init(&sc->closure, shutdown_cleanup, sc, GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure); grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
grpc_channel_element *elem; grpc_channel_element *elem;
@@ -337,11 +337,11 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
 gpr_mu_lock(&calld->mu_state);
 calld->state = ZOMBIED;
 gpr_mu_unlock(&calld->mu_state);
-grpc_closure_init(
+GRPC_CLOSURE_INIT(
 &calld->kill_zombie_closure, kill_zombie,
 grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
 grpc_schedule_on_exec_ctx);
-grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
+GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
 }
 }
@ -432,7 +432,7 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
orphan_channel(chand); orphan_channel(chand);
server_ref(chand->server); server_ref(chand->server);
maybe_finish_shutdown(exec_ctx, chand->server); maybe_finish_shutdown(exec_ctx, chand->server);
grpc_closure_init(&chand->finish_destroy_channel_closure, GRPC_CLOSURE_INIT(&chand->finish_destroy_channel_closure,
finish_destroy_channel, chand, grpc_schedule_on_exec_ctx); finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);
if (GRPC_TRACER_ON(grpc_server_channel_trace) && error != GRPC_ERROR_NONE) { if (GRPC_TRACER_ON(grpc_server_channel_trace) && error != GRPC_ERROR_NONE) {
@@ -497,11 +497,11 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
 gpr_mu_lock(&calld->mu_state);
 calld->state = ZOMBIED;
 gpr_mu_unlock(&calld->mu_state);
-grpc_closure_init(
+GRPC_CLOSURE_INIT(
 &calld->kill_zombie_closure, kill_zombie,
 grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
 grpc_schedule_on_exec_ctx);
-grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
 GRPC_ERROR_REF(error));
 return;
 }
@ -546,9 +546,9 @@ static void finish_start_new_rpc(
gpr_mu_lock(&calld->mu_state); gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED; calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem, GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
return; return;
} }
@ -563,7 +563,7 @@ static void finish_start_new_rpc(
memset(&op, 0, sizeof(op)); memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE; op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message.recv_message = &calld->payload; op.data.recv_message.recv_message = &calld->payload;
grpc_closure_init(&calld->publish, publish_new_rpc, elem, GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1, grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1,
&calld->publish); &calld->publish);
@ -740,7 +740,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_UNREF(src_error); GRPC_ERROR_UNREF(src_error);
} }
grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error); GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error);
} }
static void server_mutate_op(grpc_call_element *elem, static void server_mutate_op(grpc_call_element *elem,
@ -779,9 +779,9 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
if (calld->state == NOT_STARTED) { if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED; calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem, GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} else if (calld->state == PENDING) { } else if (calld->state == PENDING) {
calld->state = ZOMBIED; calld->state = ZOMBIED;
@ -819,7 +819,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
op.op = GRPC_OP_RECV_INITIAL_METADATA; op.op = GRPC_OP_RECV_INITIAL_METADATA;
op.data.recv_initial_metadata.recv_initial_metadata = op.data.recv_initial_metadata.recv_initial_metadata =
&calld->initial_metadata; &calld->initial_metadata;
grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem, GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1, grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
&calld->got_initial_metadata); &calld->got_initial_metadata);
@ -855,7 +855,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
calld->call = grpc_call_from_top_element(elem); calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state); gpr_mu_init(&calld->mu_state);
grpc_closure_init(&calld->server_on_recv_initial_metadata, GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem, server_on_recv_initial_metadata, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
@ -895,7 +895,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->next = chand->prev = chand; chand->next = chand->prev = chand;
chand->registered_methods = NULL; chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE; chand->connectivity_state = GRPC_CHANNEL_IDLE;
grpc_closure_init(&chand->channel_connectivity_changed, GRPC_CLOSURE_INIT(&chand->channel_connectivity_changed,
channel_connectivity_changed, chand, channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
@@ -1075,7 +1075,7 @@ void grpc_server_start(grpc_server *server) {
 server_ref(server);
 server->starting = true;
-grpc_closure_sched(&exec_ctx, grpc_closure_create(start_listeners, server,
+GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
 grpc_executor_scheduler),
 GRPC_ERROR_NONE);
@ -1255,7 +1255,7 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* Shutdown listeners */ /* Shutdown listeners */
for (l = server->listeners; l; l = l->next) { for (l = server->listeners; l; l = l->next) {
grpc_closure_init(&l->destroy_done, listener_destroy_done, server, GRPC_CLOSURE_INIT(&l->destroy_done, listener_destroy_done, server,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
l->destroy(&exec_ctx, server, l->arg, &l->destroy_done); l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
} }
@@ -1349,11 +1349,11 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
 gpr_mu_lock(&calld->mu_state);
 if (calld->state == ZOMBIED) {
 gpr_mu_unlock(&calld->mu_state);
-grpc_closure_init(
+GRPC_CLOSURE_INIT(
 &calld->kill_zombie_closure, kill_zombie,
 grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
 grpc_schedule_on_exec_ctx);
-grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
 GRPC_ERROR_NONE);
 } else {
 GPR_ASSERT(calld->state == PENDING);

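Note the scheduler argument chosen at each site: almost everything in server.c stays on grpc_schedule_on_exec_ctx, but grpc_server_start hands start_listeners to grpc_executor_scheduler so the work moves to the executor's thread pool instead of whichever thread flushes the current exec_ctx. Side by side, as an illustration rather than a quote of server.c (cheap_follow_up and arg are made up):

  /* runs on the calling thread when the current exec_ctx is flushed */
  GRPC_CLOSURE_SCHED(exec_ctx,
                     GRPC_CLOSURE_CREATE(cheap_follow_up, arg,
                                         grpc_schedule_on_exec_ctx),
                     GRPC_ERROR_NONE);

  /* offloaded to the global executor threads; used above for
     start_listeners, which may do more work than a flush should */
  GRPC_CLOSURE_SCHED(exec_ctx,
                     GRPC_CLOSURE_CREATE(start_listeners, server,
                                         grpc_executor_scheduler),
                     GRPC_ERROR_NONE);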
@ -67,7 +67,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
error = error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutdown connectivity owner"); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutdown connectivity owner");
} }
grpc_closure_sched(exec_ctx, w->notify, error); GRPC_CLOSURE_SCHED(exec_ctx, w->notify, error);
gpr_free(w); gpr_free(w);
} }
GRPC_ERROR_UNREF(tracker->current_error); GRPC_ERROR_UNREF(tracker->current_error);
@ -125,7 +125,7 @@ bool grpc_connectivity_state_notify_on_state_change(
if (current == NULL) { if (current == NULL) {
grpc_connectivity_state_watcher *w = tracker->watchers; grpc_connectivity_state_watcher *w = tracker->watchers;
if (w != NULL && w->notify == notify) { if (w != NULL && w->notify == notify) {
grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED); GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
tracker->watchers = w->next; tracker->watchers = w->next;
gpr_free(w); gpr_free(w);
return false; return false;
@ -133,7 +133,7 @@ bool grpc_connectivity_state_notify_on_state_change(
while (w != NULL) { while (w != NULL) {
grpc_connectivity_state_watcher *rm_candidate = w->next; grpc_connectivity_state_watcher *rm_candidate = w->next;
if (rm_candidate != NULL && rm_candidate->notify == notify) { if (rm_candidate != NULL && rm_candidate->notify == notify) {
grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED); GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
w->next = w->next->next; w->next = w->next->next;
gpr_free(rm_candidate); gpr_free(rm_candidate);
return false; return false;
@ -144,7 +144,7 @@ bool grpc_connectivity_state_notify_on_state_change(
} else { } else {
if (cur != *current) { if (cur != *current) {
*current = cur; *current = cur;
grpc_closure_sched(exec_ctx, notify, GRPC_CLOSURE_SCHED(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error)); GRPC_ERROR_REF(tracker->current_error));
} else { } else {
grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w)); grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
@ -197,7 +197,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name, gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify); w->notify);
} }
grpc_closure_sched(exec_ctx, w->notify, GRPC_CLOSURE_SCHED(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error)); GRPC_ERROR_REF(tracker->current_error));
gpr_free(w); gpr_free(w);
} }

@@ -65,7 +65,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
          there. */
       refcount->destroy.scheduler = grpc_executor_scheduler;
     }
-    grpc_closure_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
   }
 }
@@ -112,7 +112,7 @@ void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
                           grpc_iomgr_cb_func cb, void *cb_arg) {
 #endif
   gpr_ref_init(&refcount->refs, initial_refs);
-  grpc_closure_init(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
+  GRPC_CLOSURE_INIT(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
   refcount->slice_refcount.vtable = &stream_ref_slice_vtable;
   refcount->slice_refcount.sub_refcount = &refcount->slice_refcount;
 }
@@ -202,16 +202,16 @@ void grpc_transport_stream_op_batch_finish_with_failure(
     grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
     grpc_error *error) {
   if (op->recv_message) {
-    grpc_closure_sched(exec_ctx, op->payload->recv_message.recv_message_ready,
+    GRPC_CLOSURE_SCHED(exec_ctx, op->payload->recv_message.recv_message_ready,
                        GRPC_ERROR_REF(error));
   }
   if (op->recv_initial_metadata) {
-    grpc_closure_sched(
+    GRPC_CLOSURE_SCHED(
        exec_ctx,
        op->payload->recv_initial_metadata.recv_initial_metadata_ready,
        GRPC_ERROR_REF(error));
   }
-  grpc_closure_sched(exec_ctx, op->on_complete, error);
+  GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, error);
   if (op->cancel_stream) {
     GRPC_ERROR_UNREF(op->payload->cancel_stream.cancel_error);
   }
@@ -226,13 +226,13 @@ typedef struct {
 static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
   made_transport_op *op = arg;
-  grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
   gpr_free(op);
 }
 grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
   made_transport_op *op = gpr_malloc(sizeof(*op));
-  grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op,
+  GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op,
                     grpc_schedule_on_exec_ctx);
   op->inner_on_complete = on_complete;
   memset(&op->op, 0, sizeof(op->op));
@@ -252,14 +252,14 @@ static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
   made_transport_stream_op *op = arg;
   grpc_closure *c = op->inner_on_complete;
   gpr_free(op);
-  grpc_closure_run(exec_ctx, c, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error));
 }
 grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
     grpc_closure *on_complete) {
   made_transport_stream_op *op = gpr_zalloc(sizeof(*op));
   op->op.payload = &op->payload;
-  grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
+  GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op,
                     op, grpc_schedule_on_exec_ctx);
   op->inner_on_complete = on_complete;
   op->op.on_complete = &op->outer_on_complete;
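The grpc_make_transport_op / grpc_make_transport_stream_op hunks above also show the wrapping idiom these closures are typically used for: the op carries an outer closure whose only job is to free the op and then complete the caller's inner closure. A minimal sketch of that idea in plain C follows; the types and names (closure, made_op, run_closure) are invented for illustration and are not the gRPC ones.

#include <stdio.h>
#include <stdlib.h>

/* Invented, simplified closure type: a callback plus its argument. */
typedef void (*cb_fn)(void *arg, int error);

typedef struct closure {
  cb_fn cb;
  void *cb_arg;
} closure;

static void run_closure(closure *c, int error) { c->cb(c->cb_arg, error); }

/* An "op" that owns an outer completion closure wrapping the caller's one. */
typedef struct made_op {
  closure outer_on_complete;  /* what the lower layer eventually invokes */
  closure *inner_on_complete; /* what the caller asked to be notified */
} made_op;

/* Outer callback: save the inner closure, free the op, then notify. */
static void destroy_made_op(void *arg, int error) {
  made_op *op = arg;
  closure *inner = op->inner_on_complete;
  free(op);
  run_closure(inner, error);
}

static made_op *make_op(closure *on_complete) {
  made_op *op = calloc(1, sizeof(*op));
  op->outer_on_complete.cb = destroy_made_op;
  op->outer_on_complete.cb_arg = op;
  op->inner_on_complete = on_complete;
  return op;
}

static void user_done(void *arg, int error) {
  printf("caller notified: %s, error=%d\n", (const char *)arg, error);
}

int main(void) {
  closure done = {user_done, "demo"};
  made_op *op = make_op(&done);
  run_closure(&op->outer_on_complete, 0); /* a lower layer completing the op */
  return 0;
}

In the diff itself the two destroy functions differ slightly in ordering (destroy_made_transport_op schedules the inner closure before freeing, destroy_made_transport_stream_op frees first and then runs it), but the ownership pattern is the same, and the macro rename does not change it.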

@@ -134,7 +134,7 @@ void grpc_run_bad_client_test(
   grpc_slice_buffer_init(&outgoing);
   grpc_slice_buffer_add(&outgoing, slice);
-  grpc_closure_init(&done_write_closure, done_write, &a,
+  GRPC_CLOSURE_INIT(&done_write_closure, done_write, &a,
                     grpc_schedule_on_exec_ctx);
   /* Write data */
@@ -164,7 +164,7 @@ void grpc_run_bad_client_test(
   grpc_slice_buffer_init(&args.incoming);
   gpr_event_init(&args.read_done);
   grpc_closure read_done_closure;
-  grpc_closure_init(&read_done_closure, read_done, &args,
+  GRPC_CLOSURE_INIT(&read_done_closure, read_done, &args,
                     grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(&exec_ctx, sfd.client, &args.incoming,
                      &read_done_closure);

@@ -54,7 +54,7 @@ static void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr,
     (*addrs)->addrs = gpr_malloc(sizeof(*(*addrs)->addrs));
     (*addrs)->addrs[0].len = 123;
   }
-  grpc_closure_sched(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
 }
 static grpc_ares_request *my_dns_lookup_ares(
@@ -73,7 +73,7 @@ static grpc_ares_request *my_dns_lookup_ares(
     *lb_addrs = grpc_lb_addresses_create(1, NULL);
     grpc_lb_addresses_set_address(*lb_addrs, 0, NULL, 0, false, NULL, NULL);
   }
-  grpc_closure_sched(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
   return NULL;
 }
@@ -133,7 +133,7 @@ static void call_resolver_next_after_locking(grpc_exec_ctx *exec_ctx,
   a->resolver = resolver;
   a->result = result;
   a->on_complete = on_complete;
-  grpc_closure_sched(exec_ctx, grpc_closure_create(
+  GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
                                    call_resolver_next_now_lock_taken, a,
                                    grpc_combiner_scheduler(resolver->combiner)),
                      GRPC_ERROR_NONE);
@@ -155,7 +155,7 @@ int main(int argc, char **argv) {
   gpr_event_init(&ev1);
   call_resolver_next_after_locking(
       &exec_ctx, resolver, &result,
-      grpc_closure_create(on_done, &ev1, grpc_schedule_on_exec_ctx));
+      GRPC_CLOSURE_CREATE(on_done, &ev1, grpc_schedule_on_exec_ctx));
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(wait_loop(5, &ev1));
   GPR_ASSERT(result == NULL);
@@ -164,7 +164,7 @@ int main(int argc, char **argv) {
   gpr_event_init(&ev2);
   call_resolver_next_after_locking(
       &exec_ctx, resolver, &result,
-      grpc_closure_create(on_done, &ev2, grpc_schedule_on_exec_ctx));
+      GRPC_CLOSURE_CREATE(on_done, &ev2, grpc_schedule_on_exec_ctx));
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(wait_loop(30, &ev2));
   GPR_ASSERT(result != NULL);

@@ -101,7 +101,7 @@ static void test_fake_resolver() {
   memset(&on_res_arg, 0, sizeof(on_res_arg));
   on_res_arg.expected_resolver_result = results;
   gpr_event_init(&on_res_arg.ev);
-  grpc_closure *on_resolution = grpc_closure_create(
+  grpc_closure *on_resolution = GRPC_CLOSURE_CREATE(
       on_resolution_cb, &on_res_arg, grpc_combiner_scheduler(combiner));
   // Set resolver results and trigger first resolution. on_resolution_cb
@@ -138,7 +138,7 @@ static void test_fake_resolver() {
   memset(&on_res_arg_update, 0, sizeof(on_res_arg_update));
   on_res_arg_update.expected_resolver_result = results_update;
   gpr_event_init(&on_res_arg_update.ev);
-  on_resolution = grpc_closure_create(on_resolution_cb, &on_res_arg_update,
+  on_resolution = GRPC_CLOSURE_CREATE(on_resolution_cb, &on_res_arg_update,
                                       grpc_combiner_scheduler(combiner));
   // Set updated resolver results and trigger a second resolution.

@@ -57,7 +57,7 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) {
   on_resolution_arg on_res_arg;
   memset(&on_res_arg, 0, sizeof(on_res_arg));
   on_res_arg.expected_server_name = uri->path;
-  grpc_closure *on_resolution = grpc_closure_create(
+  grpc_closure *on_resolution = GRPC_CLOSURE_CREATE(
       on_resolution_cb, &on_res_arg, grpc_schedule_on_exec_ctx);
   grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,

@@ -137,8 +137,8 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
                        grpc_tcp_server_acceptor *acceptor) {
   gpr_free(acceptor);
   test_tcp_server *server = arg;
-  grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
-  grpc_closure_init(&on_write, done_write, NULL, grpc_schedule_on_exec_ctx);
+  GRPC_CLOSURE_INIT(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
+  GRPC_CLOSURE_INIT(&on_write, done_write, NULL, grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&state.temp_incoming_buffer);
   grpc_slice_buffer_init(&state.outgoing_buffer);
   state.tcp = tcp;

@@ -387,19 +387,19 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
   conn->pollset_set = grpc_pollset_set_create();
   grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset);
   grpc_endpoint_add_to_pollset_set(exec_ctx, endpoint, conn->pollset_set);
-  grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn,
+  GRPC_CLOSURE_INIT(&conn->on_read_request_done, on_read_request_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
-  grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, conn,
+  GRPC_CLOSURE_INIT(&conn->on_server_connect_done, on_server_connect_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
-  grpc_closure_init(&conn->on_write_response_done, on_write_response_done, conn,
+  GRPC_CLOSURE_INIT(&conn->on_write_response_done, on_write_response_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
-  grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn,
+  GRPC_CLOSURE_INIT(&conn->on_client_read_done, on_client_read_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
-  grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn,
+  GRPC_CLOSURE_INIT(&conn->on_client_write_done, on_client_write_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
-  grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn,
+  GRPC_CLOSURE_INIT(&conn->on_server_read_done, on_server_read_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
-  grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn,
+  GRPC_CLOSURE_INIT(&conn->on_server_write_done, on_server_write_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
   grpc_slice_buffer_init(&conn->client_read_buffer);
   grpc_slice_buffer_init(&conn->client_deferred_write_buffer);
@@ -491,7 +491,7 @@ void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) {
   gpr_free(proxy->proxy_name);
   grpc_channel_args_destroy(&exec_ctx, proxy->channel_args);
   grpc_pollset_shutdown(&exec_ctx, proxy->pollset,
-                        grpc_closure_create(destroy_pollset, proxy->pollset,
+                        GRPC_CLOSURE_CREATE(destroy_pollset, proxy->pollset,
                                             grpc_schedule_on_exec_ctx));
   grpc_combiner_unref(&exec_ctx, proxy->combiner);
   gpr_free(proxy);

@@ -384,9 +384,9 @@ static void finish_resolve(grpc_exec_ctx *exec_ctx, void *arg,
       grpc_lb_addresses_set_address(lb_addrs, 0, NULL, 0, NULL, NULL, NULL);
       *r->lb_addrs = lb_addrs;
     }
-    grpc_closure_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, GRPC_ERROR_NONE);
   } else {
-    grpc_closure_sched(exec_ctx, r->on_done,
+    GRPC_CLOSURE_SCHED(exec_ctx, r->on_done,
                        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                            "Resolution failed", &error, 1));
   }
@@ -408,7 +408,7 @@ void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr,
   grpc_timer_init(
       exec_ctx, &r->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                         gpr_time_from_seconds(1, GPR_TIMESPAN)),
-      grpc_closure_create(finish_resolve, r, grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx),
      gpr_now(GPR_CLOCK_MONOTONIC));
 }
@@ -424,7 +424,7 @@ grpc_ares_request *my_dns_lookup_ares(
   grpc_timer_init(
       exec_ctx, &r->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                         gpr_time_from_seconds(1, GPR_TIMESPAN)),
-      grpc_closure_create(finish_resolve, r, grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx),
      gpr_now(GPR_CLOCK_MONOTONIC));
   return NULL;
 }
@@ -452,7 +452,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
   future_connect *fc = arg;
   if (error != GRPC_ERROR_NONE) {
     *fc->ep = NULL;
-    grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(exec_ctx, fc->closure, GRPC_ERROR_REF(error));
   } else if (g_server != NULL) {
     grpc_endpoint *client;
     grpc_endpoint *server;
@@ -464,7 +464,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
     grpc_server_setup_transport(exec_ctx, g_server, transport, NULL, NULL);
     grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
-    grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(exec_ctx, fc->closure, GRPC_ERROR_NONE);
   } else {
     sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline);
   }
@@ -475,7 +475,7 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                           grpc_endpoint **ep, gpr_timespec deadline) {
   if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) {
     *ep = NULL;
-    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                                               "Connect deadline exceeded"));
     return;
   }
@@ -487,7 +487,7 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
   grpc_timer_init(
       exec_ctx, &fc->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                          gpr_time_from_millis(1, GPR_TIMESPAN)),
-      grpc_closure_create(do_connect, fc, grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_CREATE(do_connect, fc, grpc_schedule_on_exec_ctx),
      gpr_now(GPR_CLOCK_MONOTONIC));
 }

@@ -84,7 +84,7 @@ static void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr,
     (*addrs)->addrs[0].len = sizeof(*sa);
     gpr_mu_unlock(&g_mu);
   }
-  grpc_closure_sched(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
 }
 static grpc_ares_request *my_dns_lookup_ares(
@@ -113,7 +113,7 @@ static grpc_ares_request *my_dns_lookup_ares(
     gpr_free(sa);
     gpr_mu_unlock(&g_mu);
   }
-  grpc_closure_sched(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
   return NULL;
 }

@@ -197,7 +197,7 @@ static void recv_im_ready(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {
   grpc_call_element *elem = arg;
   call_data *calld = elem->call_data;
-  grpc_closure_sched(
+  GRPC_CLOSURE_SCHED(
       exec_ctx, calld->recv_im_ready,
       grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Failure that's not preventable.", &error, 1),
@@ -213,7 +213,7 @@ static void start_transport_stream_op_batch(
     calld->recv_im_ready =
         op->payload->recv_initial_metadata.recv_initial_metadata_ready;
     op->payload->recv_initial_metadata.recv_initial_metadata_ready =
-        grpc_closure_create(recv_im_ready, elem, grpc_schedule_on_exec_ctx);
+        GRPC_CLOSURE_CREATE(recv_im_ready, elem, grpc_schedule_on_exec_ctx);
   }
   grpc_call_next_op(exec_ctx, elem, op);
 }

@@ -77,7 +77,7 @@ static void test_get(int port) {
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
   grpc_httpcli_get(
       &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
-      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
   grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
@@ -119,7 +119,7 @@ static void test_post(int port) {
   grpc_httpcli_post(
       &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
       n_seconds_time(15),
-      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
   grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
@@ -195,7 +195,7 @@ int main(int argc, char **argv) {
   test_post(port);
   grpc_httpcli_context_destroy(&exec_ctx, &g_context);
-  grpc_closure_init(&destroyed, destroy_pops, &g_pops,
+  GRPC_CLOSURE_INIT(&destroyed, destroy_pops, &g_pops,
                     grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
                         &destroyed);

@@ -78,7 +78,7 @@ static void test_get(int port) {
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
   grpc_httpcli_get(
       &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
-      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
   grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
@@ -121,7 +121,7 @@ static void test_post(int port) {
   grpc_httpcli_post(
       &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
       n_seconds_time(15),
-      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
   grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
@@ -198,7 +198,7 @@ int main(int argc, char **argv) {
   test_post(port);
   grpc_httpcli_context_destroy(&exec_ctx, &g_context);
-  grpc_closure_init(&destroyed, destroy_pops, &g_pops,
+  GRPC_CLOSURE_INIT(&destroyed, destroy_pops, &g_pops,
                     grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
                         &destroyed);

@@ -45,8 +45,8 @@ static void test_execute_one(void) {
   gpr_event done;
   gpr_event_init(&done);
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_closure_sched(&exec_ctx,
-                     grpc_closure_create(set_event_to_true, &done,
+  GRPC_CLOSURE_SCHED(&exec_ctx,
+                     GRPC_CLOSURE_CREATE(set_event_to_true, &done,
                                          grpc_combiner_scheduler(lock)),
                      GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
@@ -83,8 +83,8 @@ static void execute_many_loop(void *a) {
       ex_args *c = gpr_malloc(sizeof(*c));
       c->ctr = &args->ctr;
       c->value = n++;
-      grpc_closure_sched(&exec_ctx,
-                         grpc_closure_create(
+      GRPC_CLOSURE_SCHED(&exec_ctx,
+                         GRPC_CLOSURE_CREATE(
                              check_one, c, grpc_combiner_scheduler(args->lock)),
                          GRPC_ERROR_NONE);
       grpc_exec_ctx_flush(&exec_ctx);
@@ -93,8 +93,8 @@ static void execute_many_loop(void *a) {
     // picking it up
     gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100));
   }
-  grpc_closure_sched(&exec_ctx,
-                     grpc_closure_create(set_event_to_true, &args->done,
+  GRPC_CLOSURE_SCHED(&exec_ctx,
+                     GRPC_CLOSURE_CREATE(set_event_to_true, &args->done,
                                          grpc_combiner_scheduler(args->lock)),
                      GRPC_ERROR_NONE);
   grpc_exec_ctx_finish(&exec_ctx);
@@ -131,8 +131,8 @@ static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 }
 static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_closure_sched(exec_ctx,
-                     grpc_closure_create(in_finally, arg,
+  GRPC_CLOSURE_SCHED(exec_ctx,
+                     GRPC_CLOSURE_CREATE(in_finally, arg,
                                          grpc_combiner_finally_scheduler(arg)),
                      GRPC_ERROR_NONE);
 }
@@ -143,9 +143,9 @@ static void test_execute_finally(void) {
   grpc_combiner *lock = grpc_combiner_create();
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   gpr_event_init(&got_in_finally);
-  grpc_closure_sched(
+  GRPC_CLOSURE_SCHED(
       &exec_ctx,
-      grpc_closure_create(add_finally, lock, grpc_combiner_scheduler(lock)),
+      GRPC_CLOSURE_CREATE(add_finally, lock, grpc_combiner_scheduler(lock)),
       GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(gpr_event_wait(&got_in_finally,

@@ -66,7 +66,7 @@ int main(int argc, char **argv) {
   g_pollset = gpr_zalloc(grpc_pollset_size());
   grpc_pollset_init(g_pollset, &g_mu);
   grpc_endpoint_tests(configs[0], g_pollset, g_mu);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                     grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);

@@ -198,9 +198,9 @@ static void read_and_write_test(grpc_endpoint_test_config config,
   state.write_done = 0;
   state.current_read_data = 0;
   state.current_write_data = 0;
-  grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state,
+  GRPC_CLOSURE_INIT(&state.done_read, read_and_write_test_read_handler, &state,
                     grpc_schedule_on_exec_ctx);
-  grpc_closure_init(&state.done_write, read_and_write_test_write_handler,
+  GRPC_CLOSURE_INIT(&state.done_write, read_and_write_test_write_handler,
                     &state, grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&state.outgoing);
   grpc_slice_buffer_init(&state.incoming);
@@ -287,19 +287,19 @@ static void multiple_shutdown_test(grpc_endpoint_test_config config) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
   grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
-                     grpc_closure_create(inc_on_failure, &fail_count,
+                     GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                          grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 0);
   grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
   wait_for_fail_count(&exec_ctx, &fail_count, 1);
   grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
-                     grpc_closure_create(inc_on_failure, &fail_count,
+                     GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                          grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 2);
   grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a"));
   grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer,
-                      grpc_closure_create(inc_on_failure, &fail_count,
+                      GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                           grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 3);
   grpc_endpoint_shutdown(&exec_ctx, f.client_ep,

@@ -106,7 +106,7 @@ static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx,
   int i;
   for (i = 0; i < num_pollsets; i++) {
-    grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset,
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, pollsets[i].pollset,
                       grpc_schedule_on_exec_ctx);
     grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed);
@@ -280,7 +280,7 @@ static void test_threading(void) {
   grpc_pollset_add_fd(&exec_ctx, shared.pollset, shared.wakeup_desc);
   grpc_fd_notify_on_read(
       &exec_ctx, shared.wakeup_desc,
-      grpc_closure_init(&shared.on_wakeup, test_threading_wakeup, &shared,
+      GRPC_CLOSURE_INIT(&shared.on_wakeup, test_threading_wakeup, &shared,
                         grpc_schedule_on_exec_ctx));
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -296,7 +296,7 @@ static void test_threading(void) {
   grpc_fd_shutdown(&exec_ctx, shared.wakeup_desc, GRPC_ERROR_CANCELLED);
   grpc_fd_orphan(&exec_ctx, shared.wakeup_desc, NULL, NULL, "done");
   grpc_pollset_shutdown(&exec_ctx, shared.pollset,
-                        grpc_closure_create(destroy_pollset, shared.pollset,
+                        GRPC_CLOSURE_CREATE(destroy_pollset, shared.pollset,
                                             grpc_schedule_on_exec_ctx));
   grpc_exec_ctx_finish(&exec_ctx);
 }

@@ -205,7 +205,7 @@ static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
   se->sv = sv;
   se->em_fd = grpc_fd_create(fd, "listener");
   grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
-  grpc_closure_init(&se->session_read_closure, session_read_cb, se,
+  GRPC_CLOSURE_INIT(&se->session_read_closure, session_read_cb, se,
                     grpc_schedule_on_exec_ctx);
   grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
@@ -235,7 +235,7 @@ static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
   sv->em_fd = grpc_fd_create(fd, "server");
   grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
   /* Register to be interested in reading from listen_fd. */
-  grpc_closure_init(&sv->listen_closure, listen_cb, sv,
+  GRPC_CLOSURE_INIT(&sv->listen_closure, listen_cb, sv,
                     grpc_schedule_on_exec_ctx);
   grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);
@@ -319,7 +319,7 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
   if (errno == EAGAIN) {
     gpr_mu_lock(g_mu);
     if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
-      grpc_closure_init(&cl->write_closure, client_session_write, cl,
+      GRPC_CLOSURE_INIT(&cl->write_closure, client_session_write, cl,
                         grpc_schedule_on_exec_ctx);
       grpc_fd_notify_on_write(exec_ctx, cl->em_fd, &cl->write_closure);
       cl->client_write_cnt++;
@@ -445,9 +445,9 @@ static void test_grpc_fd_change(void) {
   grpc_closure second_closure;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_closure_init(&first_closure, first_read_callback, &a,
+  GRPC_CLOSURE_INIT(&first_closure, first_read_callback, &a,
                     grpc_schedule_on_exec_ctx);
-  grpc_closure_init(&second_closure, second_read_callback, &b,
+  GRPC_CLOSURE_INIT(&second_closure, second_read_callback, &b,
                     grpc_schedule_on_exec_ctx);
   init_change_data(&a);
@@ -533,7 +533,7 @@ int main(int argc, char **argv) {
   grpc_pollset_init(g_pollset, &g_mu);
   test_grpc_fd();
   test_grpc_fd_change();
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                     grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_flush(&exec_ctx);

Some files were not shown because too many files have changed in this diff.
