Merge pull request #9207 from ctiller/cleanup_closures

Changes to exec_ctx/closure/combiner/workqueue interfaces
pull/8807/head^2
Craig Tiller authored 8 years ago; committed by GitHub
commit 360f5d2abf
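
Reviewer summary (not part of the original commit message): the change repeated across most of the hunks below is that a closure now carries its own scheduler, chosen when the closure is built via the new final argument to grpc_closure_init/grpc_closure_create (grpc_schedule_on_exec_ctx, grpc_combiner_scheduler(...), or grpc_combiner_finally_scheduler(...)); callers then use grpc_closure_sched instead of grpc_exec_ctx_sched. A minimal before/after sketch; my_cb, my_arg and my_closure are placeholder names, not identifiers from this PR:

    /* Before: the scheduling decision (trailing workqueue argument) was made
       at call time, separately from closure construction. */
    grpc_closure_init(&my_closure, my_cb, my_arg);
    grpc_exec_ctx_sched(exec_ctx, &my_closure, GRPC_ERROR_NONE, NULL);

    /* After: the closure is bound to its scheduler at init time, so
       scheduling needs only the closure and the error. */
    grpc_closure_init(&my_closure, my_cb, my_arg, grpc_schedule_on_exec_ctx);
    grpc_closure_sched(exec_ctx, &my_closure, GRPC_ERROR_NONE);
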
Changed files (change counts in parentheses):
  1. include/grpc++/resource_quota.h (3)
  2. src/core/ext/census/grpc_filter.c (3)
  3. src/core/ext/client_channel/channel_connectivity.c (3)
  4. src/core/ext/client_channel/client_channel.c (50)
  5. src/core/ext/client_channel/http_connect_handshaker.c (8)
  6. src/core/ext/client_channel/subchannel.c (13)
  7. src/core/ext/lb_policy/grpclb/grpclb.c (41)
  8. src/core/ext/lb_policy/pick_first/pick_first.c (28)
  9. src/core/ext/lb_policy/round_robin/round_robin.c (22)
  10. src/core/ext/load_reporting/load_reporting_filter.c (3)
  11. src/core/ext/resolver/dns/native/dns_resolver.c (13)
  12. src/core/ext/resolver/sockaddr/sockaddr_resolver.c (4)
  13. src/core/ext/transport/chttp2/client/chttp2_connector.c (10)
  14. src/core/ext/transport/chttp2/server/chttp2_server.c (3)
  15. src/core/ext/transport/chttp2/transport/chttp2_transport.c (189)
  16. src/core/ext/transport/chttp2/transport/hpack_parser.c (9)
  17. src/core/ext/transport/chttp2/transport/internal.h (4)
  18. src/core/ext/transport/cronet/transport/cronet_transport.c (43)
  19. src/core/lib/channel/channel_stack.c (9)
  20. src/core/lib/channel/compress_filter.c (6)
  21. src/core/lib/channel/deadline_filter.c (11)
  22. src/core/lib/channel/handshaker.c (8)
  23. src/core/lib/channel/http_client_filter.c (15)
  24. src/core/lib/channel/http_server_filter.c (9)
  25. src/core/lib/channel/message_size_filter.c (5)
  26. src/core/lib/http/httpcli.c (16)
  27. src/core/lib/http/httpcli_security_connector.c (2)
  28. src/core/lib/iomgr/closure.c (38)
  29. src/core/lib/iomgr/closure.h (39)
  30. src/core/lib/iomgr/combiner.c (104)
  31. src/core/lib/iomgr/combiner.h (10)
  32. src/core/lib/iomgr/ev_epoll_linux.c (37)
  33. src/core/lib/iomgr/ev_poll_posix.c (26)
  34. src/core/lib/iomgr/ev_posix.c (5)
  35. src/core/lib/iomgr/ev_posix.h (3)
  36. src/core/lib/iomgr/exec_ctx.c (134)
  37. src/core/lib/iomgr/exec_ctx.h (31)
  38. src/core/lib/iomgr/executor.c (30)
  39. src/core/lib/iomgr/executor.h (4)
  40. src/core/lib/iomgr/pollset_uv.c (2)
  41. src/core/lib/iomgr/pollset_windows.c (5)
  42. src/core/lib/iomgr/resolve_address_posix.c (10)
  43. src/core/lib/iomgr/resolve_address_uv.c (6)
  44. src/core/lib/iomgr/resolve_address_windows.c (7)
  45. src/core/lib/iomgr/resource_quota.c (99)
  46. src/core/lib/iomgr/socket_windows.c (4)
  47. src/core/lib/iomgr/tcp_client_posix.c (15)
  48. src/core/lib/iomgr/tcp_client_uv.c (2)
  49. src/core/lib/iomgr/tcp_client_windows.c (6)
  50. src/core/lib/iomgr/tcp_posix.c (17)
  51. src/core/lib/iomgr/tcp_server_posix.c (16)
  52. src/core/lib/iomgr/tcp_server_uv.c (4)
  53. src/core/lib/iomgr/tcp_server_windows.c (11)
  54. src/core/lib/iomgr/tcp_uv.c (12)
  55. src/core/lib/iomgr/tcp_windows.c (27)
  56. src/core/lib/iomgr/timer_generic.c (14)
  57. src/core/lib/iomgr/timer_uv.c (9)
  58. src/core/lib/iomgr/udp_server.c (10)
  59. src/core/lib/iomgr/workqueue.h (11)
  60. src/core/lib/iomgr/workqueue_uv.c (5)
  61. src/core/lib/iomgr/workqueue_windows.c (5)
  62. src/core/lib/security/credentials/fake/fake_credentials.c (5)
  63. src/core/lib/security/credentials/google_default/google_default_credentials.c (6)
  64. src/core/lib/security/credentials/jwt/jwt_verifier.c (8)
  65. src/core/lib/security/credentials/oauth2/oauth2_credentials.c (12)
  66. src/core/lib/security/transport/secure_endpoint.c (9)
  67. src/core/lib/security/transport/security_connector.c (10)
  68. src/core/lib/security/transport/security_handshaker.c (18)
  69. src/core/lib/security/transport/server_auth_filter.c (16)
  70. src/core/lib/surface/call.c (29)
  71. src/core/lib/surface/channel_ping.c (2)
  72. src/core/lib/surface/completion_queue.c (3)
  73. src/core/lib/surface/lame_client.c (10)
  74. src/core/lib/surface/server.c (53)
  75. src/core/lib/transport/connectivity_state.c (14)
  76. src/core/lib/transport/transport.c (27)
  77. test/core/bad_client/bad_client.c (6)
  78. test/core/client_channel/resolvers/dns_resolver_connectivity_test.c (10)
  79. test/core/client_channel/resolvers/sockaddr_resolver_test.c (4)
  80. test/core/client_channel/set_initial_connect_string_test.c (2)
  81. test/core/end2end/bad_server_response_test.c (4)
  82. test/core/end2end/fake_resolver.c (4)
  83. test/core/end2end/fixtures/http_proxy.c (26)
  84. test/core/end2end/fuzzers/api_fuzzer.c (14)
  85. test/core/end2end/tests/filter_causes_close.c (7)
  86. test/core/http/httpcli_test.c (18)
  87. test/core/http/httpscli_test.c (18)
  88. test/core/internal_api_canaries/iomgr.c (9)
  89. test/core/iomgr/combiner_test.c (27)
  90. test/core/iomgr/endpoint_pair_test.c (3)
  91. test/core/iomgr/endpoint_tests.c (14)
  92. test/core/iomgr/ev_epoll_linux_test.c (3)
  93. test/core/iomgr/fd_posix_test.c (23)
  94. test/core/iomgr/resolve_address_test.c (45)
  95. test/core/iomgr/resource_quota_test.c (9)
  96. test/core/iomgr/tcp_client_posix_test.c (7)
  97. test/core/iomgr/tcp_posix_test.c (15)
  98. test/core/iomgr/tcp_server_posix_test.c (5)
  99. test/core/iomgr/udp_server_test.c (3)
  100. test/core/security/credentials_test.c (12)
Some files were not shown because too many files have changed in this diff.

--- a/include/grpc++/resource_quota.h
+++ b/include/grpc++/resource_quota.h
@@ -37,6 +37,7 @@
 struct grpc_resource_quota;
 #include <grpc++/impl/codegen/config.h>
+#include <grpc++/impl/codegen/grpc_library.h>
 namespace grpc {
@@ -44,7 +45,7 @@ namespace grpc {
 /// A ResourceQuota can be attached to a server (via ServerBuilder), or a client
 /// channel (via ChannelArguments). gRPC will attempt to keep memory used by
 /// all attached entities below the ResourceQuota bound.
-class ResourceQuota final {
+class ResourceQuota final : private GrpcLibraryCodegen {
  public:
   explicit ResourceQuota(const grpc::string& name);
   ResourceQuota();

--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -154,7 +154,8 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
   memset(d, 0, sizeof(*d));
   d->start_ts = args->start_time;
   /* TODO(hongyu): call census_tracing_start_op here. */
-  grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
+  grpc_closure_init(&d->finish_recv, server_on_done_recv, elem,
+                    grpc_schedule_on_exec_ctx);
   return GRPC_ERROR_NONE;
 }

--- a/src/core/ext/client_channel/channel_connectivity.c
+++ b/src/core/ext/client_channel/channel_connectivity.c
@@ -198,7 +198,8 @@ void grpc_channel_watch_connectivity_state(
   grpc_cq_begin_op(cq, tag);
   gpr_mu_init(&w->mu);
-  grpc_closure_init(&w->on_complete, watch_complete, w);
+  grpc_closure_init(&w->on_complete, watch_complete, w,
+                    grpc_schedule_on_exec_ctx);
   w->phase = WAITING;
   w->state = last_observed_state;
   w->cq = cq;

--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -249,7 +249,8 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
   w->chand = chand;
-  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
+  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w,
+                    grpc_schedule_on_exec_ctx);
   w->state = current_state;
   w->lb_policy = lb_policy;
   grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
@@ -361,14 +362,12 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
   }
   chand->method_params_table = method_params_table;
   if (lb_policy != NULL) {
-    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
-                               NULL);
+    grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
   } else if (chand->resolver == NULL /* disconnected */) {
     grpc_closure_list_fail_all(
         &chand->waiting_for_config_closures,
         GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
-    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
-                               NULL);
+    grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
   }
   if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
     GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
@@ -425,7 +424,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                   grpc_transport_op *op) {
   channel_data *chand = elem->channel_data;
-  grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
   GPR_ASSERT(op->set_accept_stream == false);
   if (op->bind_pollset != NULL) {
@@ -444,9 +443,8 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
   if (op->send_ping != NULL) {
     if (chand->lb_policy == NULL) {
-      grpc_exec_ctx_sched(exec_ctx, op->send_ping,
-                          GRPC_ERROR_CREATE("Ping with no load balancing"),
-                          NULL);
+      grpc_closure_sched(exec_ctx, op->send_ping,
+                         GRPC_ERROR_CREATE("Ping with no load balancing"));
     } else {
       grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
       op->bind_pollset = NULL;
@@ -465,8 +463,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
     if (!chand->started_resolving) {
       grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
                                  GRPC_ERROR_REF(op->disconnect_with_error));
-      grpc_exec_ctx_enqueue_list(exec_ctx,
-                                 &chand->waiting_for_config_closures, NULL);
+      grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
     }
     if (chand->lb_policy != NULL) {
       grpc_pollset_set_del_pollset_set(exec_ctx,
@@ -511,7 +508,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
   gpr_mu_init(&chand->mu);
   chand->owning_stack = args->channel_stack;
   grpc_closure_init(&chand->on_resolver_result_changed,
-                    on_resolver_result_changed, chand);
+                    on_resolver_result_changed, chand,
+                    grpc_schedule_on_exec_ctx);
   chand->interested_parties = grpc_pollset_set_create();
   grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                                "client_channel");
@@ -678,8 +676,9 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
   calld->waiting_ops_count = 0;
   calld->waiting_ops_capacity = 0;
   GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
-  grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
-                      GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(
+      exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx),
+      GRPC_ERROR_NONE);
 }
 static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
@@ -761,14 +760,14 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
   if (cpa->connected_subchannel == NULL) {
     /* cancelled, do nothing */
   } else if (error != GRPC_ERROR_NONE) {
-    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
+    grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
   } else {
     call_data *calld = cpa->elem->call_data;
     gpr_mu_lock(&calld->mu);
     if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
                         cpa->initial_metadata_flags, cpa->connected_subchannel,
                         cpa->on_ready, GRPC_ERROR_NONE)) {
-      grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
     }
     gpr_mu_unlock(&calld->mu);
   }
@@ -800,9 +799,9 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
       cpa = closure->cb_arg;
       if (cpa->connected_subchannel == connected_subchannel) {
         cpa->connected_subchannel = NULL;
-        grpc_exec_ctx_sched(
+        grpc_closure_sched(
             exec_ctx, cpa->on_ready,
-            GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+            GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
       }
     }
     gpr_mu_unlock(&chand->mu);
@@ -853,12 +852,12 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     cpa->connected_subchannel = connected_subchannel;
     cpa->on_ready = on_ready;
     cpa->elem = elem;
-    grpc_closure_init(&cpa->closure, continue_picking, cpa);
+    grpc_closure_init(&cpa->closure, continue_picking, cpa,
+                      grpc_schedule_on_exec_ctx);
     grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
                              GRPC_ERROR_NONE);
   } else {
-    grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
   }
   gpr_mu_unlock(&chand->mu);
@@ -943,7 +942,8 @@ retry:
       calld->connected_subchannel == NULL &&
       op->send_initial_metadata != NULL) {
     calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
-    grpc_closure_init(&calld->next_step, subchannel_ready, elem);
+    grpc_closure_init(&calld->next_step, subchannel_ready, elem,
+                      grpc_schedule_on_exec_ctx);
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
     /* If a subchannel is not available immediately, the polling entity from
        call_data should be provided to channel_data's interested_parties, so
@@ -1089,7 +1089,8 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
     // get the service config data once the resolver returns.
     // Take a reference to the call stack to be owned by the callback.
     GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
-    grpc_closure_init(&calld->read_service_config, read_service_config, elem);
+    grpc_closure_init(&calld->read_service_config, read_service_config, elem,
+                      grpc_schedule_on_exec_ctx);
     grpc_closure_list_append(&chand->waiting_for_config_closures,
                              &calld->read_service_config, GRPC_ERROR_NONE);
     gpr_mu_unlock(&chand->mu);
@@ -1202,7 +1203,8 @@ void grpc_client_channel_watch_connectivity_state(
   w->pollset = pollset;
   w->on_complete = on_complete;
   grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
-  grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
+  grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
+                    grpc_schedule_on_exec_ctx);
   GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
                          "external_connectivity_watcher");
   gpr_mu_lock(&chand->mu);
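
Reviewer note on the waiting_for_config_closures hunks above: draining a closure list no longer goes through grpc_exec_ctx_enqueue_list with a trailing NULL workqueue; since every queued closure already knows its scheduler, the whole list is handed to grpc_closure_list_sched. A condensed sketch of the fail-then-flush pattern from these hunks (paraphrased, not copied verbatim; the error message is illustrative):

    /* Attach a failure status to every queued closure, then let each one run
       on whatever scheduler it was initialized with. */
    grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
                               GRPC_ERROR_CREATE("Channel disconnected"));
    grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);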

--- a/src/core/ext/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -131,7 +131,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
     handshaker->shutdown = true;
   }
   // Invoke callback.
-  grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
+  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
 }
 // Callback invoked when finished writing HTTP CONNECT request.
@@ -229,7 +229,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
     goto done;
   }
   // Success. Invoke handshake-done callback.
-  grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
+  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
 done:
   // Set shutdown to true so that subsequent calls to
   // http_connect_handshaker_shutdown() do nothing.
@@ -313,9 +313,9 @@ grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server) {
   handshaker->proxy_server = gpr_strdup(proxy_server);
   grpc_slice_buffer_init(&handshaker->write_buffer);
   grpc_closure_init(&handshaker->request_done_closure, on_write_done,
-                    handshaker);
+                    handshaker, grpc_schedule_on_exec_ctx);
   grpc_closure_init(&handshaker->response_read_closure, on_read_done,
-                    handshaker);
+                    handshaker, grpc_schedule_on_exec_ctx);
   grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
                         &handshaker->http_response);
   return &handshaker->base;

--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -293,8 +293,9 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
   gpr_atm old_refs;
   old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
   if (old_refs == 1) {
-    grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c),
-                        GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c,
+                                                     grpc_schedule_on_exec_ctx),
+                       GRPC_ERROR_NONE);
   }
 }
@@ -330,7 +331,8 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
   c->args = grpc_channel_args_copy(args->args);
   c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
       &c->root_external_state_watcher;
-  grpc_closure_init(&c->connected, subchannel_connected, c);
+  grpc_closure_init(&c->connected, subchannel_connected, c,
+                    grpc_schedule_on_exec_ctx);
   grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
                                "subchannel");
   int initial_backoff_ms =
@@ -505,7 +507,8 @@ void grpc_subchannel_notify_on_state_change(
     w->subchannel = c;
     w->pollset_set = interested_parties;
     w->notify = notify;
-    grpc_closure_init(&w->closure, on_external_state_watcher_done, w);
+    grpc_closure_init(&w->closure, on_external_state_watcher_done, w,
+                      grpc_schedule_on_exec_ctx);
     if (interested_parties != NULL) {
       grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
                                        interested_parties);
@@ -626,7 +629,7 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx,
   sw_subchannel->subchannel = c;
   sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
   grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed,
-                    sw_subchannel);
+                    sw_subchannel, grpc_schedule_on_exec_ctx);
   if (c->disconnected) {
     gpr_free(sw_subchannel);
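
Reviewer note: for one-shot callbacks that used to be created and scheduled in a single grpc_exec_ctx_sched call (see the retry_ops and subchannel_destroy hunks above), the scheduler argument simply moves into grpc_closure_create. A hedged sketch; do_cleanup and arg are placeholder names, not identifiers from this PR:

    /* grpc_closure_create heap-allocates the closure and ties it to a
       scheduler; grpc_closure_sched then queues it with the given error. */
    grpc_closure_sched(
        exec_ctx,
        grpc_closure_create(do_cleanup, arg, grpc_schedule_on_exec_ctx),
        GRPC_ERROR_NONE);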

--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -180,8 +180,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
   wrapped_rr_closure_arg *wc_arg = arg;
   GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-  grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
-                      NULL);
+  grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
   if (wc_arg->rr_policy != NULL) {
     /* if *target is NULL, no pick has been made by the RR policy (eg, all
@@ -248,7 +247,8 @@ static void add_pending_pick(pending_pick **root,
       pick_args->lb_token_mdelem_storage;
   pp->wrapped_on_complete_arg.free_when_done = pp;
   grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
-                    wrapped_rr_closure, &pp->wrapped_on_complete_arg);
+                    wrapped_rr_closure, &pp->wrapped_on_complete_arg,
+                    grpc_schedule_on_exec_ctx);
   *root = pp;
 }
@@ -268,7 +268,8 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
   pping->wrapped_notify_arg.free_when_done = pping;
   pping->next = *root;
   grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
-                    wrapped_rr_closure, &pping->wrapped_notify_arg);
+                    wrapped_rr_closure, &pping->wrapped_notify_arg,
+                    grpc_schedule_on_exec_ctx);
   *root = pping;
 }
@@ -667,7 +668,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
       gpr_malloc(sizeof(rr_connectivity_data));
   memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
   grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
-                    rr_connectivity);
+                    rr_connectivity, grpc_schedule_on_exec_ctx);
   rr_connectivity->glb_policy = glb_policy;
   rr_connectivity->state = new_rr_state;
@@ -908,15 +909,15 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   while (pp != NULL) {
     pending_pick *next = pp->next;
     *pp->target = NULL;
-    grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-                        GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+                       GRPC_ERROR_NONE);
     pp = next;
   }
   while (pping != NULL) {
     pending_ping *next = pping->next;
-    grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
-                        GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+                       GRPC_ERROR_NONE);
     pping = next;
   }
 }
@@ -932,9 +933,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if (pp->target == target) {
       *target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
     } else {
       pp->next = glb_policy->pending_picks;
       glb_policy->pending_picks = pp;
@@ -957,9 +958,9 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
          exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
     } else {
       pp->next = glb_policy->pending_picks;
       glb_policy->pending_picks = pp;
@@ -994,11 +995,10 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                     grpc_closure *on_complete) {
   if (pick_args->lb_token_mdelem_storage == NULL) {
     *target = NULL;
-    grpc_exec_ctx_sched(
+    grpc_closure_sched(
         exec_ctx, on_complete,
         GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting "
-                          "won't work without it. Failing"),
-        NULL);
+                          "won't work without it. Failing"));
     return 0;
   }
@@ -1017,7 +1017,8 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
   memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
-  grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg);
+  grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
+                    grpc_schedule_on_exec_ctx);
   wc_arg->rr_policy = glb_policy->rr_policy;
   wc_arg->target = target;
   wc_arg->wrapped_closure = on_complete;
@@ -1117,9 +1118,11 @@ static void lb_call_init_locked(glb_lb_policy *glb_policy) {
   glb_policy->lb_call_status_details_capacity = 0;
   grpc_closure_init(&glb_policy->lb_on_server_status_received,
-                    lb_on_server_status_received, glb_policy);
+                    lb_on_server_status_received, glb_policy,
+                    grpc_schedule_on_exec_ctx);
   grpc_closure_init(&glb_policy->lb_on_response_received,
-                    lb_on_response_received, glb_policy);
+                    lb_on_response_received, glb_policy,
+                    grpc_schedule_on_exec_ctx);
   gpr_backoff_init(&glb_policy->lb_call_backoff_state,
                    GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,

--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -120,7 +120,7 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   while (pp != NULL) {
     pending_pick *next = pp->next;
     *pp->target = NULL;
-    grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
     gpr_free(pp);
     pp = next;
   }
@@ -138,9 +138,9 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if (pp->target == target) {
       *target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -165,9 +165,9 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
          exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -306,14 +306,15 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
         /* drop the pick list: we are connected now */
         GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
         gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
-        grpc_exec_ctx_sched(exec_ctx,
-                            grpc_closure_create(destroy_subchannels, p),
-                            GRPC_ERROR_NONE, NULL);
+        grpc_closure_sched(exec_ctx,
+                           grpc_closure_create(destroy_subchannels, p,
+                                               grpc_schedule_on_exec_ctx),
+                           GRPC_ERROR_NONE);
         /* update any calls that were waiting for a pick */
         while ((pp = p->pending_picks)) {
           p->pending_picks = pp->next;
           *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
-          grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+          grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
           gpr_free(pp);
         }
         grpc_connected_subchannel_notify_on_state_change(
@@ -366,8 +367,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
         while ((pp = p->pending_picks)) {
           p->pending_picks = pp->next;
           *pp->target = NULL;
-          grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
-                              NULL);
+          grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
           gpr_free(pp);
         }
         GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
@@ -419,8 +419,7 @@ static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   if (selected) {
     grpc_connected_subchannel_ping(exec_ctx, selected, closure);
   } else {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
   }
 }
@@ -485,7 +484,8 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
   p->num_subchannels = subchannel_idx;
   grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
-  grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
+  grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p,
+                    grpc_schedule_on_exec_ctx);
   gpr_mu_init(&p->mu);
   return &p->base;
 }

--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -321,8 +321,8 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   while ((pp = p->pending_picks)) {
     p->pending_picks = pp->next;
     *pp->target = NULL;
-    grpc_exec_ctx_sched(exec_ctx, pp->on_complete,
-                        GRPC_ERROR_CREATE("Channel Shutdown"), NULL);
+    grpc_closure_sched(exec_ctx, pp->on_complete,
+                       GRPC_ERROR_CREATE("Channel Shutdown"));
     gpr_free(pp);
   }
   grpc_connectivity_state_set(
@@ -348,9 +348,9 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if (pp->target == target) {
       *target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -376,9 +376,9 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
       *pp->target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
          exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -581,7 +581,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
                   "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
                   (void *)selected->subchannel, (void *)selected);
         }
-        grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+        grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
         gpr_free(pp);
       }
       update_lb_connectivity_status(exec_ctx, sd, error);
@@ -634,7 +634,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
       while ((pp = p->pending_picks)) {
         p->pending_picks = pp->next;
         *pp->target = NULL;
-        grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+        grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
         gpr_free(pp);
       }
     }
@@ -684,8 +684,8 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
   } else {
     gpr_mu_unlock(&p->mu);
-    grpc_exec_ctx_sched(exec_ctx, closure,
-                        GRPC_ERROR_CREATE("Round Robin not connected"), NULL);
+    grpc_closure_sched(exec_ctx, closure,
+                       GRPC_ERROR_CREATE("Round Robin not connected"));
   }
 }
@@ -749,7 +749,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
       }
       ++subchannel_idx;
       grpc_closure_init(&sd->connectivity_changed_closure,
-                        rr_connectivity_changed, sd);
+                        rr_connectivity_changed, sd, grpc_schedule_on_exec_ctx);
     }
   }
   if (subchannel_idx == 0) {

--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -114,7 +114,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
   memset(calld, 0, sizeof(call_data));
   calld->id = (intptr_t)args->call_stack;
-  grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem);
+  grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem,
+                    grpc_schedule_on_exec_ctx);
   /* TODO(dgq): do something with the data
   channel_data *chand = elem->channel_data;

--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -112,8 +112,8 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
   }
   if (r->next_completion != NULL) {
     *r->target_result = NULL;
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion,
-                        GRPC_ERROR_CREATE("Resolver Shutdown"), NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion,
+                       GRPC_ERROR_CREATE("Resolver Shutdown"));
     r->next_completion = NULL;
   }
   gpr_mu_unlock(&r->mu);
@@ -219,9 +219,10 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(!r->resolving);
   r->resolving = true;
   r->addresses = NULL;
-  grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port,
-                       r->interested_parties,
-                       grpc_closure_create(dns_on_resolved, r), &r->addresses);
+  grpc_resolve_address(
+      exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
+      grpc_closure_create(dns_on_resolved, r, grpc_schedule_on_exec_ctx),
+      &r->addresses);
 }
 static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@@ -231,7 +232,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
     *r->target_result = r->resolved_result == NULL
                             ? NULL
                             : grpc_channel_args_copy(r->resolved_result);
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
     r->published_version = r->resolved_version;
   }

--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -89,7 +89,7 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
   gpr_mu_lock(&r->mu);
   if (r->next_completion != NULL) {
     *r->target_result = NULL;
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
   }
   gpr_mu_unlock(&r->mu);
@@ -123,7 +123,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
     grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
     *r->target_result =
         grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
   }
 }

--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -141,7 +141,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
   }
   grpc_closure *notify = c->notify;
   c->notify = NULL;
-  grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+  grpc_closure_sched(exec_ctx, notify, error);
   grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
   c->handshake_mgr = NULL;
   gpr_mu_unlock(&c->mu);
@@ -180,7 +180,7 @@ static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
     memset(c->result, 0, sizeof(*c->result));
     grpc_closure *notify = c->notify;
     c->notify = NULL;
-    grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+    grpc_closure_sched(exec_ctx, notify, error);
     gpr_mu_unlock(&c->mu);
     chttp2_connector_unref(exec_ctx, arg);
   } else {
@@ -203,7 +203,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
     memset(c->result, 0, sizeof(*c->result));
     grpc_closure *notify = c->notify;
     c->notify = NULL;
-    grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+    grpc_closure_sched(exec_ctx, notify, error);
     if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint);
     gpr_mu_unlock(&c->mu);
     chttp2_connector_unref(exec_ctx, arg);
@@ -211,7 +211,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
     GPR_ASSERT(c->endpoint != NULL);
     if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
       grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
-                        c);
+                        c, grpc_schedule_on_exec_ctx);
       grpc_slice_buffer_init(&c->initial_string_buffer);
       grpc_slice_buffer_add(&c->initial_string_buffer,
                             c->args.initial_connect_string);
@@ -237,7 +237,7 @@ static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
   c->result = result;
   GPR_ASSERT(c->endpoint == NULL);
   chttp2_connector_ref(con);  // Ref taken for callback.
-  grpc_closure_init(&c->connected, connected, c);
+  grpc_closure_init(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
   GPR_ASSERT(!c->connecting);
   c->connecting = true;
   grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,

--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -281,7 +281,8 @@ grpc_error *grpc_chttp2_server_add_port(
   state = gpr_malloc(sizeof(*state));
   memset(state, 0, sizeof(*state));
   grpc_closure_init(&state->tcp_server_shutdown_complete,
-                    tcp_server_shutdown_complete, state);
+                    tcp_server_shutdown_complete, state,
+                    grpc_schedule_on_exec_ctx);
   err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete,
                                args, &tcp_server);
   if (err != GRPC_ERROR_NONE) {

--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -73,20 +73,14 @@ static const grpc_transport_vtable vtable;
 static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
                                       grpc_error *error);
 static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void write_action_end(grpc_exec_ctx *exec_ctx, void *t,
-                             grpc_error *error);
 static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t,
                                     grpc_error *error);
-static void read_action_begin(grpc_exec_ctx *exec_ctx, void *t,
-                              grpc_error *error);
 static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
                                grpc_error *error);
 static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
                                   grpc_error *error);
-static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
-                           grpc_error *error);
 /** Set a transport level setting, and push it to our peer */
 static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                          grpc_chttp2_setting_id id, uint32_t value);
@@ -112,12 +106,8 @@ static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
                                                 void *byte_stream,
                                                 grpc_error *error_ignored);
-static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
-                             grpc_error *error);
 static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
                                     grpc_error *error);
-static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
-                                  grpc_error *error);
 static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
                                          grpc_error *error);
@@ -166,8 +156,8 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
      and maybe they hold resources that need to be freed */
   while (t->pings.next != &t->pings) {
     grpc_chttp2_outstanding_ping *ping = t->pings.next;
-    grpc_exec_ctx_sched(exec_ctx, ping->on_recv,
-                        GRPC_ERROR_CREATE("Transport closed"), NULL);
+    grpc_closure_sched(exec_ctx, ping->on_recv,
+                       GRPC_ERROR_CREATE("Transport closed"));
     ping->next->prev = ping->prev;
     ping->prev->next = ping->next;
     gpr_free(ping);
@@ -246,18 +236,15 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   grpc_slice_buffer_init(&t->outbuf);
   grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
-  grpc_closure_init(&t->write_action_begin_locked, write_action_begin_locked,
-                    t);
-  grpc_closure_init(&t->write_action, write_action, t);
-  grpc_closure_init(&t->write_action_end, write_action_end, t);
-  grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t);
-  grpc_closure_init(&t->read_action_begin, read_action_begin, t);
-  grpc_closure_init(&t->read_action_locked, read_action_locked, t);
-  grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t);
-  grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t);
-  grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t);
+  grpc_closure_init(&t->write_action, write_action, t,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&t->read_action_locked, read_action_locked, t,
+                    grpc_combiner_scheduler(t->combiner, false));
+  grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
+                    grpc_combiner_scheduler(t->combiner, false));
   grpc_closure_init(&t->destructive_reclaimer_locked,
-                    destructive_reclaimer_locked, t);
+                    destructive_reclaimer_locked, t,
+                    grpc_combiner_scheduler(t->combiner, false));
   grpc_chttp2_goaway_parser_init(&t->goaway_parser);
   grpc_chttp2_hpack_parser_init(&t->hpack_parser);
@@ -395,9 +382,10 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
 static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
   grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
-  grpc_combiner_execute(exec_ctx, t->combiner,
-                        grpc_closure_create(destroy_transport_locked, t),
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(exec_ctx, grpc_closure_create(
+                                   destroy_transport_locked, t,
+                                   grpc_combiner_scheduler(t->combiner, false)),
+                     GRPC_ERROR_NONE);
 }
 static void close_transport_locked(grpc_exec_ctx *exec_ctx,
@@ -471,8 +459,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
   grpc_chttp2_data_parser_init(&s->data_parser);
   grpc_slice_buffer_init(&s->flow_controlled_buffer);
   s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
-  grpc_closure_init(&s->complete_fetch, complete_fetch, s);
-  grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s);
+  grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s,
+                    grpc_schedule_on_exec_ctx);
   GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
@@ -547,9 +535,10 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
   grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
   s->destroy_stream_arg = and_free_memory;
-  grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s);
-  grpc_combiner_execute(exec_ctx, t->combiner, &s->destroy_stream,
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(
+      exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s,
+                                  grpc_combiner_scheduler(t->combiner, false)),
+      GRPC_ERROR_NONE);
   GPR_TIMER_END("destroy_stream", 0);
 }
@@ -600,7 +589,7 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                              write_state_name(st), reason));
   t->write_state = st;
   if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
-    grpc_exec_ctx_enqueue_list(exec_ctx, &t->run_after_write, NULL);
+    grpc_closure_list_sched(exec_ctx, &t->run_after_write);
     if (t->close_transport_on_writes_finished != NULL) {
       grpc_error *err = t->close_transport_on_writes_finished;
       t->close_transport_on_writes_finished = NULL;
@@ -618,9 +607,12 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
     case GRPC_CHTTP2_WRITE_STATE_IDLE:
      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
      GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
-      grpc_combiner_execute_finally(exec_ctx, t->combiner,
-                                    &t->write_action_begin_locked,
-                                    GRPC_ERROR_NONE, covered_by_poller);
+      grpc_closure_sched(
+          exec_ctx,
+          grpc_closure_init(
+              &t->write_action_begin_locked, write_action_begin_locked, t,
+              grpc_combiner_finally_scheduler(t->combiner, covered_by_poller)),
+          GRPC_ERROR_NONE);
      break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING:
      set_write_state(
@@ -662,7 +654,7 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
   if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) {
     set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
                     "begin writing");
-    grpc_exec_ctx_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
   } else {
     set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
                     "begin writing nothing");
@@ -674,19 +666,13 @@
 static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
   grpc_chttp2_transport *t = gt;
   GPR_TIMER_BEGIN("write_action", 0);
-  grpc_endpoint_write(exec_ctx, t->ep, &t->outbuf, &t->write_action_end);
+  grpc_endpoint_write(
+      exec_ctx, t->ep, &t->outbuf,
+      grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t,
+                        grpc_combiner_scheduler(t->combiner, false)));
   GPR_TIMER_END("write_action", 0);
 }
-static void write_action_end(grpc_exec_ctx *exec_ctx, void *gt,
-                             grpc_error *error) {
-  grpc_chttp2_transport *t = gt;
-  GPR_TIMER_BEGIN("write_action_end", 0);
-  grpc_combiner_execute(exec_ctx, t->combiner, &t->write_action_end_locked,
-                        GRPC_ERROR_REF(error), false);
-  GPR_TIMER_END("write_action_end", 0);
-}
 static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
                                     grpc_error *error) {
   GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
@@ -716,18 +702,24 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
                      "continue writing [!covered]");
      GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
-      grpc_combiner_execute_finally(exec_ctx, t->combiner,
-                                    &t->write_action_begin_locked,
-                                    GRPC_ERROR_NONE, false);
+      grpc_closure_run(
+          exec_ctx,
+          grpc_closure_init(
+              &t->write_action_begin_locked, write_action_begin_locked, t,
+              grpc_combiner_finally_scheduler(t->combiner, false)),
+          GRPC_ERROR_NONE);
      break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
      GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
                      "continue writing [covered]");
      GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
-      grpc_combiner_execute_finally(exec_ctx, t->combiner,
-                                    &t->write_action_begin_locked,
-                                    GRPC_ERROR_NONE, true);
+      grpc_closure_run(
+          exec_ctx,
+          grpc_closure_init(&t->write_action_begin_locked,
+                            write_action_begin_locked, t,
+                            grpc_combiner_finally_scheduler(t->combiner, true)),
+          GRPC_ERROR_NONE);
      break;
   }
@@ -965,15 +957,6 @@ static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
   }
 }
-static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
-                           grpc_error *error) {
-  grpc_chttp2_stream *s = gs;
-  grpc_chttp2_transport *t = s->t;
-  grpc_combiner_execute(exec_ctx, t->combiner, &s->complete_fetch_locked,
-                        GRPC_ERROR_REF(error),
-                        s->complete_fetch_covered_by_poller);
-}
 static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
 static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id,
@@ -1009,7 +992,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
   grpc_closure *on_complete = op->on_complete;
   if (on_complete == NULL) {
-    on_complete = grpc_closure_create(do_nothing, NULL);
+    on_complete =
+        grpc_closure_create(do_nothing, NULL, grpc_schedule_on_exec_ctx);
   }
   /* use final_data as a barrier until enqueue time; the inital counter is
@@ -1212,13 +1196,15 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
     gpr_free(str);
   }
-  grpc_closure_init(&op->transport_private.closure, perform_stream_op_locked,
-                    op);
   op->transport_private.args[0] = gt;
   op->transport_private.args[1] = gs;
   GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
-  grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
-                        GRPC_ERROR_NONE, op->covered_by_poller);
+  grpc_closure_sched(
+      exec_ctx,
+      grpc_closure_init(
+          &op->transport_private.closure, perform_stream_op_locked, op,
+          grpc_combiner_scheduler(t->combiner, op->covered_by_poller)),
+      GRPC_ERROR_NONE);
   GPR_TIMER_END("perform_stream_op", 0);
 }
@@ -1247,7 +1233,7 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   grpc_chttp2_outstanding_ping *ping;
   for (ping = t->pings.next; ping != &t->pings; ping = ping->next) {
     if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
-      grpc_exec_ctx_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE);
       ping->next->prev = ping->prev;
       ping->prev->next = ping->next;
       gpr_free(ping);
@@ -1321,11 +1307,12 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
   char *msg = grpc_transport_op_string(op);
   gpr_free(msg);
   op->transport_private.args[0] = gt;
-  grpc_closure_init(&op->transport_private.closure, perform_transport_op_locked,
-                    op);
   GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
-  grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(
+      exec_ctx, grpc_closure_init(&op->transport_private.closure,
+                                  perform_transport_op_locked, op,
+                                  grpc_combiner_scheduler(t->combiner, false)),
+      GRPC_ERROR_NONE);
 }
 /*******************************************************************************
@@ -1801,19 +1788,6 @@ static void update_global_window(void *args, uint32_t id, void *stream) {
  * INPUT PROCESSING - PARSING
  */
-static void read_action_begin(grpc_exec_ctx *exec_ctx, void *tp,
-                              grpc_error *error) {
-  /* Control flow:
-     reading_action_locked ->
-       (parse_unlocked -> post_parse_locked)? ->
-     post_reading_action_locked */
-  GPR_TIMER_BEGIN("reading_action", 0);
-  grpc_chttp2_transport *t = tp;
-  grpc_combiner_execute(exec_ctx, t->combiner, &t->read_action_locked,
-                        GRPC_ERROR_REF(error), false);
-  GPR_TIMER_END("reading_action", 0);
-}
 static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
                                     grpc_chttp2_transport *t) {
   grpc_http_parser parser;
@@ -1913,7 +1887,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
   grpc_slice_buffer_reset_and_unref(&t->read_buffer);
   if (keep_reading) {
-    grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin);
+    grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
+                       &t->read_action_locked);
     GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else { } else {
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action"); GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
@ -2050,10 +2025,12 @@ static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
bs->next_action.slice = slice; bs->next_action.slice = slice;
bs->next_action.max_size_hint = max_size_hint; bs->next_action.max_size_hint = max_size_hint;
bs->next_action.on_complete = on_complete; bs->next_action.on_complete = on_complete;
grpc_closure_init(&bs->next_action.closure, incoming_byte_stream_next_locked, grpc_closure_sched(
bs); exec_ctx,
grpc_combiner_execute(exec_ctx, bs->transport->combiner, grpc_closure_init(
&bs->next_action.closure, GRPC_ERROR_NONE, false); &bs->next_action.closure, incoming_byte_stream_next_locked, bs,
grpc_combiner_scheduler(bs->transport->combiner, false)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_next", 0); GPR_TIMER_END("incoming_byte_stream_next", 0);
return 0; return 0;
} }
@ -2075,10 +2052,12 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0); GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0);
grpc_chttp2_incoming_byte_stream *bs = grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)byte_stream; (grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_closure_init(&bs->destroy_action, incoming_byte_stream_destroy_locked, grpc_closure_sched(
bs); exec_ctx,
grpc_combiner_execute(exec_ctx, bs->transport->combiner, &bs->destroy_action, grpc_closure_init(
GRPC_ERROR_NONE, false); &bs->destroy_action, incoming_byte_stream_destroy_locked, bs,
grpc_combiner_scheduler(bs->transport->combiner, false)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_destroy", 0); GPR_TIMER_END("incoming_byte_stream_destroy", 0);
} }
@ -2086,7 +2065,7 @@ static void incoming_byte_stream_publish_error(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
grpc_error *error) { grpc_error *error) {
GPR_ASSERT(error != GRPC_ERROR_NONE); GPR_ASSERT(error != GRPC_ERROR_NONE);
grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL); grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error));
bs->on_next = NULL; bs->on_next = NULL;
GRPC_ERROR_UNREF(bs->error); GRPC_ERROR_UNREF(bs->error);
bs->error = error; bs->error = error;
@ -2103,7 +2082,7 @@ void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice); bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice);
if (bs->on_next != NULL) { if (bs->on_next != NULL) {
*bs->next = slice; *bs->next = slice;
grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE);
bs->on_next = NULL; bs->on_next = NULL;
} else { } else {
grpc_slice_buffer_add(&bs->slices, slice); grpc_slice_buffer_add(&bs->slices, slice);
@ -2171,7 +2150,7 @@ static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer"); GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
grpc_resource_user_post_reclaimer(exec_ctx, grpc_resource_user_post_reclaimer(exec_ctx,
grpc_endpoint_get_resource_user(t->ep), grpc_endpoint_get_resource_user(t->ep),
false, &t->benign_reclaimer); false, &t->benign_reclaimer_locked);
} }
} }
@ -2182,22 +2161,8 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer"); GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
grpc_resource_user_post_reclaimer(exec_ctx, grpc_resource_user_post_reclaimer(exec_ctx,
grpc_endpoint_get_resource_user(t->ep), grpc_endpoint_get_resource_user(t->ep),
true, &t->destructive_reclaimer); true, &t->destructive_reclaimer_locked);
}
} }
static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked,
GRPC_ERROR_REF(error), false);
}
static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_combiner_execute(exec_ctx, t->combiner, &t->destructive_reclaimer_locked,
GRPC_ERROR_REF(error), false);
} }
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg, static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
@ -2380,5 +2345,5 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_move_into(read_buffer, &t->read_buffer); grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer); gpr_free(read_buffer);
} }
read_action_begin(exec_ctx, t, GRPC_ERROR_NONE); grpc_closure_sched(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE);
} }

@@ -1634,10 +1634,11 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
          however -- it might be that we receive a RST_STREAM following this
          and can avoid the extra write */
       GRPC_CHTTP2_STREAM_REF(s, "final_rst");
-      grpc_combiner_execute_finally(
-          exec_ctx, t->combiner,
-          grpc_closure_create(force_client_rst_stream, s), GRPC_ERROR_NONE,
-          false);
+      grpc_closure_sched(
+          exec_ctx, grpc_closure_create(force_client_rst_stream, s,
+                                        grpc_combiner_finally_scheduler(
+                                            t->combiner, false)),
+          GRPC_ERROR_NONE);
     }
     grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
                                    GRPC_ERROR_NONE);

@@ -212,10 +212,8 @@ struct grpc_chttp2_transport {
   grpc_closure write_action_begin_locked;
   grpc_closure write_action;
-  grpc_closure write_action_end;
   grpc_closure write_action_end_locked;
-  grpc_closure read_action_begin;
   grpc_closure read_action_locked;
 
   /** incoming read bytes */
@@ -336,10 +334,8 @@ struct grpc_chttp2_transport {
   /** have we scheduled a destructive cleanup? */
   bool destructive_reclaimer_registered;
   /** benign cleanup closure */
-  grpc_closure benign_reclaimer;
   grpc_closure benign_reclaimer_locked;
   /** destructive cleanup closure */
-  grpc_closure destructive_reclaimer;
   grpc_closure destructive_reclaimer_locked;
 };

@@ -849,17 +849,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
                                      OP_RECV_INITIAL_METADATA)) {
     CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
-                          GRPC_ERROR_CANCELLED, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+                         GRPC_ERROR_CANCELLED);
     } else if (stream_state->state_callback_received[OP_FAILED]) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, stream_op->recv_initial_metadata_ready,
-          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
     } else {
       grpc_chttp2_incoming_metadata_buffer_publish(
           &oas->s->state.rs.initial_metadata, stream_op->recv_initial_metadata);
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+                         GRPC_ERROR_NONE);
     }
     stream_state->state_op_done[OP_RECV_INITIAL_METADATA] = true;
     result = ACTION_TAKEN_NO_CALLBACK;
@@ -910,22 +910,22 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
     CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
       CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                          GRPC_ERROR_CANCELLED, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                         GRPC_ERROR_CANCELLED);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
     } else if (stream_state->state_callback_received[OP_FAILED]) {
       CRONET_LOG(GPR_DEBUG, "Stream failed.");
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
          exec_ctx, stream_op->recv_message_ready,
-          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
     } else if (stream_state->rs.read_stream_closed == true) {
       /* No more data will be received */
       CRONET_LOG(GPR_DEBUG, "read stream closed");
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                         GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
@@ -958,8 +958,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
           &stream_state->rs.read_slice_buffer, 0);
       *((grpc_byte_buffer **)stream_op->recv_message) =
           (grpc_byte_buffer *)&stream_state->rs.sbs;
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                         GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
@@ -993,8 +993,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
           &stream_state->rs.read_slice_buffer, 0);
       *((grpc_byte_buffer **)stream_op->recv_message) =
           (grpc_byte_buffer *)&stream_state->rs.sbs;
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                         GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
       /* Do an extra read to trigger on_succeeded() callback in case connection
@@ -1055,18 +1055,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
                                      OP_ON_COMPLETE)) {
     CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
-      grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete,
-                          GRPC_ERROR_REF(stream_state->cancel_error), NULL);
+      grpc_closure_sched(exec_ctx, stream_op->on_complete,
+                         GRPC_ERROR_REF(stream_state->cancel_error));
    } else if (stream_state->state_callback_received[OP_FAILED]) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
          exec_ctx, stream_op->on_complete,
-          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
     } else {
       /* All actions in this stream_op are complete. Call the on_complete
        * callback
        */
-      grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE,
-                          NULL);
+      grpc_closure_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE);
     }
     oas->state.state_op_done[OP_ON_COMPLETE] = true;
     oas->done = true;

@@ -297,7 +297,8 @@ void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
   grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
   memset(op, 0, sizeof(*op));
   op->cancel_error = GRPC_ERROR_CANCELLED;
-  op->on_complete = grpc_closure_create(destroy_op, op);
+  op->on_complete =
+      grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
   elem->filter->start_transport_stream_op(exec_ctx, elem, op);
 }
@@ -307,7 +308,8 @@ void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
                                                 grpc_slice *optional_message) {
   grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
   memset(op, 0, sizeof(*op));
-  op->on_complete = grpc_closure_create(destroy_op, op);
+  op->on_complete =
+      grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
   grpc_transport_stream_op_add_cancellation_with_message(op, status,
                                                          optional_message);
   elem->filter->start_transport_stream_op(exec_ctx, elem, op);
@@ -319,7 +321,8 @@ void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx,
                                               grpc_slice *optional_message) {
   grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
   memset(op, 0, sizeof(*op));
-  op->on_complete = grpc_closure_create(destroy_op, op);
+  op->on_complete =
+      grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
   grpc_transport_stream_op_add_close(op, status, optional_message);
   elem->filter->start_transport_stream_op(exec_ctx, elem, op);
 }
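These call sites now pass an explicit scheduler (grpc_schedule_on_exec_ctx) when creating the heap-allocated on_complete closure whose only job is to free the op. The sketch below is a minimal, self-contained model of that self-cleaning heap-closure pattern; every type and helper in it (closure, closure_create, transport_stream_op, destroy_op) is a stand-in for illustration, not the real channel-stack API.

/* Sketch of a heap-allocated op paired with a heap-allocated closure whose
   callback frees the op after it has run. Stand-in types only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*cb_func)(void *arg, int error);
typedef struct closure { cb_func cb; void *cb_arg; } closure;

/* Like grpc_closure_create: allocate and bind callback + argument. */
static closure *closure_create(cb_func cb, void *cb_arg) {
  closure *c = malloc(sizeof(*c));
  c->cb = cb;
  c->cb_arg = cb_arg;
  return c;
}

typedef struct transport_stream_op {
  int cancelled;
  closure *on_complete;
} transport_stream_op;

/* The destroy_op role: release the op once the transport is done with it. */
static void destroy_op(void *arg, int error) {
  transport_stream_op *op = arg;
  printf("op complete (error=%d), freeing\n", error);
  free(op);
}

int main(void) {
  transport_stream_op *op = malloc(sizeof(*op));
  memset(op, 0, sizeof(*op));
  op->cancelled = 1;
  op->on_complete = closure_create(destroy_op, op);

  /* A transport would invoke this when the op finishes; done inline here. */
  closure *done = op->on_complete;
  done->cb(done->cb_arg, 0);
  free(done); /* the wrapper closure itself is freed by its owner */
  return 0;
}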

@@ -269,8 +269,10 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
   /* initialize members */
   grpc_slice_buffer_init(&calld->slices);
   calld->has_compression_algorithm = 0;
-  grpc_closure_init(&calld->got_slice, got_slice, elem);
-  grpc_closure_init(&calld->send_done, send_done, elem);
+  grpc_closure_init(&calld->got_slice, got_slice, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->send_done, send_done, elem,
+                    grpc_schedule_on_exec_ctx);
 
   return GRPC_ERROR_NONE;
 }

@@ -123,7 +123,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
 static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
                                   grpc_transport_stream_op* op) {
   deadline_state->next_on_complete = op->on_complete;
-  grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state);
+  grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
+                    grpc_schedule_on_exec_ctx);
   op->on_complete = &deadline_state->on_complete;
 }
@@ -172,8 +173,9 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
     struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
     state->elem = elem;
     state->deadline = deadline;
-    grpc_closure_init(&state->closure, start_timer_after_init, state);
-    grpc_exec_ctx_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_init(&state->closure, start_timer_after_init, state,
+                      grpc_schedule_on_exec_ctx);
+    grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
   }
 }
@@ -290,7 +292,8 @@ static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
     calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
     calld->recv_initial_metadata = op->recv_initial_metadata;
     grpc_closure_init(&calld->recv_initial_metadata_ready,
-                      recv_initial_metadata_ready, elem);
+                      recv_initial_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
     op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
   }
   // Make sure we know when the call is complete, so that we can cancel

@@ -165,7 +165,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
     // Cancel deadline timer, since we're invoking the on_handshake_done
     // callback now.
     grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
-    grpc_exec_ctx_sched(exec_ctx, &mgr->on_handshake_done, error, NULL);
+    grpc_closure_sched(exec_ctx, &mgr->on_handshake_done, error);
     mgr->shutdown = true;
   } else {
     grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
@@ -218,8 +218,10 @@ void grpc_handshake_manager_do_handshake(
   grpc_slice_buffer_init(mgr->args.read_buffer);
   // Initialize state needed for calling handshakers.
   mgr->acceptor = acceptor;
-  grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr);
-  grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args);
+  grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args,
+                    grpc_schedule_on_exec_ctx);
   // Start deadline timer, which owns a ref.
   gpr_ref(&mgr->refs);
   grpc_timer_init(exec_ctx, &mgr->deadline_timer,

@@ -352,12 +352,17 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
   calld->send_message_blocked = false;
   grpc_slice_buffer_init(&calld->slices);
   grpc_closure_init(&calld->hc_on_recv_initial_metadata,
-                    hc_on_recv_initial_metadata, elem);
+                    hc_on_recv_initial_metadata, elem,
+                    grpc_schedule_on_exec_ctx);
   grpc_closure_init(&calld->hc_on_recv_trailing_metadata,
-                    hc_on_recv_trailing_metadata, elem);
-  grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem);
-  grpc_closure_init(&calld->got_slice, got_slice, elem);
-  grpc_closure_init(&calld->send_done, send_done, elem);
+                    hc_on_recv_trailing_metadata, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->got_slice, got_slice, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->send_done, send_done, elem,
+                    grpc_schedule_on_exec_ctx);
   return GRPC_ERROR_NONE;
 }

@@ -334,9 +334,12 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
   call_data *calld = elem->call_data;
   /* initialize members */
   memset(calld, 0, sizeof(*calld));
-  grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
-  grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem);
-  grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem);
+  grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem,
+                    grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&calld->read_slice_buffer);
   return GRPC_ERROR_NONE;
 }

@@ -124,7 +124,7 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
     gpr_free(message_string);
   }
   // Invoke the next callback.
-  grpc_exec_ctx_sched(exec_ctx, calld->next_recv_message_ready, error, NULL);
+  grpc_closure_sched(exec_ctx, calld->next_recv_message_ready, error);
 }
 
 // Start transport stream op.
@@ -160,7 +160,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
   channel_data* chand = elem->channel_data;
   call_data* calld = elem->call_data;
   calld->next_recv_message_ready = NULL;
-  grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem);
+  grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem,
+                    grpc_schedule_on_exec_ctx);
   // Get max sizes from channel data, then merge in per-method config values.
   // Note: Per-method config is only available on the client, so we
   // apply the max request size to the send limit and the max response

@@ -103,7 +103,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
                    grpc_error *error) {
   grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
                                            req->context->pollset_set);
-  grpc_exec_ctx_sched(exec_ctx, req->on_done, error, NULL);
+  grpc_closure_sched(exec_ctx, req->on_done, error);
   grpc_http_parser_destroy(&req->parser);
   if (req->addresses != NULL) {
     grpc_resolved_addresses_destroy(req->addresses);
@@ -224,7 +224,8 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
     return;
   }
   addr = &req->addresses->addrs[req->next_address++];
-  grpc_closure_init(&req->connected, on_connected, req);
+  grpc_closure_init(&req->connected, on_connected, req,
+                    grpc_schedule_on_exec_ctx);
   grpc_arg arg;
   arg.key = GRPC_ARG_RESOURCE_QUOTA;
   arg.type = GRPC_ARG_POINTER;
@@ -266,8 +267,9 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
   req->pollent = pollent;
   req->overall_error = GRPC_ERROR_NONE;
   req->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
-  grpc_closure_init(&req->on_read, on_read, req);
-  grpc_closure_init(&req->done_write, done_write, req);
+  grpc_closure_init(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&req->done_write, done_write, req,
+                    grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&req->incoming);
   grpc_slice_buffer_init(&req->outgoing);
   grpc_iomgr_register_object(&req->iomgr_obj, name);
@@ -277,9 +279,11 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(pollent);
   grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent,
                                          req->context->pollset_set);
-  grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port,
-                       req->context->pollset_set,
-                       grpc_closure_create(on_resolved, req), &req->addresses);
+  grpc_resolve_address(
+      exec_ctx, request->host, req->handshaker->default_port,
+      req->context->pollset_set,
+      grpc_closure_create(on_resolved, req, grpc_schedule_on_exec_ctx),
+      &req->addresses);
 }
 
 void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,

@@ -96,7 +96,7 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
     error = GRPC_ERROR_CREATE(msg);
     gpr_free(msg);
   }
-  grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+  grpc_closure_sched(exec_ctx, on_peer_checked, error);
   tsi_peer_destruct(&peer);
 }

@@ -37,10 +37,13 @@
 #include "src/core/lib/profiling/timers.h"
 
-void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
-                       void *cb_arg) {
+grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+                                void *cb_arg,
+                                grpc_closure_scheduler *scheduler) {
   closure->cb = cb;
   closure->cb_arg = cb_arg;
+  closure->scheduler = scheduler;
+  return closure;
 }
 
 void grpc_closure_list_init(grpc_closure_list *closure_list) {
@@ -105,11 +108,12 @@ static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
   cb(exec_ctx, cb_arg, error);
 }
 
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
+                                  grpc_closure_scheduler *scheduler) {
   wrapped_closure *wc = gpr_malloc(sizeof(*wc));
   wc->cb = cb;
   wc->cb_arg = cb_arg;
-  grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
+  grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler);
   return &wc->wrapper;
 }
@@ -117,8 +121,30 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
                       grpc_error *error) {
   GPR_TIMER_BEGIN("grpc_closure_run", 0);
   if (c != NULL) {
-    c->cb(exec_ctx, c->cb_arg, error);
+    c->scheduler->vtable->run(exec_ctx, c, error);
+  } else {
+    GRPC_ERROR_UNREF(error);
   }
-  GRPC_ERROR_UNREF(error);
   GPR_TIMER_END("grpc_closure_run", 0);
 }
+
+void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
+                        grpc_error *error) {
+  GPR_TIMER_BEGIN("grpc_closure_sched", 0);
+  if (c != NULL) {
+    c->scheduler->vtable->sched(exec_ctx, c, error);
+  } else {
+    GRPC_ERROR_UNREF(error);
+  }
+  GPR_TIMER_END("grpc_closure_sched", 0);
+}
+
+void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+  grpc_closure *c = list->head;
+  while (c != NULL) {
+    grpc_closure *next = c->next_data.next;
+    c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
+    c = next;
+  }
+  list->head = list->tail = NULL;
+}

@@ -59,6 +59,22 @@ typedef struct grpc_closure_list {
 typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error);
 
+typedef struct grpc_closure_scheduler grpc_closure_scheduler;
+
+typedef struct grpc_closure_scheduler_vtable {
+  /* NOTE: for all these functions, closure->scheduler == the scheduler that was
+     used to find this vtable */
+  void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+              grpc_error *error);
+  void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                grpc_error *error);
+} grpc_closure_scheduler_vtable;
+
+/** Abstract type that can schedule closures for execution */
+struct grpc_closure_scheduler {
+  const grpc_closure_scheduler_vtable *vtable;
+};
+
 /** A closure over a grpc_iomgr_cb_func. */
 struct grpc_closure {
   /** Once queued, next indicates the next queued closure; before then, scratch
@@ -75,6 +91,10 @@ struct grpc_closure {
   /** Arguments to be passed to "cb". */
   void *cb_arg;
 
+  /** Scheduler to schedule against: NULL to schedule against current execution
+      context */
+  grpc_closure_scheduler *scheduler;
+
   /** Once queued, the result of the closure. Before then: scratch space */
   union {
     grpc_error *error;
@@ -82,12 +102,14 @@ struct grpc_closure {
   } error_data;
 };
 
-/** Initializes \a closure with \a cb and \a cb_arg. */
-void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
-                       void *cb_arg);
+/** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */
+grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+                                void *cb_arg,
+                                grpc_closure_scheduler *scheduler);
 
 /* Create a heap allocated closure: try to avoid except for very rare events */
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
+                                  grpc_closure_scheduler *scheduler);
 
 #define GRPC_CLOSURE_LIST_INIT \
   { NULL, NULL }
@@ -115,4 +137,13 @@ bool grpc_closure_list_empty(grpc_closure_list list);
 void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                       grpc_error *error);
 
+/** Schedule a closure to be run. Does not need to be run from a safe point. */
+void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                        grpc_error *error);
+
+/** Schedule all closures in a list to be run. Does not need to be run from a
+ * safe point. */
+void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx,
+                             grpc_closure_list *closure_list);
+
 #endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
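This header carries the core of the change: a closure now records its grpc_closure_scheduler at init/create time, and grpc_closure_run()/grpc_closure_sched() just dispatch through that scheduler's vtable. Below is a minimal self-contained model of that shape, using stand-in names (closure, scheduler, closure_init, closure_sched, schedule_on_exec_ctx) rather than the real gRPC API; it also shows why returning the closure from init lets call sites nest it directly inside a sched call, the idiom the chttp2 hunks above adopt.

/* Self-contained model of the closure/scheduler split; every name here is a
   stand-in, not the real gRPC API. Compile with: cc -std=c99 sketch.c */
#include <stdio.h>

typedef struct closure closure;
typedef void (*cb_func)(void *arg, int error);

typedef struct scheduler_vtable {
  void (*run)(closure *c, int error);   /* execute now */
  void (*sched)(closure *c, int error); /* queue for later */
} scheduler_vtable;

typedef struct scheduler { const scheduler_vtable *vtable; } scheduler;

struct closure {
  cb_func cb;
  void *cb_arg;
  scheduler *sched; /* which scheduler this closure is bound to */
  closure *next;    /* scratch: queue linkage */
  int error;        /* scratch: queued error */
};

/* Like grpc_closure_init: returns the closure so it can be nested in a call. */
static closure *closure_init(closure *c, cb_func cb, void *arg, scheduler *s) {
  c->cb = cb; c->cb_arg = arg; c->sched = s; c->next = NULL; c->error = 0;
  return c;
}

static void closure_run(closure *c, int error) { c->sched->vtable->run(c, error); }
static void closure_sched(closure *c, int error) { c->sched->vtable->sched(c, error); }

/* An "exec ctx"-like scheduler: run executes immediately, sched queues. */
static closure *g_queue_head = NULL;

static void immediate_run(closure *c, int error) { c->cb(c->cb_arg, error); }
static void queue_sched(closure *c, int error) {
  c->error = error; c->next = g_queue_head; g_queue_head = c;
}

static const scheduler_vtable exec_ctx_vtable = {immediate_run, queue_sched};
static scheduler schedule_on_exec_ctx = {&exec_ctx_vtable};

static void say_hello(void *arg, int error) {
  printf("%s (error=%d)\n", (const char *)arg, error);
}

int main(void) {
  closure a, b;
  /* The nested init-then-sched idiom used throughout the diff above. */
  closure_sched(closure_init(&a, say_hello, (void *)"scheduled closure",
                             &schedule_on_exec_ctx), 0);
  /* run() goes through the same vtable but executes right away. */
  closure_run(closure_init(&b, say_hello, (void *)"run-now closure",
                           &schedule_on_exec_ctx), 7);
  /* Drain the queue, loosely modeled on what an exec_ctx flush would do. */
  while (g_queue_head != NULL) {
    closure *next = g_queue_head->next;
    g_queue_head->cb(g_queue_head->cb_arg, g_queue_head->error);
    g_queue_head = next;
  }
  return 0;
}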

@@ -56,6 +56,10 @@ int grpc_combiner_trace = 0;
 struct grpc_combiner {
   grpc_combiner *next_combiner_on_this_exec_ctx;
   grpc_workqueue *optional_workqueue;
+  grpc_closure_scheduler uncovered_scheduler;
+  grpc_closure_scheduler covered_scheduler;
+  grpc_closure_scheduler uncovered_finally_scheduler;
+  grpc_closure_scheduler covered_finally_scheduler;
   gpr_mpscq queue;
   // state is:
   //   lower bit - zero if orphaned (STATE_UNORPHANED)
@@ -70,6 +74,26 @@ struct grpc_combiner {
   grpc_closure offload;
 };
 
+static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
+                                    grpc_closure *closure, grpc_error *error);
+static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
+                                  grpc_closure *closure, grpc_error *error);
+static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
+                                            grpc_closure *closure,
+                                            grpc_error *error);
+static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
+                                          grpc_closure *closure,
+                                          grpc_error *error);
+
+static const grpc_closure_scheduler_vtable scheduler_uncovered = {
+    combiner_exec_uncovered, combiner_exec_uncovered};
+static const grpc_closure_scheduler_vtable scheduler_covered = {
+    combiner_exec_covered, combiner_exec_covered};
+static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
+    combiner_finally_exec_uncovered, combiner_finally_exec_uncovered};
+static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
+    combiner_finally_exec_covered, combiner_finally_exec_covered};
+
 static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
 
 typedef struct {
@@ -102,11 +126,16 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
   lock->time_to_execute_final_list = false;
   lock->optional_workqueue = optional_workqueue;
   lock->final_list_covered_by_poller = false;
+  lock->uncovered_scheduler.vtable = &scheduler_uncovered;
+  lock->covered_scheduler.vtable = &scheduler_covered;
+  lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
+  lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
   gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
   gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
   gpr_mpscq_init(&lock->queue);
   grpc_closure_list_init(&lock->final_list);
-  grpc_closure_init(&lock->offload, offload, lock);
+  grpc_closure_init(&lock->offload, offload, lock,
+                    grpc_workqueue_scheduler(lock->optional_workqueue));
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
   return lock;
 }
@@ -148,7 +177,7 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
   }
 }
 
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                           grpc_closure *cl, grpc_error *error,
-                           bool covered_by_poller) {
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
+                          grpc_closure *cl, grpc_error *error,
+                          bool covered_by_poller) {
   GPR_TIMER_BEGIN("combiner.execute", 0);
@@ -171,6 +200,24 @@ void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
   GPR_TIMER_END("combiner.execute", 0);
 }
 
+#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
+  ((grpc_combiner *)(((char *)((closure)->scheduler)) -          \
+                     offsetof(grpc_combiner, scheduler_name)))
+
+static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+                                    grpc_error *error) {
+  combiner_exec(exec_ctx,
+                COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
+                error, false);
+}
+
+static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+                                  grpc_error *error) {
+  combiner_exec(exec_ctx,
+                COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
+                error, true);
+}
+
 static void move_next(grpc_exec_ctx *exec_ctx) {
   exec_ctx->active_combiner =
       exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
@@ -188,8 +235,7 @@ static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
   move_next(exec_ctx);
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
                               lock->optional_workqueue));
-  grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
-                         GRPC_ERROR_NONE);
+  grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
 }
 
 bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
@@ -312,13 +358,11 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
 }
 
 static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
-                            grpc_error *error) {
-  grpc_combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
-                                GRPC_ERROR_REF(error), false);
-}
+                            grpc_error *error);
 
-void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                                   grpc_closure *closure, grpc_error *error,
-                                   bool covered_by_poller) {
+static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
+                                     grpc_combiner *lock, grpc_closure *closure,
+                                     grpc_error *error,
+                                     bool covered_by_poller) {
   GRPC_COMBINER_TRACE(gpr_log(
       GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
@@ -326,9 +370,10 @@ void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
   GPR_TIMER_BEGIN("combiner.execute_finally", 0);
   if (exec_ctx->active_combiner != lock) {
     GPR_TIMER_MARK("slowpath", 0);
-    grpc_combiner_execute(exec_ctx, lock,
-                          grpc_closure_create(enqueue_finally, closure), error,
-                          false);
+    grpc_closure_sched(
+        exec_ctx, grpc_closure_create(enqueue_finally, closure,
+                                      grpc_combiner_scheduler(lock, false)),
+        error);
     GPR_TIMER_END("combiner.execute_finally", 0);
     return;
   }
@@ -342,3 +387,36 @@ void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
   grpc_closure_list_append(&lock->final_list, closure, error);
   GPR_TIMER_END("combiner.execute_finally", 0);
 }
+
+static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
+                            grpc_error *error) {
+  combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
+                           GRPC_ERROR_REF(error), false);
+}
+
+static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
+                                            grpc_closure *cl,
+                                            grpc_error *error) {
+  combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
+                                         cl, uncovered_finally_scheduler),
+                           cl, error, false);
+}
+
+static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
+                                          grpc_closure *cl, grpc_error *error) {
+  combiner_execute_finally(
+      exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
+      cl, error, true);
+}
+
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
+                                                bool covered_by_poller) {
+  return covered_by_poller ? &combiner->covered_scheduler
+                           : &combiner->uncovered_scheduler;
+}
+
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(
+    grpc_combiner *combiner, bool covered_by_poller) {
+  return covered_by_poller ? &combiner->covered_finally_scheduler
+                           : &combiner->uncovered_finally_scheduler;
+}
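COMBINER_FROM_CLOSURE_SCHEDULER above recovers the owning grpc_combiner from a grpc_closure_scheduler pointer by subtracting the embedded member's offset, which works because the combiner embeds its four scheduler structs by value. A small self-contained illustration of that container-of trick follows; the names here (combiner, scheduler, COMBINER_FROM_SCHEDULER) are hypothetical stand-ins, not the real code.

/* Container-of illustration: recover a struct pointer from a pointer to one of
   its embedded members via offsetof. Names are hypothetical. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct scheduler { const void *vtable; } scheduler;

typedef struct combiner {
  int id;
  scheduler covered_scheduler;
  scheduler uncovered_scheduler;
} combiner;

#define COMBINER_FROM_SCHEDULER(sched_ptr, member) \
  ((combiner *)(((char *)(sched_ptr)) - offsetof(combiner, member)))

int main(void) {
  combiner lock = {.id = 42};
  scheduler *s = &lock.uncovered_scheduler; /* what a closure would carry */
  combiner *back = COMBINER_FROM_SCHEDULER(s, uncovered_scheduler);
  assert(back == &lock);
  printf("recovered combiner id=%d\n", back->id);
  return 0;
}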

@@ -50,13 +50,11 @@
 grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
 // Destroy the lock
 void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
-// Execute \a action within the lock.
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                           grpc_closure *closure, grpc_error *error,
-                           bool covered_by_poller);
-// Execute \a action within the lock just prior to unlocking.
-void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                                   grpc_closure *closure, grpc_error *error,
-                                   bool covered_by_poller);
+// Fetch a scheduler to schedule closures against
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock,
+                                                bool covered_by_poller);
+// Scheduler to execute \a action within the lock just prior to unlocking.
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock,
+                                                        bool covered_by_poller);
 
 bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);

@@ -202,6 +202,8 @@ static void fd_global_shutdown(void);
 
+/* This is also used as grpc_workqueue (by directly casing it) */
 typedef struct polling_island {
+  grpc_closure_scheduler workqueue_scheduler;
   gpr_mu mu;
   /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
      the refcount.
@@ -305,6 +307,8 @@ static __thread polling_island *g_current_thread_polling_island;
 /* Forward declaration */
 static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                              grpc_error *error);
 
 #ifdef GRPC_TSAN
 /* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -317,6 +321,9 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
 gpr_atm g_epoll_sync;
 #endif /* defined(GRPC_TSAN) */
 
+static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
+    workqueue_enqueue, workqueue_enqueue};
+
 static void pi_add_ref(polling_island *pi);
 static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
@@ -529,6 +536,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
   *error = GRPC_ERROR_NONE;
 
   pi = gpr_malloc(sizeof(*pi));
+  pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
   gpr_mu_init(&pi->mu);
   pi->fd_cnt = 0;
   pi->fd_capacity = 0;
@@ -800,10 +808,10 @@ static polling_island *polling_island_merge(polling_island *p,
   return q;
 }
 
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
-                              grpc_workqueue *workqueue, grpc_closure *closure,
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                               grpc_error *error) {
   GPR_TIMER_BEGIN("workqueue.enqueue", 0);
+  grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
   /* take a ref to the workqueue: otherwise it can happen that whatever events
    * this kicks off ends up destroying the workqueue before this function
    * completes */
@@ -820,6 +828,12 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
   GPR_TIMER_END("workqueue.enqueue", 0);
 }
 
+static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
+  polling_island *pi = (polling_island *)workqueue;
+  return workqueue == NULL ? grpc_schedule_on_exec_ctx
+                           : &pi->workqueue_scheduler;
+}
+
 static grpc_error *polling_island_global_init() {
   grpc_error *error = GRPC_ERROR_NONE;
@@ -1030,8 +1044,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
     fd->po.pi = NULL;
   }
 
-  grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error),
-                      NULL);
+  grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
 
   gpr_mu_unlock(&fd->po.mu);
   UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1057,16 +1070,14 @@ static grpc_error *fd_shutdown_error(bool shutdown) {
 static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure **st, grpc_closure *closure) {
   if (fd->shutdown) {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
   } else if (*st == CLOSURE_NOT_READY) {
     /* not ready ==> switch to a waiting state by setting the closure */
     *st = closure;
   } else if (*st == CLOSURE_READY) {
     /* already ready ==> queue the closure to run immediately */
     *st = CLOSURE_NOT_READY;
-    grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
   } else {
     /* upcallptr was set to a different closure. This is an error! */
     gpr_log(GPR_ERROR,
@@ -1088,7 +1099,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
     return 0;
   } else {
     /* waiting ==> queue closure */
-    grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+    grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
     *st = CLOSURE_NOT_READY;
     return 1;
   }
@@ -1359,7 +1370,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
   /* Release the ref and set pollset->po.pi to NULL */
   pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
 
-  grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 
 /* pollset->po.mu lock must be held by the caller before calling this */
@@ -1410,7 +1421,9 @@ static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
       workqueue_maybe_wakeup(pi);
     }
     grpc_closure *c = (grpc_closure *)n;
-    grpc_closure_run(exec_ctx, c, c->error_data.error);
+    grpc_error *error = c->error_data.error;
+    c->cb(exec_ctx, c->cb_arg, error);
+    GRPC_ERROR_UNREF(error);
     return true;
   } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
     /* n == NULL might mean there's work but it's not available to be popped
@@ -1959,7 +1972,7 @@ static const grpc_event_engine_vtable vtable = {
     .workqueue_ref = workqueue_ref,
     .workqueue_unref = workqueue_unref,
-    .workqueue_enqueue = workqueue_enqueue,
+    .workqueue_scheduler = workqueue_scheduler,
 
     .shutdown_engine = shutdown_engine,
 };
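In the epoll engine above, workqueue_enqueue now finds its target by casting closure->scheduler back to the workqueue, which relies on workqueue_scheduler being the first member of polling_island (a grpc_workqueue pointer is itself a cast of a polling_island pointer). The sketch below illustrates only that first-member-cast assumption, with stand-in types rather than the real event engine code.

/* First-member cast: a pointer to a struct and a pointer to its first member
   compare equal, so the scheduler pointer doubles as the island/workqueue
   pointer. Stand-in types, not the real event engine. */
#include <assert.h>
#include <stdio.h>

typedef struct scheduler { const void *vtable; } scheduler;

typedef struct polling_island {
  scheduler workqueue_scheduler; /* must stay the first member */
  int fd_cnt;
} polling_island;

static scheduler *island_scheduler(polling_island *pi) {
  return &pi->workqueue_scheduler;
}

int main(void) {
  polling_island pi = {.fd_cnt = 3};
  scheduler *s = island_scheduler(&pi);       /* stored in a closure */
  polling_island *back = (polling_island *)s; /* the cast the engine relies on */
  assert((void *)back == (void *)&pi);
  printf("island recovered, fd_cnt=%d\n", back->fd_cnt);
  return 0;
}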

@@ -397,7 +397,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
   if (!fd->released) {
     close(fd->fd);
   }
-  grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
 }
 
 static int fd_wrapped_fd(grpc_fd *fd) {
@@ -457,16 +457,14 @@ static grpc_error *fd_shutdown_error(bool shutdown) {
 static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure **st, grpc_closure *closure) {
   if (fd->shutdown) {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
   } else if (*st == CLOSURE_NOT_READY) {
     /* not ready ==> switch to a waiting state by setting the closure */
     *st = closure;
   } else if (*st == CLOSURE_READY) {
     /* already ready ==> queue the closure to run immediately */
     *st = CLOSURE_NOT_READY;
-    grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
     maybe_wake_one_watcher_locked(fd);
   } else {
     /* upcallptr was set to a different closure. This is an error! */
@@ -489,7 +487,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
     return 0;
   } else {
     /* waiting ==> queue closure */
-    grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+    grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
     *st = CLOSURE_NOT_READY;
     return 1;
   }
@@ -852,7 +850,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
     GRPC_FD_UNREF(pollset->fds[i], "multipoller");
   }
   pollset->fd_count = 0;
-  grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 
 static void work_combine_error(grpc_error **composite, grpc_error *error) {
@@ -901,7 +899,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   if (!pollset_has_workers(pollset) &&
       !grpc_closure_list_empty(pollset->idle_jobs)) {
     GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
-    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+    grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
     goto done;
   }
   /* If we're shutting down then we don't execute any extended work */
@@ -1081,7 +1079,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
      * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
     gpr_mu_lock(&pollset->mu);
   } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
-    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+    grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
    gpr_mu_unlock(&pollset->mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->mu);
@@ -1100,7 +1098,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   pollset->shutdown_done = closure;
   pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset_has_workers(pollset)) {
-    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+    grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
   }
   if (!pollset->called_shutdown && !pollset_has_workers(pollset)) {
     pollset->called_shutdown = 1;
@@ -1288,10 +1286,8 @@ static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                             grpc_workqueue *workqueue) {}
 #endif
 
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
-                              grpc_workqueue *workqueue, grpc_closure *closure,
-                              grpc_error *error) {
-  grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
+  return grpc_schedule_on_exec_ctx;
 }
 
 /*******************************************************************************
@@ -1534,7 +1530,7 @@ static const grpc_event_engine_vtable vtable = {
     .workqueue_ref = workqueue_ref,
     .workqueue_unref = workqueue_unref,
-    .workqueue_enqueue = workqueue_enqueue,
+    .workqueue_scheduler = workqueue_scheduler,
 
     .shutdown_engine = shutdown_engine,
 };

@@ -275,9 +275,8 @@ void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
 }
 #endif
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
-grpc_closure *closure, grpc_error *error) {
-g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
+return g_event_engine->workqueue_scheduler(workqueue);
 }
 #endif // GRPC_POSIX_SOCKET
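Reviewer note: with the wrapper above, callers no longer hand a closure to a workqueue directly; they fetch the workqueue's scheduler and bind it into the closure at creation time. A minimal sketch of the intended call pattern, assuming `wq` is a valid grpc_workqueue pointer (the wq_cb and enqueue_on_workqueue names are illustrative, not part of this change):

static void wq_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* Runs on whatever thread drains the workqueue, in approximately enqueue
     order; successive closures may still execute concurrently. */
}

static void enqueue_on_workqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *wq,
                                 void *arg) {
  grpc_closure *c =
      grpc_closure_create(wq_cb, arg, grpc_workqueue_scheduler(wq));
  grpc_closure_sched(exec_ctx, c, GRPC_ERROR_NONE);
}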

@@ -106,8 +106,7 @@ typedef struct grpc_event_engine_vtable {
 grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
 void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
 #endif
-void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
-grpc_closure *closure, grpc_error *error);
+grpc_closure_scheduler *(*workqueue_scheduler)(grpc_workqueue *workqueue);
 } grpc_event_engine_vtable;
 void grpc_event_engine_init(void);

@@ -57,7 +57,6 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
 return true;
 }
-#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
 bool did_something = 0;
 GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
@@ -67,8 +66,10 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
 exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
 while (c != NULL) {
 grpc_closure *next = c->next_data.next;
+grpc_error *error = c->error_data.error;
 did_something = true;
-grpc_closure_run(exec_ctx, c, c->error_data.error);
+c->cb(exec_ctx, c->cb_arg, error);
+GRPC_ERROR_UNREF(error);
 c = next;
 }
 } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
@@ -76,30 +77,6 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
 }
 }
 GPR_ASSERT(exec_ctx->active_combiner == NULL);
-if (exec_ctx->stealing_from_workqueue != NULL) {
-if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
-grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
-exec_ctx->stolen_closure,
-exec_ctx->stolen_closure->error_data.error);
-GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
-"exec_ctx_sched");
-exec_ctx->stealing_from_workqueue = NULL;
-exec_ctx->stolen_closure = NULL;
-} else {
-grpc_closure *c = exec_ctx->stolen_closure;
-GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
-"exec_ctx_sched");
-exec_ctx->stealing_from_workqueue = NULL;
-exec_ctx->stolen_closure = NULL;
-grpc_error *error = c->error_data.error;
-GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
-c->cb(exec_ctx, c->cb_arg, error);
-GRPC_ERROR_UNREF(error);
-GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
-grpc_exec_ctx_flush(exec_ctx);
-did_something = true;
-}
-}
 GPR_TIMER_END("grpc_exec_ctx_flush", 0);
 return did_something;
 }
@@ -109,104 +86,21 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
 grpc_exec_ctx_flush(exec_ctx);
 }
-void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
-grpc_error *error,
-grpc_workqueue *offload_target_or_null) {
-GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
-if (offload_target_or_null == NULL) {
-grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
-} else if (exec_ctx->stealing_from_workqueue == NULL) {
-exec_ctx->stealing_from_workqueue = offload_target_or_null;
-closure->error_data.error = error;
-exec_ctx->stolen_closure = closure;
-} else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
-grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
-GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
-} else { /* stealing_from_workqueue == offload_target_or_null */
-grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
-exec_ctx->stolen_closure,
-exec_ctx->stolen_closure->error_data.error);
-closure->error_data.error = error;
-exec_ctx->stolen_closure = closure;
-GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
-}
-GPR_TIMER_END("grpc_exec_ctx_sched", 0);
+static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+grpc_error *error) {
+closure->cb(exec_ctx, closure->cb_arg, error);
+GRPC_ERROR_UNREF(error);
 }
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
-grpc_closure_list *list,
-grpc_workqueue *offload_target_or_null) {
-grpc_closure_list_move(list, &exec_ctx->closure_list);
+static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+grpc_error *error) {
+grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
 }
 void grpc_exec_ctx_global_init(void) {}
 void grpc_exec_ctx_global_shutdown(void) {}
-#else
-static gpr_mu g_mu;
-static gpr_cv g_cv;
-static int g_threads = 0;
-static void run_closure(void *arg) {
-grpc_closure *closure = arg;
-grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-closure->cb(&exec_ctx, closure->cb_arg, (closure->final_data & 1) != 0);
-grpc_exec_ctx_finish(&exec_ctx);
-gpr_mu_lock(&g_mu);
-if (--g_threads == 0) {
-gpr_cv_signal(&g_cv);
-}
-gpr_mu_unlock(&g_mu);
-}
-static void start_closure(grpc_closure *closure) {
-gpr_thd_id id;
-gpr_mu_lock(&g_mu);
-g_threads++;
-gpr_mu_unlock(&g_mu);
-gpr_thd_new(&id, run_closure, closure, NULL);
-}
-bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { return false; }
-void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {}
-void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
-bool success,
-grpc_workqueue *offload_target_or_null) {
-GPR_ASSERT(offload_target_or_null == NULL);
-if (closure == NULL) return;
-closure->final_data = success;
-start_closure(closure);
-}
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
-grpc_closure_list *list,
-grpc_workqueue *offload_target_or_null) {
-GPR_ASSERT(offload_target_or_null == NULL);
-if (list == NULL) return;
-grpc_closure *p = list->head;
-while (p) {
-grpc_closure *start = p;
-p = grpc_closure_next(start);
-start_closure(start);
-}
-grpc_closure_list r = GRPC_CLOSURE_LIST_INIT;
-*list = r;
-}
-void grpc_exec_ctx_global_init(void) {
-gpr_mu_init(&g_mu);
-gpr_cv_init(&g_cv);
-}
-void grpc_exec_ctx_global_shutdown(void) {
-gpr_mu_lock(&g_mu);
-while (g_threads != 0) {
-gpr_cv_wait(&g_cv, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
-}
-gpr_mu_unlock(&g_mu);
-gpr_mu_destroy(&g_mu);
-gpr_cv_destroy(&g_cv);
-}
-#endif
+static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
+exec_ctx_run, exec_ctx_sched};
+static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
+grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
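Reviewer note: the exec_ctx change above is the heart of the PR. Scheduling strategy moves out of call sites and into a grpc_closure_scheduler, which is just a two-entry vtable plus a pointer to it. A hedged sketch of what another scheduler would look like under this interface, mirroring the {exec_ctx_run, exec_ctx_sched} initializer above (the logging_* names are hypothetical, not part of this change):

/* Hypothetical scheduler that logs and then behaves like the exec_ctx
   scheduler: the first entry runs the closure immediately, the second
   appends it to the current exec_ctx's closure list for the next flush. */
static void logging_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                        grpc_error *error) {
  gpr_log(GPR_DEBUG, "run closure %p", (void *)closure);
  closure->cb(exec_ctx, closure->cb_arg, error);
  GRPC_ERROR_UNREF(error);
}

static void logging_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_error *error) {
  gpr_log(GPR_DEBUG, "sched closure %p", (void *)closure);
  grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
}

static const grpc_closure_scheduler_vtable logging_scheduler_vtable = {
    logging_run, logging_sched};
static grpc_closure_scheduler logging_scheduler = {&logging_scheduler_vtable};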

@@ -66,17 +66,6 @@ typedef struct grpc_combiner grpc_combiner;
 #ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 struct grpc_exec_ctx {
 grpc_closure_list closure_list;
-/** The workqueue we're stealing work from.
-As items are queued to the execution context, we try to steal one
-workqueue item and execute it inline (assuming the exec_ctx is not
-finished) - doing so does not invalidate the workqueue's contract, and
-provides a small latency win in cases where we get a hit */
-grpc_workqueue *stealing_from_workqueue;
-/** The workqueue item that was stolen from the workqueue above. When new
-items are scheduled to be offloaded to that workqueue, we need to update
-this like a 1-deep fifo to maintain the invariant that workqueue items
-queued by one thread are started in order */
-grpc_closure *stolen_closure;
 /** currently active combiner: updated only via combiner.c */
 grpc_combiner *active_combiner;
 /** last active combiner in the active combiner list */
@@ -89,10 +78,7 @@ struct grpc_exec_ctx {
 /* initializer for grpc_exec_ctx:
 prefer to use GRPC_EXEC_CTX_INIT whenever possible */
 #define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
-{ \
-GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
-finish_check \
-}
+{ GRPC_CLOSURE_LIST_INIT, NULL, NULL, false, finish_check_arg, finish_check }
 #else
 struct grpc_exec_ctx {
 bool cached_ready_to_finish;
@@ -108,6 +94,8 @@ struct grpc_exec_ctx {
 #define GRPC_EXEC_CTX_INIT \
 GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
+extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
 /** Flush any work that has been enqueued onto this grpc_exec_ctx.
 * Caller must guarantee that no interfering locks are held.
 * Returns true if work was performed, false otherwise. */
@@ -115,14 +103,6 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
 /** Finish any pending work for a grpc_exec_ctx. Must be called before
 * the instance is destroyed, or work may be lost. */
 void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
-/** Add a closure to be executed in the future.
-If \a offload_target_or_null is NULL, the closure will be executed at the
-next exec_ctx.{finish,flush} point.
-If \a offload_target_or_null is non-NULL, the closure will be scheduled
-against the workqueue, and a reference to the workqueue will be consumed. */
-void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
-grpc_error *error,
-grpc_workqueue *offload_target_or_null);
 /** Returns true if we'd like to leave this execution context as soon as
 possible: useful for deciding whether to do something more or not depending
 on outside context */
@@ -131,11 +111,6 @@ bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx);
 bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
 /** A finish check that is always ready to finish */
 bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
-/** Add a list of closures to be executed at the next flush/finish point.
-* Leaves \a list empty. */
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
-grpc_closure_list *list,
-grpc_workqueue *offload_target_or_null);
 void grpc_exec_ctx_global_init(void);
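Reviewer note: for call sites, the header change boils down to this: the workqueue/offload parameter is gone, grpc_schedule_on_exec_ctx is the default scheduler, and the scheduler is fixed at closure-init time. A minimal sketch of the new calling convention, using only interfaces visible in this diff (on_done and its arg are placeholders):

static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* React to completion; the scheduler machinery owns `error`. */
}

static void schedule_on_exec_ctx_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure done;
  /* The scheduling strategy is now a property of the closure itself. */
  grpc_closure_init(&done, on_done, NULL, grpc_schedule_on_exec_ctx);
  /* Was: grpc_exec_ctx_sched(&exec_ctx, &done, GRPC_ERROR_NONE, NULL); */
  grpc_closure_sched(&exec_ctx, &done, GRPC_ERROR_NONE);
  grpc_exec_ctx_finish(&exec_ctx); /* flushes, running `done` */
}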

@@ -77,11 +77,19 @@ static void closure_exec_thread_func(void *ignored) {
 gpr_mu_unlock(&g_executor.mu);
 break;
 } else {
-grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
-}
+grpc_closure *c = g_executor.closures.head;
+grpc_closure_list_init(&g_executor.closures);
 gpr_mu_unlock(&g_executor.mu);
+while (c != NULL) {
+grpc_closure *next = c->next_data.next;
+grpc_error *error = c->error_data.error;
+c->cb(&exec_ctx, c->cb_arg, error);
+GRPC_ERROR_UNREF(error);
+c = next;
+}
 grpc_exec_ctx_flush(&exec_ctx);
 }
+}
 grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -112,7 +120,8 @@ static void maybe_spawn_locked() {
 g_executor.pending_join = 1;
 }
-void grpc_executor_push(grpc_closure *closure, grpc_error *error) {
+static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+grpc_error *error) {
 gpr_mu_lock(&g_executor.mu);
 if (g_executor.shutting_down == 0) {
 grpc_closure_list_append(&g_executor.closures, closure, error);
@@ -133,7 +142,15 @@ void grpc_executor_shutdown() {
 * list below because we aren't accepting new work */
 /* Execute pending callbacks, some may be performing cleanups */
-grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
+grpc_closure *c = g_executor.closures.head;
+grpc_closure_list_init(&g_executor.closures);
+while (c != NULL) {
+grpc_closure *next = c->next_data.next;
+grpc_error *error = c->error_data.error;
+c->cb(&exec_ctx, c->cb_arg, error);
+GRPC_ERROR_UNREF(error);
+c = next;
+}
 grpc_exec_ctx_finish(&exec_ctx);
 GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
 if (pending_join) {
@@ -141,3 +158,8 @@ void grpc_executor_shutdown() {
 }
 gpr_mu_destroy(&g_executor.mu);
 }
+static const grpc_closure_scheduler_vtable executor_vtable = {executor_push,
+executor_push};
+static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
+grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;

@@ -43,9 +43,7 @@
 * non-blocking solution available. */
 void grpc_executor_init();
-/** Enqueue \a closure for its eventual execution of \a f(arg) on a separate
-* thread */
-void grpc_executor_push(grpc_closure *closure, grpc_error *error);
+extern grpc_closure_scheduler *grpc_executor_scheduler;
 /** Shutdown the executor, running all pending work as part of the call */
 void grpc_executor_shutdown();
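Reviewer note: the resolver diffs elsewhere in this PR show the replacement for the deleted grpc_executor_push: bind grpc_executor_scheduler into the closure, then use the uniform grpc_closure_sched. A condensed sketch (blocking_work and offload_to_executor are placeholder names):

static void blocking_work(grpc_exec_ctx *exec_ctx, void *arg,
                          grpc_error *error) {
  /* Potentially-blocking work, run on an executor thread. */
}

static void offload_to_executor(grpc_exec_ctx *exec_ctx, void *arg) {
  grpc_closure *c =
      grpc_closure_create(blocking_work, arg, grpc_executor_scheduler);
  /* Was: grpc_executor_push(c, GRPC_ERROR_NONE); */
  grpc_closure_sched(exec_ctx, c, GRPC_ERROR_NONE);
}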

@@ -83,7 +83,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 // Drain any pending UV callbacks without blocking
 uv_run(uv_default_loop(), UV_RUN_NOWAIT);
 }
-grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
 }
 void grpc_pollset_destroy(grpc_pollset *pollset) {

@@ -109,7 +109,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 pollset->shutting_down = 1;
 grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
 if (!pollset->is_iocp_worker) {
-grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
 } else {
 pollset->on_shutdown = closure;
 }
@@ -167,8 +167,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 }
 if (pollset->shutting_down && pollset->on_shutdown != NULL) {
-grpc_exec_ctx_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE,
-NULL);
+grpc_closure_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
 pollset->on_shutdown = NULL;
 }
 goto done;

@@ -163,10 +163,9 @@ typedef struct {
 static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
 grpc_error *error) {
 request *r = rp;
-grpc_exec_ctx_sched(
+grpc_closure_sched(
 exec_ctx, r->on_done,
-grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out),
-NULL);
+grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
 gpr_free(r->name);
 gpr_free(r->default_port);
 gpr_free(r);
@@ -185,12 +184,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 grpc_closure *on_done,
 grpc_resolved_addresses **addrs) {
 request *r = gpr_malloc(sizeof(request));
-grpc_closure_init(&r->request_closure, do_request_thread, r);
+grpc_closure_init(&r->request_closure, do_request_thread, r,
+grpc_executor_scheduler);
 r->name = gpr_strdup(name);
 r->default_port = gpr_strdup(default_port);
 r->on_done = on_done;
 r->addrs_out = addrs;
-grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
+grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
 }
 void (*grpc_resolve_address)(

@@ -98,7 +98,7 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
 grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
 grpc_error *error;
 error = handle_addrinfo_result(status, res, r->addresses);
-grpc_exec_ctx_sched(&exec_ctx, r->on_done, error, NULL);
+grpc_closure_sched(&exec_ctx, r->on_done, error);
 grpc_exec_ctx_finish(&exec_ctx);
 gpr_free(r->hints);
@@ -193,7 +193,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 int s;
 err = try_split_host_port(name, default_port, &host, &port);
 if (err != GRPC_ERROR_NONE) {
-grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+grpc_closure_sched(exec_ctx, on_done, err);
 return;
 }
 r = gpr_malloc(sizeof(request));
@@ -217,7 +217,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 *addrs = NULL;
 err = GRPC_ERROR_CREATE("getaddrinfo failed");
 err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s));
-grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+grpc_closure_sched(exec_ctx, on_done, err);
 gpr_free(r);
 gpr_free(req);
 gpr_free(hints);

@@ -154,7 +154,7 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
 } else {
 GRPC_ERROR_REF(error);
 }
-grpc_exec_ctx_sched(exec_ctx, r->on_done, error, NULL);
+grpc_closure_sched(exec_ctx, r->on_done, error);
 gpr_free(r->name);
 gpr_free(r->default_port);
 gpr_free(r);
@@ -173,12 +173,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
 grpc_closure *on_done,
 grpc_resolved_addresses **addresses) {
 request *r = gpr_malloc(sizeof(request));
-grpc_closure_init(&r->request_closure, do_request_thread, r);
+grpc_closure_init(&r->request_closure, do_request_thread, r,
+grpc_executor_scheduler);
 r->name = gpr_strdup(name);
 r->default_port = gpr_strdup(default_port);
 r->on_done = on_done;
 r->addresses = addresses;
-grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
+grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
 }
 void (*grpc_resolve_address)(

@@ -265,9 +265,8 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx,
 if (resource_quota->step_scheduled) return;
 resource_quota->step_scheduled = true;
 grpc_resource_quota_internal_ref(resource_quota);
-grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
-&resource_quota->rq_step_closure,
-GRPC_ERROR_NONE, false);
+grpc_closure_sched(exec_ctx, &resource_quota->rq_step_closure,
+GRPC_ERROR_NONE);
 }
 /* returns true if all allocations are completed */
@@ -294,7 +293,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
 }
 if (resource_user->free_pool >= 0) {
 resource_user->allocating = false;
-grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
+grpc_closure_list_sched(exec_ctx, &resource_user->on_allocated);
 gpr_mu_unlock(&resource_user->mu);
 } else {
 rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
@@ -439,7 +438,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
 resource_user->new_reclaimers[destructive] = NULL;
 GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
 if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
-grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
+grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED);
 return false;
 }
 resource_user->reclaimers[destructive] = closure;
@@ -480,10 +479,10 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
 static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
 grpc_resource_user *resource_user = ru;
-grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
-GRPC_ERROR_CANCELLED, NULL);
-grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
-GRPC_ERROR_CANCELLED, NULL);
+grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+GRPC_ERROR_CANCELLED);
+grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+GRPC_ERROR_CANCELLED);
 resource_user->reclaimers[0] = NULL;
 resource_user->reclaimers[1] = NULL;
 rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
@@ -496,10 +495,10 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
 for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
 rulist_remove(resource_user, (grpc_rulist)i);
 }
-grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
-GRPC_ERROR_CANCELLED, NULL);
-grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
-GRPC_ERROR_CANCELLED, NULL);
+grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+GRPC_ERROR_CANCELLED);
+grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+GRPC_ERROR_CANCELLED);
 if (resource_user->free_pool != 0) {
 resource_user->resource_quota->free_pool += resource_user->free_pool;
 rq_step_sched(exec_ctx, resource_user->resource_quota);
@@ -571,9 +570,12 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
 gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
 (intptr_t)resource_quota);
 }
-grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota);
+grpc_closure_init(
+&resource_quota->rq_step_closure, rq_step, resource_quota,
+grpc_combiner_finally_scheduler(resource_quota->combiner, true));
 grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
-rq_reclamation_done, resource_quota);
+rq_reclamation_done, resource_quota,
+grpc_combiner_scheduler(resource_quota->combiner, false));
 for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
 resource_quota->roots[i] = NULL;
 }
@@ -614,9 +616,8 @@ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
 rq_resize_args *a = gpr_malloc(sizeof(*a));
 a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
 a->size = (int64_t)size;
-grpc_closure_init(&a->closure, rq_resize, a);
-grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure,
-GRPC_ERROR_NONE, false);
+grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
+grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
 grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -663,15 +664,19 @@ grpc_resource_user *grpc_resource_user_create(
 resource_user->resource_quota =
 grpc_resource_quota_internal_ref(resource_quota);
 grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
-resource_user);
+resource_user,
+grpc_combiner_scheduler(resource_quota->combiner, false));
 grpc_closure_init(&resource_user->add_to_free_pool_closure,
-&ru_add_to_free_pool, resource_user);
+&ru_add_to_free_pool, resource_user,
+grpc_combiner_scheduler(resource_quota->combiner, false));
 grpc_closure_init(&resource_user->post_reclaimer_closure[0],
-&ru_post_benign_reclaimer, resource_user);
+&ru_post_benign_reclaimer, resource_user,
+grpc_combiner_scheduler(resource_quota->combiner, false));
 grpc_closure_init(&resource_user->post_reclaimer_closure[1],
-&ru_post_destructive_reclaimer, resource_user);
-grpc_closure_init(&resource_user->destroy_closure, &ru_destroy,
-resource_user);
+&ru_post_destructive_reclaimer, resource_user,
+grpc_combiner_scheduler(resource_quota->combiner, false));
+grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user,
+grpc_combiner_scheduler(resource_quota->combiner, false));
 gpr_mu_init(&resource_user->mu);
 gpr_atm_rel_store(&resource_user->refs, 1);
 gpr_atm_rel_store(&resource_user->shutdown, 0);
@@ -706,9 +711,8 @@ static void ru_unref_by(grpc_exec_ctx *exec_ctx,
 gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
 GPR_ASSERT(old >= amount);
 if (old == amount) {
-grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-&resource_user->destroy_closure, GRPC_ERROR_NONE,
-false);
+grpc_closure_sched(exec_ctx, &resource_user->destroy_closure,
+GRPC_ERROR_NONE);
 }
 }
@@ -724,9 +728,12 @@ void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
 void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
 grpc_resource_user *resource_user) {
 if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
-grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-grpc_closure_create(ru_shutdown, resource_user),
-GRPC_ERROR_NONE, false);
+grpc_closure_sched(exec_ctx,
+grpc_closure_create(
+ru_shutdown, resource_user,
+grpc_combiner_scheduler(
+resource_user->resource_quota->combiner, false)),
+GRPC_ERROR_NONE);
 }
 }
@@ -746,12 +753,11 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
 GRPC_ERROR_NONE);
 if (!resource_user->allocating) {
 resource_user->allocating = true;
-grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-&resource_user->allocate_closure, GRPC_ERROR_NONE,
-false);
+grpc_closure_sched(exec_ctx, &resource_user->allocate_closure,
+GRPC_ERROR_NONE);
 }
 } else {
-grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
 }
 gpr_mu_unlock(&resource_user->mu);
 }
@@ -770,9 +776,8 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
 if (is_bigger_than_zero && was_zero_or_negative &&
 !resource_user->added_to_free_pool) {
 resource_user->added_to_free_pool = true;
-grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-&resource_user->add_to_free_pool_closure,
-GRPC_ERROR_NONE, false);
+grpc_closure_sched(exec_ctx, &resource_user->add_to_free_pool_closure,
+GRPC_ERROR_NONE);
 }
 gpr_mu_unlock(&resource_user->mu);
 ru_unref_by(exec_ctx, resource_user, (gpr_atm)size);
@@ -784,9 +789,9 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
 grpc_closure *closure) {
 GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
 resource_user->new_reclaimers[destructive] = closure;
-grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-&resource_user->post_reclaimer_closure[destructive],
-GRPC_ERROR_NONE, false);
+grpc_closure_sched(exec_ctx,
+&resource_user->post_reclaimer_closure[destructive],
+GRPC_ERROR_NONE);
 }
 void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
@@ -795,18 +800,20 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
 gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
 resource_user->resource_quota->name, resource_user->name);
 }
-grpc_combiner_execute(
-exec_ctx, resource_user->resource_quota->combiner,
-&resource_user->resource_quota->rq_reclamation_done_closure,
-GRPC_ERROR_NONE, false);
+grpc_closure_sched(
+exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
+GRPC_ERROR_NONE);
 }
 void grpc_resource_user_slice_allocator_init(
 grpc_resource_user_slice_allocator *slice_allocator,
 grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
-grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
-slice_allocator);
-grpc_closure_init(&slice_allocator->on_done, cb, p);
+grpc_closure_init(
+&slice_allocator->on_allocated, ru_allocated_slices, slice_allocator,
+grpc_combiner_scheduler(resource_user->resource_quota->combiner, false));
+grpc_closure_init(
+&slice_allocator->on_done, cb, p,
+grpc_combiner_scheduler(resource_user->resource_quota->combiner, false));
 slice_allocator->resource_user = resource_user;
 }
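Reviewer note: the recurring pattern in resource_quota.c is that closures which previously went through grpc_combiner_execute / grpc_combiner_execute_finally are now initialized once with grpc_combiner_scheduler(combiner, ...) or grpc_combiner_finally_scheduler(combiner, ...) and thereafter scheduled like any other closure. A minimal sketch of that binding; the my_state/on_locked names are illustrative, and the boolean argument simply mirrors the values passed throughout this file:

typedef struct {
  grpc_combiner *combiner;
  grpc_closure on_locked_closure;
} my_state;

static void on_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* Runs serialized under st->combiner. */
}

static void my_state_init_and_kick(grpc_exec_ctx *exec_ctx, my_state *st) {
  /* Bind the combiner scheduler once, at init time... */
  grpc_closure_init(&st->on_locked_closure, on_locked, st,
                    grpc_combiner_scheduler(st->combiner, false));
  /* ...then every later invocation is an ordinary sched. */
  grpc_closure_sched(exec_ctx, &st->on_locked_closure, GRPC_ERROR_NONE);
}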

@@ -131,7 +131,7 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
 gpr_mu_lock(&socket->state_mu);
 if (info->has_pending_iocp) {
 info->has_pending_iocp = 0;
-grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
 } else {
 info->closure = closure;
 }
@@ -154,7 +154,7 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
 GPR_ASSERT(!info->has_pending_iocp);
 gpr_mu_lock(&socket->state_mu);
 if (info->closure) {
-grpc_exec_ctx_sched(exec_ctx, info->closure, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, info->closure, GRPC_ERROR_NONE);
 info->closure = NULL;
 } else {
 info->has_pending_iocp = 1;

@@ -265,7 +265,7 @@ finish:
 grpc_channel_args_destroy(ac->channel_args);
 gpr_free(ac);
 }
-grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+grpc_closure_sched(exec_ctx, closure, error);
 }
 static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
@@ -294,7 +294,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
 if (error != GRPC_ERROR_NONE) {
-grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+grpc_closure_sched(exec_ctx, closure, error);
 return;
 }
 if (dsmode == GRPC_DSMODE_IPV4) {
@@ -303,7 +303,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 addr = &addr4_copy;
 }
 if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
-grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+grpc_closure_sched(exec_ctx, closure, error);
 return;
 }
@@ -321,14 +321,13 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 if (err >= 0) {
 *ep =
 grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
-grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
 goto done;
 }
 if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
 grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
-grpc_exec_ctx_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"),
-NULL);
+grpc_closure_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
 goto done;
 }
@@ -343,8 +342,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
 addr_str = NULL;
 gpr_mu_init(&ac->mu);
 ac->refs = 2;
-ac->write_closure.cb = on_writable;
-ac->write_closure.cb_arg = ac;
+grpc_closure_init(&ac->write_closure, on_writable, ac,
+grpc_schedule_on_exec_ctx);
 ac->channel_args = grpc_channel_args_copy(channel_args);
 if (grpc_tcp_trace) {

@@ -110,7 +110,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
 if (done) {
 uv_tcp_connect_cleanup(&exec_ctx, connect);
 }
-grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
+grpc_closure_sched(&exec_ctx, closure, error);
 grpc_exec_ctx_finish(&exec_ctx);
 }

@@ -129,7 +129,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
 async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
 /* If the connection was aborted, the callback was already called when
 the deadline was met. */
-grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
+grpc_closure_sched(exec_ctx, on_done, error);
 }
 /* Tries to issue one async connection, then schedules both an IOCP
@@ -227,7 +227,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
 ac->addr_name = grpc_sockaddr_to_uri(addr);
 ac->endpoint = endpoint;
 ac->resource_quota = resource_quota;
-grpc_closure_init(&ac->on_connect, on_connect, ac);
+grpc_closure_init(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
 grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
 gpr_now(GPR_CLOCK_MONOTONIC));
@@ -247,7 +247,7 @@ failure:
 closesocket(sock);
 }
 grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
-grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL);
+grpc_closure_sched(exec_ctx, on_done, final_error);
 }
 #endif /* GRPC_WINSOCK_SOCKET */

@@ -316,7 +316,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 tcp->finished_edge = false;
 grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
 } else {
-grpc_exec_ctx_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
 }
 }
@@ -460,11 +460,10 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 if (buf->length == 0) {
 GPR_TIMER_END("tcp_write", 0);
-grpc_exec_ctx_sched(exec_ctx, cb,
+grpc_closure_sched(exec_ctx, cb,
 grpc_fd_is_shutdown(tcp->em_fd)
 ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
-: GRPC_ERROR_NONE,
-NULL);
+: GRPC_ERROR_NONE);
 return;
 }
 tcp->outgoing_buffer = buf;
@@ -484,7 +483,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 gpr_log(GPR_DEBUG, "write: %s", str);
 grpc_error_free_string(str);
 }
-grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+grpc_closure_sched(exec_ctx, cb, error);
 }
 GPR_TIMER_END("tcp_write", 0);
@@ -552,10 +551,10 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
 gpr_ref_init(&tcp->refcount, 1);
 gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
 tcp->em_fd = em_fd;
-tcp->read_closure.cb = tcp_handle_read;
-tcp->read_closure.cb_arg = tcp;
-tcp->write_closure.cb = tcp_handle_write;
-tcp->write_closure.cb_arg = tcp;
+grpc_closure_init(&tcp->read_closure, tcp_handle_read, tcp,
+grpc_schedule_on_exec_ctx);
+grpc_closure_init(&tcp->write_closure, tcp_handle_write, tcp,
+grpc_schedule_on_exec_ctx);
 grpc_slice_buffer_init(&tcp->last_read_buffer);
 tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
 grpc_resource_user_slice_allocator_init(

@@ -208,7 +208,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 GPR_ASSERT(s->shutdown);
 gpr_mu_unlock(&s->mu);
 if (s->shutdown_complete != NULL) {
-grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
 }
 gpr_mu_destroy(&s->mu);
@@ -254,8 +254,8 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 grpc_tcp_listener *sp;
 for (sp = s->head; sp; sp = sp->next) {
 grpc_unlink_if_unix_domain_socket(&sp->addr);
-sp->destroyed_closure.cb = destroyed_port;
-sp->destroyed_closure.cb_arg = s;
+grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+grpc_schedule_on_exec_ctx);
 grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
 "tcp_listener_shutdown");
 }
@@ -723,8 +723,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
 "clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
 for (i = 0; i < pollset_count; i++) {
 grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
-sp->read_closure.cb = on_read;
-sp->read_closure.cb_arg = sp;
+grpc_closure_init(&sp->read_closure, on_read, sp,
+grpc_schedule_on_exec_ctx);
 grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
 s->active_ports++;
 sp = sp->next;
@@ -733,8 +733,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
 for (i = 0; i < pollset_count; i++) {
 grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
 }
-sp->read_closure.cb = on_read;
-sp->read_closure.cb_arg = sp;
+grpc_closure_init(&sp->read_closure, on_read, sp,
+grpc_schedule_on_exec_ctx);
 grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
 s->active_ports++;
 sp = sp->next;
@@ -760,7 +760,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 if (gpr_unref(&s->refs)) {
 grpc_tcp_server_shutdown_listeners(exec_ctx, s);
 gpr_mu_lock(&s->mu);
-grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
+grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
 gpr_mu_unlock(&s->mu);
 tcp_server_destroy(exec_ctx, s);
 }

@@ -126,7 +126,7 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
 static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 if (s->shutdown_complete != NULL) {
-grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
 }
 while (s->head) {
@@ -170,7 +170,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 if (gpr_unref(&s->refs)) {
 /* Complete shutdown_starting work before destroying. */
 grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
-grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+grpc_closure_list_sched(&local_exec_ctx, &s->shutdown_starting);
 if (exec_ctx == NULL) {
 grpc_exec_ctx_flush(&local_exec_ctx);
 tcp_server_destroy(&local_exec_ctx, s);

@@ -162,11 +162,12 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
 static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
 grpc_tcp_server *s) {
 if (s->shutdown_complete != NULL) {
-grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
 }
-grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(destroy_server, s),
-GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, grpc_closure_create(destroy_server, s,
+grpc_schedule_on_exec_ctx),
+GRPC_ERROR_NONE);
 }
 grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
@@ -204,7 +205,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 if (gpr_unref(&s->refs)) {
 grpc_tcp_server_shutdown_listeners(exec_ctx, s);
 gpr_mu_lock(&s->mu);
-grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
+grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
 gpr_mu_unlock(&s->mu);
 tcp_server_destroy(exec_ctx, s);
 }
@@ -465,7 +466,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
 sp->new_socket = INVALID_SOCKET;
 sp->port = port;
 sp->port_index = port_index;
-grpc_closure_init(&sp->on_accept, on_accept, sp);
+grpc_closure_init(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx);
 GPR_ASSERT(sp->socket);
 gpr_mu_unlock(&s->mu);
 *listener = sp;

@@ -169,7 +169,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
 // nread < 0: Error
 error = GRPC_ERROR_CREATE("TCP Read failed");
 }
-grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+grpc_closure_sched(&exec_ctx, cb, error);
 grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -190,7 +190,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 error = GRPC_ERROR_CREATE("TCP Read failed at start");
 error =
 grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
-grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+grpc_closure_sched(exec_ctx, cb, error);
 }
 if (grpc_tcp_trace) {
 const char *str = grpc_error_string(error);
@@ -217,7 +217,7 @@ static void write_callback(uv_write_t *req, int status) {
 gpr_free(tcp->write_buffers);
 grpc_resource_user_free(&exec_ctx, tcp->resource_user,
 sizeof(uv_buf_t) * tcp->write_slices->count);
-grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+grpc_closure_sched(&exec_ctx, cb, error);
 grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -243,8 +243,8 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 }
 if (tcp->shutting_down) {
-grpc_exec_ctx_sched(exec_ctx, cb,
-GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+grpc_closure_sched(exec_ctx, cb,
+GRPC_ERROR_CREATE("TCP socket is shutting down"));
 return;
 }
@@ -254,7 +254,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 if (tcp->write_slices->count == 0) {
 // No slices means we don't have to do anything,
 // and libuv doesn't like empty writes
-grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
 return;
 }

@@ -188,7 +188,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
 tcp->read_cb = NULL;
 TCP_UNREF(exec_ctx, tcp, "read");
-grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+grpc_closure_sched(exec_ctx, cb, error);
 }
 static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -202,8 +202,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 WSABUF buffer;
 if (tcp->shutting_down) {
-grpc_exec_ctx_sched(exec_ctx, cb,
-GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+grpc_closure_sched(exec_ctx, cb,
+GRPC_ERROR_CREATE("TCP socket is shutting down"));
 return;
 }
@@ -227,7 +227,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 /* Did we get data immediately ? Yay. */
 if (info->wsa_error != WSAEWOULDBLOCK) {
 info->bytes_transfered = bytes_read;
-grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
 return;
 }
@@ -240,8 +240,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 int wsa_error = WSAGetLastError();
 if (wsa_error != WSA_IO_PENDING) {
 info->wsa_error = wsa_error;
-grpc_exec_ctx_sched(exec_ctx, &tcp->on_read,
-GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL);
+grpc_closure_sched(exec_ctx, &tcp->on_read,
+GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
 return;
 }
 }
@@ -272,7 +272,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
 }
 TCP_UNREF(exec_ctx, tcp, "write");
-grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+grpc_closure_sched(exec_ctx, cb, error);
 }
 /* Initiates a write. */
@@ -290,8 +290,8 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 size_t len;
 if (tcp->shutting_down) {
-grpc_exec_ctx_sched(exec_ctx, cb,
-GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+grpc_closure_sched(exec_ctx, cb,
+GRPC_ERROR_CREATE("TCP socket is shutting down"));
 return;
 }
@@ -322,7 +322,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 grpc_error *error = status == 0
 ? GRPC_ERROR_NONE
 : GRPC_WSA_ERROR(info->wsa_error, "WSASend");
-grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+grpc_closure_sched(exec_ctx, cb, error);
 if (allocated) gpr_free(allocated);
 return;
 }
@@ -340,8 +340,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 int wsa_error = WSAGetLastError();
 if (wsa_error != WSA_IO_PENDING) {
 TCP_UNREF(exec_ctx, tcp, "write");
-grpc_exec_ctx_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"),
-NULL);
+grpc_closure_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
 return;
 }
 }
@@ -424,8 +423,8 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
 tcp->socket = socket;
 gpr_mu_init(&tcp->mu);
 gpr_ref_init(&tcp->refcount, 1);
-grpc_closure_init(&tcp->on_read, on_read, tcp);
-grpc_closure_init(&tcp->on_write, on_write, tcp);
+grpc_closure_init(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
+grpc_closure_init(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
 tcp->peer_string = gpr_strdup(peer_string);
 tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
 /* Tell network status tracking code about the new endpoint */

@@ -184,22 +184,22 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
 shard_type *shard = &g_shards[shard_idx(timer)];
 GPR_ASSERT(deadline.clock_type == g_clock_type);
 GPR_ASSERT(now.clock_type == g_clock_type);
-grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg,
+grpc_schedule_on_exec_ctx);
 timer->deadline = deadline;
 timer->triggered = 0;
 if (!g_initialized) {
 timer->triggered = 1;
-grpc_exec_ctx_sched(
+grpc_closure_sched(
 exec_ctx, &timer->closure,
-GRPC_ERROR_CREATE("Attempt to create timer before initialization"),
-NULL);
+GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
 return;
 }
 if (gpr_time_cmp(deadline, now) <= 0) {
 timer->triggered = 1;
-grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE);
 return;
 }
@@ -251,7 +251,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
 shard_type *shard = &g_shards[shard_idx(timer)];
 gpr_mu_lock(&shard->mu);
 if (!timer->triggered) {
-grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED);
 timer->triggered = 1;
 if (timer->heap_index == INVALID_HEAP_INDEX) {
 list_remove(timer);
@@ -317,7 +317,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
 grpc_timer *timer;
 gpr_mu_lock(&shard->mu);
 while ((timer = pop_one(shard, now))) {
-grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error), NULL);
+grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error));
 n++;
 }
 *new_min_deadline = compute_min_deadline(shard);

@ -55,7 +55,7 @@ void run_expired_timer(uv_timer_t *handle) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(!timer->triggered); GPR_ASSERT(!timer->triggered);
timer->triggered = 1; timer->triggered = 1;
grpc_exec_ctx_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL); grpc_closure_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle); stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
} }
@ -65,10 +65,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
void *timer_cb_arg, gpr_timespec now) { void *timer_cb_arg, gpr_timespec now) {
uint64_t timeout; uint64_t timeout;
uv_timer_t *uv_timer; uv_timer_t *uv_timer;
grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg); grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg,
grpc_schedule_on_exec_ctx);
if (gpr_time_cmp(deadline, now) <= 0) { if (gpr_time_cmp(deadline, now) <= 0) {
timer->triggered = 1; timer->triggered = 1;
grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE);
return; return;
} }
timer->triggered = 0; timer->triggered = 0;
@ -83,7 +84,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
if (!timer->triggered) { if (!timer->triggered) {
timer->triggered = 1; timer->triggered = 1;
grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL); grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer); stop_uv_timer((uv_timer_t *)timer->uv_timer);
} }
} }
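
In both timer backends the closure is run with GRPC_ERROR_NONE when the timer fires and GRPC_ERROR_CANCELLED when grpc_timer_cancel wins the race, so timer callbacks typically branch on the error. A hedged sketch of such a callback (my_timer_cb, my_state, handle_deadline and release_state are illustrative names, not part of this change):

  static void my_timer_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
    my_state *st = arg;
    if (error == GRPC_ERROR_NONE) {
      handle_deadline(exec_ctx, st); /* the deadline actually expired */
    } else {
      /* cancelled (or shutdown): only release whatever the timer held */
      release_state(exec_ctx, st);
    }
  }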

@ -126,7 +126,7 @@ grpc_udp_server *grpc_udp_server_create(void) {
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
if (s->shutdown_complete != NULL) { if (s->shutdown_complete != NULL) {
grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
} }
gpr_mu_destroy(&s->mu); gpr_mu_destroy(&s->mu);
@ -170,8 +170,8 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
for (sp = s->head; sp; sp = sp->next) { for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr); grpc_unlink_if_unix_domain_socket(&sp->addr);
sp->destroyed_closure.cb = destroyed_port; grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
sp->destroyed_closure.cb_arg = s; grpc_schedule_on_exec_ctx);
/* Call the orphan_cb to signal that the FD is about to be closed and /* Call the orphan_cb to signal that the FD is about to be closed and
* should no longer be used. */ * should no longer be used. */
@ -446,8 +446,8 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
for (i = 0; i < pollset_count; i++) { for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
} }
sp->read_closure.cb = on_read; grpc_closure_init(&sp->read_closure, on_read, sp,
sp->read_closure.cb_arg = sp; grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++; s->active_ports++;
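
udp_server used to fill in the closure's cb/cb_arg fields by hand; now that grpc_closure also carries a scheduler, those sites go through grpc_closure_init so no field is left uninitialized:

  /* Before: raw field assignment (there was no scheduler field to set). */
  sp->read_closure.cb = on_read;
  sp->read_closure.cb_arg = sp;

  /* After: one call fills cb, cb_arg and the scheduler together. */
  grpc_closure_init(&sp->read_closure, on_read, sp, grpc_schedule_on_exec_ctx);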

@ -72,17 +72,16 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue); void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif #endif
/** Add a work item to a workqueue. Items added to a work queue will be started /** Fetch the workqueue closure scheduler. Items added to a work queue will be
in approximately the order they were enqueued, on some thread that may or started in approximately the order they were enqueued, on some thread that
may not be the current thread. Successive closures enqueued onto a workqueue may or may not be the current thread. Successive closures enqueued onto a
MAY be executed concurrently. workqueue MAY be executed concurrently.
It is generally more expensive to add a closure to a workqueue than to the It is generally more expensive to add a closure to a workqueue than to the
execution context, both in terms of CPU work and in execution latency. execution context, both in terms of CPU work and in execution latency.
Use work queues when it's important that other threads be given a chance to Use work queues when it's important that other threads be given a chance to
tackle some workload. */ tackle some workload. */
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue);
grpc_closure *closure, grpc_error *error);
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */ #endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */
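
The workqueue loses its enqueue entry point; it now just hands out a grpc_closure_scheduler, and work destined for it goes through the same grpc_closure_init/grpc_closure_sched pair as everything else. A migration sketch (wq, c, cb and arg are placeholders):

  /* Before: a workqueue-specific enqueue call. */
  grpc_workqueue_enqueue(exec_ctx, wq, &c, GRPC_ERROR_NONE);

  /* After: bind the workqueue's scheduler when the closure is set up,
     then use the uniform scheduling entry point. */
  grpc_closure_init(&c, cb, arg, grpc_workqueue_scheduler(wq));
  grpc_closure_sched(exec_ctx, &c, GRPC_ERROR_NONE);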

@ -58,9 +58,8 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {} void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif #endif
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
grpc_closure *closure, grpc_error *error) { return grpc_schedule_on_exec_ctx;
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
} }
#endif /* GPR_UV */ #endif /* GPR_UV */

@ -56,9 +56,8 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {} void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif #endif
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
grpc_closure *closure, grpc_error *error) { return grpc_schedule_on_exec_ctx;
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
} }
#endif /* GPR_WINDOWS */ #endif /* GPR_WINDOWS */
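
On the libuv and Windows builds there is no real workqueue behind this interface, so the stubs above make grpc_workqueue_scheduler return grpc_schedule_on_exec_ctx; on those platforms the two initializations below should behave identically (sketch, with c, cb, arg and wq as placeholders):

  grpc_closure_init(&c, cb, arg, grpc_workqueue_scheduler(wq));
  grpc_closure_init(&c, cb, arg, grpc_schedule_on_exec_ctx);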

@ -113,8 +113,9 @@ static void md_only_test_get_request_metadata(
if (c->is_async) { if (c->is_async) {
grpc_credentials_metadata_request *cb_arg = grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data); grpc_credentials_metadata_request_create(creds, cb, user_data);
grpc_executor_push( grpc_closure_sched(exec_ctx,
grpc_closure_create(on_simulated_token_fetch_done, cb_arg), grpc_closure_create(on_simulated_token_fetch_done,
cb_arg, grpc_executor_scheduler),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} else { } else {
cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK, NULL); cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK, NULL);
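
The executor follows the same pattern: grpc_executor_push goes away and the executor becomes just another scheduler. Offloading a callback to the executor's thread pool now reads as below (condensed from the fake-credentials hunk above):

  /* Before: a dedicated push API. */
  grpc_executor_push(grpc_closure_create(on_simulated_token_fetch_done, cb_arg),
                     GRPC_ERROR_NONE);

  /* After: create the closure against grpc_executor_scheduler and sched it. */
  grpc_closure_sched(exec_ctx,
                     grpc_closure_create(on_simulated_token_fetch_done, cb_arg,
                                         grpc_executor_scheduler),
                     GRPC_ERROR_NONE);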

@ -130,7 +130,8 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_get( grpc_httpcli_get(
&exec_ctx, &context, &detector.pollent, resource_quota, &request, &exec_ctx, &context, &detector.pollent, resource_quota, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay), gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
grpc_closure_create(on_compute_engine_detection_http_response, &detector), grpc_closure_create(on_compute_engine_detection_http_response, &detector,
grpc_schedule_on_exec_ctx),
&detector.response); &detector.response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
@ -155,7 +156,8 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_destroy(&context); grpc_httpcli_context_destroy(&context);
grpc_closure_init(&destroy_closure, destroy_pollset, grpc_closure_init(&destroy_closure, destroy_pollset,
grpc_polling_entity_pollset(&detector.pollent)); grpc_polling_entity_pollset(&detector.pollent),
grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, grpc_pollset_shutdown(&exec_ctx,
grpc_polling_entity_pollset(&detector.pollent), grpc_polling_entity_pollset(&detector.pollent),
&destroy_closure); &destroy_closure);

@ -677,7 +677,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_httpcli_get( grpc_httpcli_get(
exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay), gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
grpc_closure_create(on_keys_retrieved, ctx), grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]); &ctx->responses[HTTP_RESPONSE_KEYS]);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota); grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_json_destroy(json); grpc_json_destroy(json);
@ -778,7 +778,8 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
*(path_prefix++) = '\0'; *(path_prefix++) = '\0';
gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss); gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss);
} }
http_cb = grpc_closure_create(on_keys_retrieved, ctx); http_cb =
grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);
rsp_idx = HTTP_RESPONSE_KEYS; rsp_idx = HTTP_RESPONSE_KEYS;
} else { } else {
req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss); req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss);
@ -790,7 +791,8 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&req.http.path, "/%s%s", path_prefix, gpr_asprintf(&req.http.path, "/%s%s", path_prefix,
GRPC_OPENID_CONFIG_URL_SUFFIX); GRPC_OPENID_CONFIG_URL_SUFFIX);
} }
http_cb = grpc_closure_create(on_openid_config_retrieved, ctx); http_cb = grpc_closure_create(on_openid_config_retrieved, ctx,
grpc_schedule_on_exec_ctx);
rsp_idx = HTTP_RESPONSE_OPENID; rsp_idx = HTTP_RESPONSE_OPENID;
} }
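
Both constructors now take the scheduler: grpc_closure_create heap-allocates a one-shot closure that cleans itself up after it runs (convenient for the HTTP callbacks in the JWT verifier above), while grpc_closure_init fills in storage the caller already owns. A sketch of the distinction, reusing on_keys_retrieved/ctx from the hunk above:

  /* Heap-allocated; the wrapper frees itself after running once. */
  grpc_closure *http_cb =
      grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);

  /* Caller-owned storage, e.g. a field embedded in a longer-lived struct. */
  grpc_closure on_done;
  grpc_closure_init(&on_done, on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);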

@ -312,8 +312,9 @@ static void compute_engine_fetch_oauth2(
extreme memory pressure. */ extreme memory pressure. */
grpc_resource_quota *resource_quota = grpc_resource_quota *resource_quota =
grpc_resource_quota_create("oauth2_credentials"); grpc_resource_quota_create("oauth2_credentials");
grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request, grpc_httpcli_get(
deadline, grpc_closure_create(response_cb, metadata_req), exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline,
grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response); &metadata_req->response);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota); grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
} }
@ -368,9 +369,10 @@ static void refresh_token_fetch_oauth2(
extreme memory pressure. */ extreme memory pressure. */
grpc_resource_quota *resource_quota = grpc_resource_quota *resource_quota =
grpc_resource_quota_create("oauth2_credentials_refresh"); grpc_resource_quota_create("oauth2_credentials_refresh");
grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota, grpc_httpcli_post(
&request, body, strlen(body), deadline, exec_ctx, httpcli_context, pollent, resource_quota, &request, body,
grpc_closure_create(response_cb, metadata_req), strlen(body), deadline,
grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response); &metadata_req->response);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota); grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(body); gpr_free(body);

@ -146,7 +146,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
} }
} }
ep->read_buffer = NULL; ep->read_buffer = NULL;
grpc_exec_ctx_sched(exec_ctx, ep->read_cb, error, NULL); grpc_closure_sched(exec_ctx, ep->read_cb, error);
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read"); SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
} }
@ -329,10 +329,9 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
if (result != TSI_OK) { if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */ /* TODO(yangg) do different things according to the error type? */
grpc_slice_buffer_reset_and_unref(&ep->output_buffer); grpc_slice_buffer_reset_and_unref(&ep->output_buffer);
grpc_exec_ctx_sched( grpc_closure_sched(
exec_ctx, cb, exec_ctx, cb,
grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result), grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result));
NULL);
GPR_TIMER_END("secure_endpoint.endpoint_write", 0); GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
return; return;
} }
@ -417,7 +416,7 @@ grpc_endpoint *grpc_secure_endpoint_create(
grpc_slice_buffer_init(&ep->output_buffer); grpc_slice_buffer_init(&ep->output_buffer);
grpc_slice_buffer_init(&ep->source_buffer); grpc_slice_buffer_init(&ep->source_buffer);
ep->read_buffer = NULL; ep->read_buffer = NULL;
grpc_closure_init(&ep->on_read, on_read, ep); grpc_closure_init(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
gpr_mu_init(&ep->protector_mu); gpr_mu_init(&ep->protector_mu);
gpr_ref_init(&ep->ref, 1); gpr_ref_init(&ep->ref, 1);
return &ep->base; return &ep->base;

@ -134,9 +134,9 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_auth_context **auth_context, grpc_auth_context **auth_context,
grpc_closure *on_peer_checked) { grpc_closure *on_peer_checked) {
if (sc == NULL) { if (sc == NULL) {
grpc_exec_ctx_sched( grpc_closure_sched(
exec_ctx, on_peer_checked, exec_ctx, on_peer_checked,
GRPC_ERROR_CREATE("cannot check peer -- no security connector"), NULL); GRPC_ERROR_CREATE("cannot check peer -- no security connector"));
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
} else { } else {
sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
@ -273,7 +273,7 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx,
GRPC_FAKE_TRANSPORT_SECURITY_TYPE); GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
end: end:
grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL); grpc_closure_sched(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
} }
@ -508,7 +508,7 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
? c->overridden_target_name ? c->overridden_target_name
: c->target_name, : c->target_name,
&peer, auth_context); &peer, auth_context);
grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL); grpc_closure_sched(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
} }
@ -518,7 +518,7 @@ static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
grpc_closure *on_peer_checked) { grpc_closure *on_peer_checked) {
grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context); grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context);
tsi_peer_destruct(&peer); tsi_peer_destruct(&peer);
grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL); grpc_closure_sched(exec_ctx, on_peer_checked, error);
} }
static void add_shallow_auth_property_to_peer(tsi_peer *peer, static void add_shallow_auth_property_to_peer(tsi_peer *peer,

@ -136,7 +136,7 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
h->shutdown = true; h->shutdown = true;
} }
// Invoke callback. // Invoke callback.
grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, error, NULL); grpc_closure_sched(exec_ctx, h->on_handshake_done, error);
} }
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
@ -173,7 +173,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1); grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
grpc_channel_args_destroy(tmp_args); grpc_channel_args_destroy(tmp_args);
// Invoke callback. // Invoke callback.
grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
// Set shutdown to true so that subsequent calls to // Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing. // security_handshaker_shutdown() do nothing.
h->shutdown = true; h->shutdown = true;
@ -392,10 +392,13 @@ static grpc_handshaker *security_handshaker_create(
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE; h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
h->handshake_buffer = gpr_malloc(h->handshake_buffer_size); h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
grpc_closure_init(&h->on_handshake_data_sent_to_peer, grpc_closure_init(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h); on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
grpc_closure_init(&h->on_handshake_data_received_from_peer, grpc_closure_init(&h->on_handshake_data_received_from_peer,
on_handshake_data_received_from_peer, h); on_handshake_data_received_from_peer, h,
grpc_closure_init(&h->on_peer_checked, on_peer_checked, h); grpc_schedule_on_exec_ctx);
grpc_closure_init(&h->on_peer_checked, on_peer_checked, h,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&h->left_overs); grpc_slice_buffer_init(&h->left_overs);
grpc_slice_buffer_init(&h->outgoing); grpc_slice_buffer_init(&h->outgoing);
return &h->base; return &h->base;
@ -418,9 +421,8 @@ static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_tcp_server_acceptor *acceptor, grpc_tcp_server_acceptor *acceptor,
grpc_closure *on_handshake_done, grpc_closure *on_handshake_done,
grpc_handshaker_args *args) { grpc_handshaker_args *args) {
grpc_exec_ctx_sched(exec_ctx, on_handshake_done, grpc_closure_sched(exec_ctx, on_handshake_done,
GRPC_ERROR_CREATE("Failed to create security handshaker"), GRPC_ERROR_CREATE("Failed to create security handshaker"));
NULL);
} }
static const grpc_handshaker_vtable fail_handshaker_vtable = { static const grpc_handshaker_vtable fail_handshaker_vtable = {

@ -132,7 +132,7 @@ static void on_md_processing_done(
grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md, grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
elem); elem);
grpc_metadata_array_destroy(&calld->md); grpc_metadata_array_destroy(&calld->md);
grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL); grpc_closure_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
} else { } else {
grpc_slice message; grpc_slice message;
grpc_transport_stream_op *close_op = gpr_malloc(sizeof(*close_op)); grpc_transport_stream_op *close_op = gpr_malloc(sizeof(*close_op));
@ -148,13 +148,13 @@ static void on_md_processing_done(
calld->transport_op->send_message = NULL; calld->transport_op->send_message = NULL;
} }
calld->transport_op->send_trailing_metadata = NULL; calld->transport_op->send_trailing_metadata = NULL;
close_op->on_complete = grpc_closure_create(destroy_op, close_op); close_op->on_complete =
grpc_closure_create(destroy_op, close_op, grpc_schedule_on_exec_ctx);
grpc_transport_stream_op_add_close(close_op, status, &message); grpc_transport_stream_op_add_close(close_op, status, &message);
grpc_call_next_op(&exec_ctx, elem, close_op); grpc_call_next_op(&exec_ctx, elem, close_op);
grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, grpc_closure_sched(&exec_ctx, calld->on_done_recv,
grpc_error_set_int(GRPC_ERROR_CREATE(error_details), grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
GRPC_ERROR_INT_GRPC_STATUS, status), GRPC_ERROR_INT_GRPC_STATUS, status));
NULL);
} }
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
@ -174,8 +174,7 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
return; return;
} }
} }
grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error), grpc_closure_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error));
NULL);
} }
static void set_recv_ops_md_callbacks(grpc_call_element *elem, static void set_recv_ops_md_callbacks(grpc_call_element *elem,
@ -214,7 +213,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */ /* initialize members */
memset(calld, 0, sizeof(*calld)); memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem); grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem,
grpc_schedule_on_exec_ctx);
if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) { if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
args->context[GRPC_CONTEXT_SECURITY].destroy( args->context[GRPC_CONTEXT_SECURITY].destroy(

@ -794,7 +794,8 @@ static void send_cancel(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
memset(&tc->op, 0, sizeof(tc->op)); memset(&tc->op, 0, sizeof(tc->op));
tc->op.cancel_error = tc->error; tc->op.cancel_error = tc->error;
/* reuse closure to catch completion */ /* reuse closure to catch completion */
grpc_closure_init(&tc->closure, done_termination, tc); grpc_closure_init(&tc->closure, done_termination, tc,
grpc_schedule_on_exec_ctx);
tc->op.on_complete = &tc->closure; tc->op.on_complete = &tc->closure;
execute_op(exec_ctx, tc->call, &tc->op); execute_op(exec_ctx, tc->call, &tc->op);
} }
@ -804,7 +805,8 @@ static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
memset(&tc->op, 0, sizeof(tc->op)); memset(&tc->op, 0, sizeof(tc->op));
tc->op.close_error = tc->error; tc->op.close_error = tc->error;
/* reuse closure to catch completion */ /* reuse closure to catch completion */
grpc_closure_init(&tc->closure, done_termination, tc); grpc_closure_init(&tc->closure, done_termination, tc,
grpc_schedule_on_exec_ctx);
tc->op.on_complete = &tc->closure; tc->op.on_complete = &tc->closure;
execute_op(exec_ctx, tc->call, &tc->op); execute_op(exec_ctx, tc->call, &tc->op);
} }
@ -814,13 +816,13 @@ static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx,
set_status_from_error(tc->call, STATUS_FROM_API_OVERRIDE, tc->error); set_status_from_error(tc->call, STATUS_FROM_API_OVERRIDE, tc->error);
if (tc->type == TC_CANCEL) { if (tc->type == TC_CANCEL) {
grpc_closure_init(&tc->closure, send_cancel, tc); grpc_closure_init(&tc->closure, send_cancel, tc, grpc_schedule_on_exec_ctx);
GRPC_CALL_INTERNAL_REF(tc->call, "cancel"); GRPC_CALL_INTERNAL_REF(tc->call, "cancel");
} else if (tc->type == TC_CLOSE) { } else if (tc->type == TC_CLOSE) {
grpc_closure_init(&tc->closure, send_close, tc); grpc_closure_init(&tc->closure, send_close, tc, grpc_schedule_on_exec_ctx);
GRPC_CALL_INTERNAL_REF(tc->call, "close"); GRPC_CALL_INTERNAL_REF(tc->call, "close");
} }
grpc_exec_ctx_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE);
return GRPC_CALL_OK; return GRPC_CALL_OK;
} }
@ -1138,8 +1140,8 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
} else { } else {
*call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0); *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
} }
grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, bctl,
bctl); grpc_schedule_on_exec_ctx);
continue_receiving_slices(exec_ctx, bctl); continue_receiving_slices(exec_ctx, bctl);
} }
} }
@ -1251,9 +1253,10 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
call->has_initial_md_been_received = true; call->has_initial_md_been_received = true;
if (call->saved_receiving_stream_ready_bctlp != NULL) { if (call->saved_receiving_stream_ready_bctlp != NULL) {
grpc_closure *saved_rsr_closure = grpc_closure_create( grpc_closure *saved_rsr_closure = grpc_closure_create(
receiving_stream_ready, call->saved_receiving_stream_ready_bctlp); receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
grpc_schedule_on_exec_ctx);
call->saved_receiving_stream_ready_bctlp = NULL; call->saved_receiving_stream_ready_bctlp = NULL;
grpc_exec_ctx_sched(exec_ctx, saved_rsr_closure, error, NULL); grpc_closure_sched(exec_ctx, saved_rsr_closure, error);
} }
gpr_mu_unlock(&call->mu); gpr_mu_unlock(&call->mu);
@ -1558,7 +1561,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->received_initial_metadata = 1; call->received_initial_metadata = 1;
call->buffered_metadata[0] = op->data.recv_initial_metadata; call->buffered_metadata[0] = op->data.recv_initial_metadata;
grpc_closure_init(&call->receiving_initial_metadata_ready, grpc_closure_init(&call->receiving_initial_metadata_ready,
receiving_initial_metadata_ready, bctl); receiving_initial_metadata_ready, bctl,
grpc_schedule_on_exec_ctx);
bctl->recv_initial_metadata = 1; bctl->recv_initial_metadata = 1;
stream_op->recv_initial_metadata = stream_op->recv_initial_metadata =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */]; &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
@ -1581,7 +1585,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->receiving_buffer = op->data.recv_message; call->receiving_buffer = op->data.recv_message;
stream_op->recv_message = &call->receiving_stream; stream_op->recv_message = &call->receiving_stream;
grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready, grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
bctl); bctl, grpc_schedule_on_exec_ctx);
stream_op->recv_message_ready = &call->receiving_stream_ready; stream_op->recv_message_ready = &call->receiving_stream_ready;
num_completion_callbacks_needed++; num_completion_callbacks_needed++;
break; break;
@ -1646,7 +1650,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed); gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
stream_op->context = call->context; stream_op->context = call->context;
grpc_closure_init(&bctl->finish_batch, finish_batch, bctl); grpc_closure_init(&bctl->finish_batch, finish_batch, bctl,
grpc_schedule_on_exec_ctx);
stream_op->on_complete = &bctl->finish_batch; stream_op->on_complete = &bctl->finish_batch;
gpr_mu_unlock(&call->mu); gpr_mu_unlock(&call->mu);
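
receiving_initial_metadata_ready above uses a small deferral trick: if the stream-ready notification arrived before initial metadata, the saved state is re-wrapped in a fresh heap closure and scheduled on the exec_ctx, so it runs after call->mu is released rather than inline under the lock. Condensed from the new code:

  if (call->saved_receiving_stream_ready_bctlp != NULL) {
    grpc_closure *saved_rsr_closure = grpc_closure_create(
        receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
        grpc_schedule_on_exec_ctx);
    call->saved_receiving_stream_ready_bctlp = NULL;
    /* deferred: runs when the exec_ctx is flushed, outside the mutex */
    grpc_closure_sched(exec_ctx, saved_rsr_closure, error);
  }
  gpr_mu_unlock(&call->mu);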

@ -71,7 +71,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
GPR_ASSERT(reserved == NULL); GPR_ASSERT(reserved == NULL);
pr->tag = tag; pr->tag = tag;
pr->cq = cq; pr->cq = cq;
grpc_closure_init(&pr->closure, ping_done, pr); grpc_closure_init(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
op->send_ping = &pr->closure; op->send_ping = &pr->closure;
op->bind_pollset = grpc_cq_pollset(cq); op->bind_pollset = grpc_cq_pollset(cq);
grpc_cq_begin_op(cq, tag); grpc_cq_begin_op(cq, tag);

@ -168,7 +168,8 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
#ifndef NDEBUG #ifndef NDEBUG
cc->outstanding_tag_count = 0; cc->outstanding_tag_count = 0;
#endif #endif
grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc); grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc,
grpc_schedule_on_exec_ctx);
GPR_TIMER_END("grpc_completion_queue_create", 0); GPR_TIMER_END("grpc_completion_queue_create", 0);

@ -98,16 +98,16 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->on_connectivity_state_change) { if (op->on_connectivity_state_change) {
GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN); GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
*op->connectivity_state = GRPC_CHANNEL_SHUTDOWN; *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
grpc_exec_ctx_sched(exec_ctx, op->on_connectivity_state_change, grpc_closure_sched(exec_ctx, op->on_connectivity_state_change,
GRPC_ERROR_NONE, NULL); GRPC_ERROR_NONE);
} }
if (op->send_ping != NULL) { if (op->send_ping != NULL) {
grpc_exec_ctx_sched(exec_ctx, op->send_ping, grpc_closure_sched(exec_ctx, op->send_ping,
GRPC_ERROR_CREATE("lame client channel"), NULL); GRPC_ERROR_CREATE("lame client channel"));
} }
GRPC_ERROR_UNREF(op->disconnect_with_error); GRPC_ERROR_UNREF(op->disconnect_with_error);
if (op->on_consumed != NULL) { if (op->on_consumed != NULL) {
grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
} }
} }

@ -278,7 +278,8 @@ static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel, static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
int send_goaway, grpc_error *send_disconnect) { int send_goaway, grpc_error *send_disconnect) {
struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc)); struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
grpc_closure_init(&sc->closure, shutdown_cleanup, sc); grpc_closure_init(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure); grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
grpc_channel_element *elem; grpc_channel_element *elem;
@ -346,9 +347,9 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
grpc_closure_init( grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie, &calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE, grpc_schedule_on_exec_ctx);
NULL); grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
} }
} }
@ -440,8 +441,8 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
orphan_channel(chand); orphan_channel(chand);
server_ref(chand->server); server_ref(chand->server);
maybe_finish_shutdown(exec_ctx, chand->server); maybe_finish_shutdown(exec_ctx, chand->server);
chand->finish_destroy_channel_closure.cb = finish_destroy_channel; grpc_closure_init(&chand->finish_destroy_channel_closure,
chand->finish_destroy_channel_closure.cb_arg = chand; finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);
if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) { if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error); const char *msg = grpc_error_string(error);
@ -545,8 +546,9 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
grpc_closure_init( grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie, &calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, error, NULL); grpc_schedule_on_exec_ctx);
grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, error);
return; return;
} }
@ -590,9 +592,9 @@ static void finish_start_new_rpc(
gpr_mu_lock(&calld->mu_state); gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED; calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE, grpc_schedule_on_exec_ctx);
NULL); grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
return; return;
} }
@ -607,7 +609,8 @@ static void finish_start_new_rpc(
memset(&op, 0, sizeof(op)); memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE; op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message = &calld->payload; op.data.recv_message = &calld->payload;
grpc_closure_init(&calld->publish, publish_new_rpc, elem); grpc_closure_init(&calld->publish, publish_new_rpc, elem,
grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1, grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1,
&calld->publish); &calld->publish);
break; break;
@ -813,9 +816,10 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
if (calld->state == NOT_STARTED) { if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED; calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, grpc_schedule_on_exec_ctx);
GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
} else if (calld->state == PENDING) { } else if (calld->state == PENDING) {
calld->state = ZOMBIED; calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
@ -851,7 +855,8 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
memset(&op, 0, sizeof(op)); memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA; op.op = GRPC_OP_RECV_INITIAL_METADATA;
op.data.recv_initial_metadata = &calld->initial_metadata; op.data.recv_initial_metadata = &calld->initial_metadata;
grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem); grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem,
grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1, grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
&calld->got_initial_metadata); &calld->got_initial_metadata);
} }
@ -887,7 +892,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&calld->mu_state); gpr_mu_init(&calld->mu_state);
grpc_closure_init(&calld->server_on_recv_initial_metadata, grpc_closure_init(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem); server_on_recv_initial_metadata, elem,
grpc_schedule_on_exec_ctx);
server_ref(chand->server); server_ref(chand->server);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
@ -926,7 +932,8 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->registered_methods = NULL; chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE; chand->connectivity_state = GRPC_CHANNEL_IDLE;
grpc_closure_init(&chand->channel_connectivity_changed, grpc_closure_init(&chand->channel_connectivity_changed,
channel_connectivity_changed, chand); channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
@ -1278,7 +1285,8 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* Shutdown listeners */ /* Shutdown listeners */
for (l = server->listeners; l; l = l->next) { for (l = server->listeners; l; l = l->next) {
grpc_closure_init(&l->destroy_done, listener_destroy_done, server); grpc_closure_init(&l->destroy_done, listener_destroy_done, server,
grpc_schedule_on_exec_ctx);
l->destroy(&exec_ctx, server, l->arg, &l->destroy_done); l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
} }
@ -1384,9 +1392,10 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
grpc_closure_init( grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie, &calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, grpc_schedule_on_exec_ctx);
GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
} else { } else {
GPR_ASSERT(calld->state == PENDING); GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED; calld->state = ACTIVATED;

@ -81,7 +81,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
} else { } else {
error = GRPC_ERROR_CREATE("Shutdown connectivity owner"); error = GRPC_ERROR_CREATE("Shutdown connectivity owner");
} }
grpc_exec_ctx_sched(exec_ctx, w->notify, error, NULL); grpc_closure_sched(exec_ctx, w->notify, error);
gpr_free(w); gpr_free(w);
} }
GRPC_ERROR_UNREF(tracker->current_error); GRPC_ERROR_UNREF(tracker->current_error);
@ -121,7 +121,7 @@ bool grpc_connectivity_state_notify_on_state_change(
if (current == NULL) { if (current == NULL) {
grpc_connectivity_state_watcher *w = tracker->watchers; grpc_connectivity_state_watcher *w = tracker->watchers;
if (w != NULL && w->notify == notify) { if (w != NULL && w->notify == notify) {
grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL); grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
tracker->watchers = w->next; tracker->watchers = w->next;
gpr_free(w); gpr_free(w);
return false; return false;
@ -129,7 +129,7 @@ bool grpc_connectivity_state_notify_on_state_change(
while (w != NULL) { while (w != NULL) {
grpc_connectivity_state_watcher *rm_candidate = w->next; grpc_connectivity_state_watcher *rm_candidate = w->next;
if (rm_candidate != NULL && rm_candidate->notify == notify) { if (rm_candidate != NULL && rm_candidate->notify == notify) {
grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL); grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
w->next = w->next->next; w->next = w->next->next;
gpr_free(rm_candidate); gpr_free(rm_candidate);
return false; return false;
@ -140,8 +140,8 @@ bool grpc_connectivity_state_notify_on_state_change(
} else { } else {
if (tracker->current_state != *current) { if (tracker->current_state != *current) {
*current = tracker->current_state; *current = tracker->current_state;
grpc_exec_ctx_sched(exec_ctx, notify, grpc_closure_sched(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error), NULL); GRPC_ERROR_REF(tracker->current_error));
} else { } else {
grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w)); grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
w->current = current; w->current = current;
@ -191,8 +191,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name, gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify); w->notify);
} }
grpc_exec_ctx_sched(exec_ctx, w->notify, grpc_closure_sched(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error), NULL); GRPC_ERROR_REF(tracker->current_error));
gpr_free(w); gpr_free(w);
} }
} }
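
The connectivity tracker delivers state changes by scheduling each watcher's notify closure; the error is GRPC_ERROR_REF'd per watcher because every scheduled closure takes ownership of the error it is given while the tracker keeps its own reference. A condensed sketch of the notification loop implied by the hunks above:

  grpc_connectivity_state_watcher *w;
  while ((w = tracker->watchers) != NULL) {
    *w->current = tracker->current_state;
    tracker->watchers = w->next;
    /* each watcher gets its own reference to the shared error */
    grpc_closure_sched(exec_ctx, w->notify,
                       GRPC_ERROR_REF(tracker->current_error));
    gpr_free(w);
  }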

@ -68,7 +68,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) { grpc_stream_refcount *refcount) {
#endif #endif
if (gpr_unref(&refcount->refs)) { if (gpr_unref(&refcount->refs)) {
grpc_exec_ctx_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
} }
} }
@ -82,7 +82,7 @@ void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
grpc_iomgr_cb_func cb, void *cb_arg) { grpc_iomgr_cb_func cb, void *cb_arg) {
#endif #endif
gpr_ref_init(&refcount->refs, initial_refs); gpr_ref_init(&refcount->refs, initial_refs);
grpc_closure_init(&refcount->destroy, cb, cb_arg); grpc_closure_init(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
} }
static void move64(uint64_t *from, uint64_t *to) { static void move64(uint64_t *from, uint64_t *to) {
@ -168,11 +168,10 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx, void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op, grpc_transport_stream_op *op,
grpc_error *error) { grpc_error *error) {
grpc_exec_ctx_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error), grpc_closure_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error));
NULL); grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
grpc_exec_ctx_sched(exec_ctx, op->recv_initial_metadata_ready, GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error), NULL); grpc_closure_sched(exec_ctx, op->on_complete, error);
grpc_exec_ctx_sched(exec_ctx, op->on_complete, error, NULL);
} }
typedef struct { typedef struct {
@ -196,7 +195,8 @@ static void add_error(grpc_transport_stream_op *op, grpc_error **which,
cmd = gpr_malloc(sizeof(*cmd)); cmd = gpr_malloc(sizeof(*cmd));
cmd->error = error; cmd->error = error;
cmd->then_call = op->on_complete; cmd->then_call = op->on_complete;
grpc_closure_init(&cmd->closure, free_message, cmd); grpc_closure_init(&cmd->closure, free_message, cmd,
grpc_schedule_on_exec_ctx);
op->on_complete = &cmd->closure; op->on_complete = &cmd->closure;
*which = error; *which = error;
} }
@ -269,14 +269,14 @@ typedef struct {
static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg, static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
made_transport_op *op = arg; made_transport_op *op = arg;
grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error), grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
NULL);
gpr_free(op); gpr_free(op);
} }
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) { grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
made_transport_op *op = gpr_malloc(sizeof(*op)); made_transport_op *op = gpr_malloc(sizeof(*op));
grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op); grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op,
grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete; op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op)); memset(&op->op, 0, sizeof(op->op));
op->op.on_consumed = &op->outer_on_complete; op->op.on_consumed = &op->outer_on_complete;
@ -292,8 +292,7 @@ typedef struct {
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg, static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
made_transport_stream_op *op = arg; made_transport_stream_op *op = arg;
grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error), grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
NULL);
gpr_free(op); gpr_free(op);
} }
@ -301,7 +300,7 @@ grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_complete) { grpc_closure *on_complete) {
made_transport_stream_op *op = gpr_malloc(sizeof(*op)); made_transport_stream_op *op = gpr_malloc(sizeof(*op));
grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op, grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
op); op, grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete; op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op)); memset(&op->op, 0, sizeof(op->op));
op->op.on_complete = &op->outer_on_complete; op->op.on_complete = &op->outer_on_complete;
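
grpc_make_transport_op shows the wrapping idiom used in several of these hunks: a heap-allocated container embeds an outer closure whose only job is to forward completion to the caller's inner closure and then free the container. Consolidated from the new code above (the stream-op variant is identical in shape):

  static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
                                        grpc_error *error) {
    made_transport_op *op = arg;
    /* hand the result to the caller's closure, then release the wrapper */
    grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
    gpr_free(op);
  }

  grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
    made_transport_op *op = gpr_malloc(sizeof(*op));
    grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op,
                      grpc_schedule_on_exec_ctx);
    op->inner_on_complete = on_complete;
    memset(&op->op, 0, sizeof(op->op));
    op->op.on_consumed = &op->outer_on_complete;
    return &op->op;
  }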

@ -148,7 +148,8 @@ void grpc_run_bad_client_test(
grpc_slice_buffer_init(&outgoing); grpc_slice_buffer_init(&outgoing);
grpc_slice_buffer_add(&outgoing, slice); grpc_slice_buffer_add(&outgoing, slice);
grpc_closure_init(&done_write_closure, done_write, &a); grpc_closure_init(&done_write_closure, done_write, &a,
grpc_schedule_on_exec_ctx);
/* Write data */ /* Write data */
grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure); grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure);
@ -175,7 +176,8 @@ void grpc_run_bad_client_test(
grpc_slice_buffer_init(&args.incoming); grpc_slice_buffer_init(&args.incoming);
gpr_event_init(&args.read_done); gpr_event_init(&args.read_done);
grpc_closure read_done_closure; grpc_closure read_done_closure;
grpc_closure_init(&read_done_closure, read_done, &args); grpc_closure_init(&read_done_closure, read_done, &args,
grpc_schedule_on_exec_ctx);
grpc_endpoint_read(&exec_ctx, sfd.client, &args.incoming, grpc_endpoint_read(&exec_ctx, sfd.client, &args.incoming,
&read_done_closure); &read_done_closure);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);

@ -108,16 +108,18 @@ int main(int argc, char **argv) {
grpc_resolver *resolver = create_resolver(&exec_ctx, "dns:test"); grpc_resolver *resolver = create_resolver(&exec_ctx, "dns:test");
gpr_event ev1; gpr_event ev1;
gpr_event_init(&ev1); gpr_event_init(&ev1);
grpc_resolver_next(&exec_ctx, resolver, &result, grpc_resolver_next(
grpc_closure_create(on_done, &ev1)); &exec_ctx, resolver, &result,
grpc_closure_create(on_done, &ev1, grpc_schedule_on_exec_ctx));
grpc_exec_ctx_flush(&exec_ctx); grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(wait_loop(5, &ev1)); GPR_ASSERT(wait_loop(5, &ev1));
GPR_ASSERT(result == NULL); GPR_ASSERT(result == NULL);
gpr_event ev2; gpr_event ev2;
gpr_event_init(&ev2); gpr_event_init(&ev2);
grpc_resolver_next(&exec_ctx, resolver, &result, grpc_resolver_next(
grpc_closure_create(on_done, &ev2)); &exec_ctx, resolver, &result,
grpc_closure_create(on_done, &ev2, grpc_schedule_on_exec_ctx));
grpc_exec_ctx_flush(&exec_ctx); grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(wait_loop(30, &ev2)); GPR_ASSERT(wait_loop(30, &ev2));
GPR_ASSERT(result != NULL); GPR_ASSERT(result != NULL);

@ -68,8 +68,8 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) {
on_resolution_arg on_res_arg; on_resolution_arg on_res_arg;
memset(&on_res_arg, 0, sizeof(on_res_arg)); memset(&on_res_arg, 0, sizeof(on_res_arg));
on_res_arg.expected_server_name = uri->path; on_res_arg.expected_server_name = uri->path;
grpc_closure *on_resolution = grpc_closure *on_resolution = grpc_closure_create(
grpc_closure_create(on_resolution_cb, &on_res_arg); on_resolution_cb, &on_res_arg, grpc_schedule_on_exec_ctx);
grpc_resolver_next(&exec_ctx, resolver, &on_res_arg.resolver_result, grpc_resolver_next(&exec_ctx, resolver, &on_res_arg.resolver_result,
on_resolution); on_resolution);

@ -94,7 +94,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_tcp_server_acceptor *acceptor) { grpc_tcp_server_acceptor *acceptor) {
gpr_free(acceptor); gpr_free(acceptor);
test_tcp_server *server = arg; test_tcp_server *server = arg;
grpc_closure_init(&on_read, handle_read, NULL); grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&state.incoming_buffer); grpc_slice_buffer_init(&state.incoming_buffer);
grpc_slice_buffer_init(&state.temp_incoming_buffer); grpc_slice_buffer_init(&state.temp_incoming_buffer);
state.tcp = tcp; state.tcp = tcp;

@ -147,8 +147,8 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_tcp_server_acceptor *acceptor) { grpc_tcp_server_acceptor *acceptor) {
gpr_free(acceptor); gpr_free(acceptor);
test_tcp_server *server = arg; test_tcp_server *server = arg;
grpc_closure_init(&on_read, handle_read, NULL); grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
grpc_closure_init(&on_write, done_write, NULL); grpc_closure_init(&on_write, done_write, NULL, grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&state.temp_incoming_buffer); grpc_slice_buffer_init(&state.temp_incoming_buffer);
grpc_slice_buffer_init(&state.outgoing_buffer); grpc_slice_buffer_init(&state.outgoing_buffer);
state.tcp = tcp; state.tcp = tcp;

@ -87,7 +87,7 @@ static void fake_resolver_shutdown(grpc_exec_ctx* exec_ctx,
gpr_mu_lock(&r->mu); gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) { if (r->next_completion != NULL) {
*r->target_result = NULL; *r->target_result = NULL;
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL; r->next_completion = NULL;
} }
gpr_mu_unlock(&r->mu); gpr_mu_unlock(&r->mu);
@ -100,7 +100,7 @@ static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses); grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
*r->target_result = *r->target_result =
grpc_channel_args_copy_and_add(r->channel_args, &arg, 1); grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL; r->next_completion = NULL;
} }
} }

@ -376,15 +376,20 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
gpr_ref_init(&conn->refcount, 1); gpr_ref_init(&conn->refcount, 1);
conn->pollset_set = grpc_pollset_set_create(); conn->pollset_set = grpc_pollset_set_create();
grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset); grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset);
grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn); grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn,
grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, grpc_schedule_on_exec_ctx);
conn); grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, conn,
grpc_closure_init(&conn->on_write_response_done, on_write_response_done, grpc_schedule_on_exec_ctx);
conn); grpc_closure_init(&conn->on_write_response_done, on_write_response_done, conn,
grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn); grpc_schedule_on_exec_ctx);
grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn); grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn,
grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn); grpc_schedule_on_exec_ctx);
grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn); grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn,
grpc_schedule_on_exec_ctx);
grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn,
grpc_schedule_on_exec_ctx);
grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&conn->client_read_buffer); grpc_slice_buffer_init(&conn->client_read_buffer);
grpc_slice_buffer_init(&conn->client_deferred_write_buffer); grpc_slice_buffer_init(&conn->client_deferred_write_buffer);
grpc_slice_buffer_init(&conn->client_write_buffer); grpc_slice_buffer_init(&conn->client_write_buffer);
@ -471,7 +476,8 @@ void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) {
gpr_free(proxy->proxy_name); gpr_free(proxy->proxy_name);
grpc_channel_args_destroy(proxy->channel_args); grpc_channel_args_destroy(proxy->channel_args);
grpc_closure destroyed; grpc_closure destroyed;
grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset); grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset,
grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, proxy->pollset, &destroyed); grpc_pollset_shutdown(&exec_ctx, proxy->pollset, &destroyed);
gpr_free(proxy); gpr_free(proxy);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);

@ -349,11 +349,11 @@ static void finish_resolve(grpc_exec_ctx *exec_ctx, void *arg,
addrs->addrs = gpr_malloc(sizeof(*addrs->addrs)); addrs->addrs = gpr_malloc(sizeof(*addrs->addrs));
addrs->addrs[0].len = 0; addrs->addrs[0].len = 0;
*r->addrs = addrs; *r->addrs = addrs;
grpc_exec_ctx_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE);
} else { } else {
grpc_exec_ctx_sched( grpc_closure_sched(
exec_ctx, r->on_done, exec_ctx, r->on_done,
GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1), NULL); GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1));
} }
gpr_free(r->addr); gpr_free(r->addr);
@ -398,7 +398,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
future_connect *fc = arg; future_connect *fc = arg;
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
*fc->ep = NULL; *fc->ep = NULL;
grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error), NULL); grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error));
} else if (g_server != NULL) { } else if (g_server != NULL) {
grpc_endpoint *client; grpc_endpoint *client;
grpc_endpoint *server; grpc_endpoint *server;
@ -410,7 +410,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_server_setup_transport(exec_ctx, g_server, transport, NULL, NULL); grpc_server_setup_transport(exec_ctx, g_server, transport, NULL, NULL);
grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL); grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE, NULL); grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE);
} else { } else {
sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline); sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline);
} }
@ -421,8 +421,8 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep, gpr_timespec deadline) { grpc_endpoint **ep, gpr_timespec deadline) {
if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) { if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) {
*ep = NULL; *ep = NULL;
grpc_exec_ctx_sched(exec_ctx, closure, grpc_closure_sched(exec_ctx, closure,
GRPC_ERROR_CREATE("Connect deadline exceeded"), NULL); GRPC_ERROR_CREATE("Connect deadline exceeded"));
return; return;
} }

@ -217,9 +217,9 @@ static void recv_im_ready(grpc_exec_ctx *exec_ctx, void *arg,
&message); &message);
grpc_call_next_op(exec_ctx, elem, op); grpc_call_next_op(exec_ctx, elem, op);
} }
grpc_exec_ctx_sched( grpc_closure_sched(
exec_ctx, calld->recv_im_ready, exec_ctx, calld->recv_im_ready,
GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1), NULL); GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1));
} }
static void start_transport_stream_op(grpc_exec_ctx *exec_ctx, static void start_transport_stream_op(grpc_exec_ctx *exec_ctx,
@ -228,7 +228,8 @@ static void start_transport_stream_op(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
if (op->recv_initial_metadata != NULL) { if (op->recv_initial_metadata != NULL) {
calld->recv_im_ready = op->recv_initial_metadata_ready; calld->recv_im_ready = op->recv_initial_metadata_ready;
op->recv_initial_metadata_ready = grpc_closure_create(recv_im_ready, elem); op->recv_initial_metadata_ready =
grpc_closure_create(recv_im_ready, elem, grpc_schedule_on_exec_ctx);
} }
grpc_call_next_op(exec_ctx, elem, op); grpc_call_next_op(exec_ctx, elem, op);
} }

@ -90,9 +90,10 @@ static void test_get(int port) {
grpc_http_response response; grpc_http_response response;
memset(&response, 0, sizeof(response)); memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get"); grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req, grpc_httpcli_get(
n_seconds_time(15), &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
grpc_closure_create(on_finish, &response), &response); grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
&response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu); gpr_mu_lock(g_mu);
while (!g_done) { while (!g_done) {
@ -130,9 +131,11 @@ static void test_post(int port) {
grpc_http_response response; grpc_http_response response;
memset(&response, 0, sizeof(response)); memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post"); grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req, grpc_httpcli_post(
"hello", 5, n_seconds_time(15), &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
grpc_closure_create(on_finish, &response), &response); n_seconds_time(15),
grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
&response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu); gpr_mu_lock(g_mu);
while (!g_done) { while (!g_done) {
@ -207,7 +210,8 @@ int main(int argc, char **argv) {
test_post(port); test_post(port);
grpc_httpcli_context_destroy(&g_context); grpc_httpcli_context_destroy(&g_context);
grpc_closure_init(&destroyed, destroy_pops, &g_pops); grpc_closure_init(&destroyed, destroy_pops, &g_pops,
grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops), grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
&destroyed); &destroyed);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);

@@ -91,9 +91,10 @@ static void test_get(int port) {
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
-  grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
-                   n_seconds_time(15),
-                   grpc_closure_create(on_finish, &response), &response);
+  grpc_httpcli_get(
+      &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
+      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      &response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
@@ -132,9 +133,11 @@ static void test_post(int port) {
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
-  grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
-                    "hello", 5, n_seconds_time(15),
-                    grpc_closure_create(on_finish, &response), &response);
+  grpc_httpcli_post(
+      &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
+      n_seconds_time(15),
+      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      &response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
@@ -210,7 +213,8 @@ int main(int argc, char **argv) {
   test_post(port);
   grpc_httpcli_context_destroy(&g_context);
-  grpc_closure_init(&destroyed, destroy_pops, &g_pops);
+  grpc_closure_init(&destroyed, destroy_pops, &g_pops,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
                         &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);

@@ -60,9 +60,9 @@ static void test_code(void) {
   closure_list.head = NULL;
   closure_list.tail = NULL;
-  grpc_closure_init(&closure, NULL, NULL);
-  grpc_closure_create(NULL, NULL);
+  grpc_closure_init(&closure, NULL, NULL, grpc_schedule_on_exec_ctx);
+  grpc_closure_create(NULL, NULL, grpc_schedule_on_exec_ctx);
   grpc_closure_list_move(NULL, NULL);
   grpc_closure_list_append(NULL, NULL, GRPC_ERROR_CREATE("Foo"));
@@ -72,8 +72,8 @@ static void test_code(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_exec_ctx_flush(&exec_ctx);
   grpc_exec_ctx_finish(&exec_ctx);
-  grpc_exec_ctx_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"), NULL);
-  grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);
+  grpc_closure_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"));
+  grpc_closure_list_sched(&exec_ctx, &closure_list);
   /* endpoint.h */
   grpc_endpoint endpoint;
@@ -99,7 +99,6 @@ static void test_code(void) {
   /* executor.h */
   grpc_executor_init();
-  grpc_executor_push(&closure, GRPC_ERROR_CREATE("Phi"));
   grpc_executor_shutdown();
   /* pollset.h */
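The hunk above captures the scheduling renames at a glance: grpc_exec_ctx_sched becomes grpc_closure_sched (dropping the trailing workqueue argument), grpc_exec_ctx_enqueue_list becomes grpc_closure_list_sched, and the separate grpc_executor_push entry point goes away. A hedged sketch of the same calls in the new spelling; run_two_ways and the closures it receives are illustrative, and the list initialization mirrors the test above:

/* Sketch: schedule one closure and then a small list of closures through the
   renamed entry points, assuming the same iomgr headers the test uses. */
static void run_two_ways(grpc_exec_ctx *exec_ctx, grpc_closure *a,
                         grpc_closure *b, grpc_closure *c) {
  /* Single closure: previously grpc_exec_ctx_sched(exec_ctx, a, err, NULL). */
  grpc_closure_sched(exec_ctx, a, GRPC_ERROR_NONE);

  /* A list of closures: previously grpc_exec_ctx_enqueue_list(...). */
  grpc_closure_list list;
  list.head = NULL;
  list.tail = NULL;
  grpc_closure_list_append(&list, b, GRPC_ERROR_NONE);
  grpc_closure_list_append(&list, c, GRPC_ERROR_NONE);
  grpc_closure_list_sched(exec_ctx, &list);

  /* With grpc_schedule_on_exec_ctx closures, nothing runs until the caller
     flushes or finishes the exec_ctx. */
}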

@@ -59,9 +59,10 @@ static void test_execute_one(void) {
   grpc_combiner *lock = grpc_combiner_create(NULL);
   bool done = false;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_combiner_execute(&exec_ctx, lock,
-                        grpc_closure_create(set_bool_to_true, &done),
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(&exec_ctx,
+                     grpc_closure_create(set_bool_to_true, &done,
+                                         grpc_combiner_scheduler(lock, false)),
+                     GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(done);
   grpc_combiner_destroy(&exec_ctx, lock);
@@ -94,9 +95,10 @@ static void execute_many_loop(void *a) {
     ex_args *c = gpr_malloc(sizeof(*c));
     c->ctr = &args->ctr;
     c->value = n++;
-    grpc_combiner_execute(&exec_ctx, args->lock,
-                          grpc_closure_create(check_one, c), GRPC_ERROR_NONE,
-                          false);
+    grpc_closure_sched(
+        &exec_ctx, grpc_closure_create(check_one, c, grpc_combiner_scheduler(
+                                                         args->lock, false)),
+        GRPC_ERROR_NONE);
     grpc_exec_ctx_flush(&exec_ctx);
   }
   // sleep for a little bit, to test a combiner draining and another thread
@@ -134,9 +136,10 @@ static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 }

 static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_combiner_execute_finally(exec_ctx, arg,
-                                grpc_closure_create(in_finally, NULL),
-                                GRPC_ERROR_NONE, false);
+  grpc_closure_sched(exec_ctx, grpc_closure_create(
+                                   in_finally, NULL,
+                                   grpc_combiner_finally_scheduler(arg, false)),
+                     GRPC_ERROR_NONE);
 }

 static void test_execute_finally(void) {
@@ -144,8 +147,10 @@ static void test_execute_finally(void) {
   grpc_combiner *lock = grpc_combiner_create(NULL);
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_combiner_execute(&exec_ctx, lock, grpc_closure_create(add_finally, lock),
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(&exec_ctx,
+                     grpc_closure_create(add_finally, lock,
+                                         grpc_combiner_scheduler(lock, false)),
+                     GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(got_in_finally);
   grpc_combiner_destroy(&exec_ctx, lock);
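The combiner change follows the same pattern: instead of dedicated grpc_combiner_execute / grpc_combiner_execute_finally entry points, the combiner is baked into the closure as its scheduler via grpc_combiner_scheduler (or grpc_combiner_finally_scheduler), and the closure is handed to plain grpc_closure_sched. A minimal sketch that condenses test_execute_one above; the under_lock callback and header paths are assumptions, and the boolean flag simply matches the false the tests pass:

#include <stdbool.h>

#include <grpc/support/log.h>

#include "src/core/lib/iomgr/combiner.h" /* assumed path */

static void under_lock(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  *(bool *)arg = true; /* runs serialized by the combiner */
}

static void combiner_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_combiner *lock = grpc_combiner_create(NULL);
  bool done = false;
  /* The scheduler argument carries the combiner; the closure itself is
     scheduled through the ordinary grpc_closure_sched entry point. */
  grpc_closure_sched(&exec_ctx,
                     grpc_closure_create(under_lock, &done,
                                         grpc_combiner_scheduler(lock, false)),
                     GRPC_ERROR_NONE);
  grpc_exec_ctx_flush(&exec_ctx);
  GPR_ASSERT(done);
  grpc_combiner_destroy(&exec_ctx, lock);
  grpc_exec_ctx_finish(&exec_ctx);
}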

@@ -81,7 +81,8 @@ int main(int argc, char **argv) {
   g_pollset = gpr_malloc(grpc_pollset_size());
   grpc_pollset_init(g_pollset, &g_mu);
   grpc_endpoint_tests(configs[0], g_pollset, g_mu);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
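grpc_closure_init gains the same scheduler argument for closures that live in caller-owned storage, which is what every one of these main() teardown hunks does. A condensed sketch of that idiom, assuming the destroy_pollset callback shape the tests use; shutdown_and_destroy is an illustrative wrapper:

static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p,
                            grpc_error *error) {
  grpc_pollset_destroy(p); /* runs only once shutdown has completed */
}

static void shutdown_and_destroy(grpc_pollset *pollset) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure destroyed; /* caller-owned storage, hence _init not _create */
  grpc_closure_init(&destroyed, destroy_pollset, pollset,
                    grpc_schedule_on_exec_ctx);
  grpc_pollset_shutdown(&exec_ctx, pollset, &destroyed);
  /* Flush the exec_ctx before 'destroyed' leaves scope, as the tests do. */
  grpc_exec_ctx_finish(&exec_ctx);
}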

@@ -211,9 +211,10 @@ static void read_and_write_test(grpc_endpoint_test_config config,
   state.write_done = 0;
   state.current_read_data = 0;
   state.current_write_data = 0;
-  grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state);
-  grpc_closure_init(&state.done_write, read_and_write_test_write_handler,
-                    &state);
+  grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&state.done_write, read_and_write_test_write_handler,
+                    &state, grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&state.outgoing);
   grpc_slice_buffer_init(&state.incoming);
@@ -290,16 +291,19 @@ static void multiple_shutdown_test(grpc_endpoint_test_config config) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
   grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
-                     grpc_closure_create(inc_on_failure, &fail_count));
+                     grpc_closure_create(inc_on_failure, &fail_count,
+                                         grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 0);
   grpc_endpoint_shutdown(&exec_ctx, f.client_ep);
   wait_for_fail_count(&exec_ctx, &fail_count, 1);
   grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
-                     grpc_closure_create(inc_on_failure, &fail_count));
+                     grpc_closure_create(inc_on_failure, &fail_count,
+                                         grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 2);
   grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a"));
   grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer,
-                      grpc_closure_create(inc_on_failure, &fail_count));
+                      grpc_closure_create(inc_on_failure, &fail_count,
+                                          grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 3);
   grpc_endpoint_shutdown(&exec_ctx, f.client_ep);
   wait_for_fail_count(&exec_ctx, &fail_count, 3);

@@ -102,7 +102,8 @@ static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx,
   int i;
   for (i = 0; i < num_pollsets; i++) {
-    grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset);
+    grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset,
+                      grpc_schedule_on_exec_ctx);
     grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed);
     grpc_exec_ctx_flush(exec_ctx);

@@ -219,8 +219,8 @@ static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
   se->sv = sv;
   se->em_fd = grpc_fd_create(fd, "listener");
   grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
-  se->session_read_closure.cb = session_read_cb;
-  se->session_read_closure.cb_arg = se;
+  grpc_closure_init(&se->session_read_closure, session_read_cb, se,
+                    grpc_schedule_on_exec_ctx);
   grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
   grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure);
@@ -249,8 +249,8 @@ static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
   sv->em_fd = grpc_fd_create(fd, "server");
   grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
   /* Register to be interested in reading from listen_fd. */
-  sv->listen_closure.cb = listen_cb;
-  sv->listen_closure.cb_arg = sv;
+  grpc_closure_init(&sv->listen_closure, listen_cb, sv,
+                    grpc_schedule_on_exec_ctx);
   grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);
   return port;
@@ -333,8 +333,8 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
   if (errno == EAGAIN) {
     gpr_mu_lock(g_mu);
     if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
-      cl->write_closure.cb = client_session_write;
-      cl->write_closure.cb_arg = cl;
+      grpc_closure_init(&cl->write_closure, client_session_write, cl,
+                        grpc_schedule_on_exec_ctx);
       grpc_fd_notify_on_write(exec_ctx, cl->em_fd, &cl->write_closure);
       cl->client_write_cnt++;
     } else {
@@ -459,10 +459,10 @@ static void test_grpc_fd_change(void) {
   grpc_closure second_closure;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  first_closure.cb = first_read_callback;
-  first_closure.cb_arg = &a;
-  second_closure.cb = second_read_callback;
-  second_closure.cb_arg = &b;
+  grpc_closure_init(&first_closure, first_read_callback, &a,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&second_closure, second_read_callback, &b,
+                    grpc_schedule_on_exec_ctx);
   init_change_data(&a);
   init_change_data(&b);
@@ -546,7 +546,8 @@ int main(int argc, char **argv) {
   grpc_pollset_init(g_pollset, &g_mu);
   test_grpc_fd();
   test_grpc_fd_change();
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   gpr_free(g_pollset);
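Worth noting in this file: the old code filled in closure->cb and closure->cb_arg by hand, which after this PR would presumably leave the closure's new scheduler field unset, so these hunks switch every such site to grpc_closure_init. A sketch of the replacement pattern; the server struct with listen_closure/em_fd members and the listen_cb callback stand in for the test's own types:

/* Sketch: register read interest on an fd using a closure embedded in a
   larger struct. grpc_closure_init fills in cb, cb_arg, and the scheduler. */
static void register_listener(grpc_exec_ctx *exec_ctx, server *sv) {
  /* Previously: sv->listen_closure.cb = listen_cb;
                 sv->listen_closure.cb_arg = sv; */
  grpc_closure_init(&sv->listen_closure, listen_cb, sv,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);
}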

@@ -71,7 +71,8 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) {
   grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
   grpc_pollset_set_destroy(args->pollset_set);
   grpc_closure do_nothing_cb;
-  grpc_closure_init(&do_nothing_cb, do_nothing, NULL);
+  grpc_closure_init(&do_nothing_cb, do_nothing, NULL,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb);
   // exec_ctx needs to be flushed before calling grpc_pollset_destroy()
   grpc_exec_ctx_flush(exec_ctx);
@@ -136,8 +137,10 @@ static void test_localhost(void) {
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "localhost:1", NULL, args.pollset_set,
-                       grpc_closure_create(must_succeed, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "localhost:1", NULL, args.pollset_set,
+      grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -147,8 +150,10 @@ static void test_default_port(void) {
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "localhost", "1", args.pollset_set,
-                       grpc_closure_create(must_succeed, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "localhost", "1", args.pollset_set,
+      grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -158,8 +163,10 @@ static void test_missing_default_port(void) {
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "localhost", NULL, args.pollset_set,
-                       grpc_closure_create(must_fail, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "localhost", NULL, args.pollset_set,
+      grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -169,8 +176,10 @@ static void test_ipv6_with_port(void) {
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set,
-                       grpc_closure_create(must_succeed, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set,
+      grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -185,8 +194,10 @@ static void test_ipv6_without_port(void) {
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, kCases[i], "80", args.pollset_set,
-                       grpc_closure_create(must_succeed, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, kCases[i], "80", args.pollset_set,
+      grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -202,8 +213,10 @@ static void test_invalid_ip_addresses(void) {
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, kCases[i], NULL, args.pollset_set,
-                       grpc_closure_create(must_fail, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, kCases[i], NULL, args.pollset_set,
+      grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -219,8 +232,10 @@ static void test_unparseable_hostports(void) {
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, kCases[i], "1", args.pollset_set,
-                       grpc_closure_create(must_fail, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, kCases[i], "1", args.pollset_set,
+      grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
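These resolver hunks all pass a heap closure plus an output pointer that is filled in before the closure runs. A hedged sketch of that flow under the grpc_resolve_address signature suggested by the calls above; the resolve_state type, on_resolved callback, target string, and header paths are all illustrative assumptions, not part of this PR:

#include <grpc/support/log.h>
#include <grpc/support/sync.h>

#include "src/core/lib/iomgr/resolve_address.h" /* assumed path */

typedef struct {
  grpc_resolved_addresses *addrs;
  gpr_event done;
} resolve_state;

static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
                        grpc_error *error) {
  resolve_state *st = arg;
  GPR_ASSERT(error == GRPC_ERROR_NONE);
  GPR_ASSERT(st->addrs != NULL && st->addrs->naddrs > 0);
  gpr_event_set(&st->done, (void *)1);
}

static void resolve_example(grpc_exec_ctx *exec_ctx,
                            grpc_pollset_set *interested_parties) {
  resolve_state st;
  st.addrs = NULL;
  gpr_event_init(&st.done);
  grpc_resolve_address(
      exec_ctx, "localhost:50051", NULL, interested_parties,
      grpc_closure_create(on_resolved, &st, grpc_schedule_on_exec_ctx),
      &st.addrs);
  /* The caller must flush/poll until 'done' fires before 'st' leaves scope,
     which is what poll_pollset_until_request_done does in the test above. */
}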

@@ -45,7 +45,9 @@ static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
 static void set_bool_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
   *(bool *)a = true;
 }
-grpc_closure *set_bool(bool *p) { return grpc_closure_create(set_bool_cb, p); }
+grpc_closure *set_bool(bool *p) {
+  return grpc_closure_create(set_bool_cb, p, grpc_schedule_on_exec_ctx);
+}

 typedef struct {
   size_t size;
@@ -67,7 +69,7 @@ grpc_closure *make_reclaimer(grpc_resource_user *resource_user, size_t size,
   a->size = size;
   a->resource_user = resource_user;
   a->then = then;
-  return grpc_closure_create(reclaimer_cb, a);
+  return grpc_closure_create(reclaimer_cb, a, grpc_schedule_on_exec_ctx);
 }

 static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg,
@@ -76,7 +78,8 @@ static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg,
   grpc_closure_run(exec_ctx, arg, GRPC_ERROR_NONE);
 }
 grpc_closure *make_unused_reclaimer(grpc_closure *then) {
-  return grpc_closure_create(unused_reclaimer_cb, then);
+  return grpc_closure_create(unused_reclaimer_cb, then,
+                             grpc_schedule_on_exec_ctx);
 }

 static void destroy_user(grpc_resource_user *usr) {

@@ -113,7 +113,7 @@ void test_succeeds(void) {
   /* connect to it */
   GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)addr,
                          (socklen_t *)&resolved_addr.len) == 0);
-  grpc_closure_init(&done, must_succeed, NULL);
+  grpc_closure_init(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
   grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
                           &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -163,7 +163,7 @@ void test_fails(void) {
   gpr_mu_unlock(g_mu);
   /* connect to a broken address */
-  grpc_closure_init(&done, must_fail, NULL);
+  grpc_closure_init(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
   grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
                           &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -207,7 +207,8 @@ int main(int argc, char **argv) {
   gpr_log(GPR_ERROR, "End of first test");
   test_fails();
   grpc_pollset_set_destroy(g_pollset_set);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();

@@ -194,7 +194,7 @@ static void read_test(size_t num_bytes, size_t slice_size) {
   state.read_bytes = 0;
   state.target_read_bytes = written_bytes;
   grpc_slice_buffer_init(&state.incoming);
-  grpc_closure_init(&state.read_cb, read_cb, &state);
+  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
@@ -245,7 +245,7 @@ static void large_read_test(size_t slice_size) {
   state.read_bytes = 0;
   state.target_read_bytes = (size_t)written_bytes;
   grpc_slice_buffer_init(&state.incoming);
-  grpc_closure_init(&state.read_cb, read_cb, &state);
+  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
@@ -384,7 +384,8 @@ static void write_test(size_t num_bytes, size_t slice_size) {
   grpc_slice_buffer_init(&outgoing);
   grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
-  grpc_closure_init(&write_done_closure, write_done, &state);
+  grpc_closure_init(&write_done_closure, write_done, &state,
+                    grpc_schedule_on_exec_ctx);
   grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
   drain_socket_blocking(sv[0], num_bytes, num_bytes);
@@ -429,7 +430,8 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_closure fd_released_cb;
   int fd_released_done = 0;
-  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done);
+  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done,
+                    grpc_schedule_on_exec_ctx);
   gpr_log(GPR_INFO,
           "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR,
@@ -452,7 +454,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
   state.read_bytes = 0;
   state.target_read_bytes = written_bytes;
   grpc_slice_buffer_init(&state.incoming);
-  grpc_closure_init(&state.read_cb, read_cb, &state);
+  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
@@ -561,7 +563,8 @@ int main(int argc, char **argv) {
   grpc_pollset_init(g_pollset, &g_mu);
   grpc_endpoint_tests(configs[0], g_pollset, g_mu);
   run_tests();
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();

@@ -104,7 +104,7 @@ static void server_weak_ref_shutdown(grpc_exec_ctx *exec_ctx, void *arg,
 static void server_weak_ref_init(server_weak_ref *weak_ref) {
   weak_ref->server = NULL;
   grpc_closure_init(&weak_ref->server_shutdown, server_weak_ref_shutdown,
-                    weak_ref);
+                    weak_ref, grpc_schedule_on_exec_ctx);
 }

 /* Make weak_ref->server_shutdown a shutdown_starting cb on server.
@@ -366,7 +366,8 @@ int main(int argc, char **argv) {
   test_connect(1);
   test_connect(10);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();

@@ -234,7 +234,8 @@ int main(int argc, char **argv) {
   test_receive(1);
   test_receive(10);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   gpr_free(g_pollset);

@@ -565,7 +565,7 @@ static int compute_engine_httpcli_get_success_override(
     grpc_httpcli_response *response) {
   validate_compute_engine_http_request(request);
   *response = http_response(200, valid_oauth2_json_response);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
@@ -575,7 +575,7 @@ static int compute_engine_httpcli_get_failure_override(
     grpc_httpcli_response *response) {
   validate_compute_engine_http_request(request);
   *response = http_response(403, "Not Authorized.");
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
@@ -668,7 +668,7 @@ static int refresh_token_httpcli_post_success(
     grpc_closure *on_done, grpc_httpcli_response *response) {
   validate_refresh_token_http_request(request, body, body_size);
   *response = http_response(200, valid_oauth2_json_response);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
@@ -678,7 +678,7 @@ static int refresh_token_httpcli_post_failure(
     grpc_closure *on_done, grpc_httpcli_response *response) {
   validate_refresh_token_http_request(request, body, body_size);
   *response = http_response(403, "Not Authorized.");
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
@@ -917,7 +917,7 @@ static int default_creds_gce_detection_httpcli_get_success_override(
   response->hdrs = headers;
   GPR_ASSERT(strcmp(request->http.path, "/") == 0);
   GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
@@ -975,7 +975,7 @@ static int default_creds_gce_detection_httpcli_get_failure_override(
   GPR_ASSERT(strcmp(request->http.path, "/") == 0);
   GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
   *response = http_response(200, "");
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
