Merge branch 'thread_pool' into ALL-the-things

reviewable/pr11336/r1
Craig Tiller 8 years ago
commit 7ebf31bcb2
  1. BUILD (5)
  2. CMakeLists.txt (10)
  3. Makefile (10)
  4. binding.gyp (2)
  5. build.yaml (5)
  6. config.m4 (2)
  7. config.w32 (2)
  8. gRPC-Core.podspec (8)
  9. grpc.gemspec (5)
  10. package.xml (5)
  11. src/core/ext/filters/client_channel/client_channel.c (204)
  12. src/core/ext/filters/client_channel/lb_policy.c (9)
  13. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (18)
  14. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c (2)
  15. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (2)
  16. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c (4)
  17. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c (4)
  18. src/core/ext/transport/chttp2/transport/chttp2_transport.c (139)
  19. src/core/ext/transport/chttp2/transport/frame_ping.c (2)
  20. src/core/ext/transport/chttp2/transport/frame_window_update.c (3)
  21. src/core/ext/transport/chttp2/transport/hpack_parser.c (8)
  22. src/core/ext/transport/chttp2/transport/internal.h (5)
  23. src/core/ext/transport/chttp2/transport/parsing.c (2)
  24. src/core/ext/transport/chttp2/transport/writing.c (3)
  25. src/core/lib/iomgr/combiner.c (213)
  26. src/core/lib/iomgr/combiner.h (8)
  27. src/core/lib/iomgr/endpoint.c (4)
  28. src/core/lib/iomgr/endpoint.h (4)
  29. src/core/lib/iomgr/ev_epoll1_linux.c (100)
  30. src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c (198)
  31. src/core/lib/iomgr/ev_epoll_thread_pool_linux.c (172)
  32. src/core/lib/iomgr/ev_epollex_linux.c (156)
  33. src/core/lib/iomgr/ev_epollsig_linux.c (294)
  34. src/core/lib/iomgr/ev_poll_posix.c (31)
  35. src/core/lib/iomgr/ev_posix.c (26)
  36. src/core/lib/iomgr/ev_posix.h (16)
  37. src/core/lib/iomgr/exec_ctx.c (1)
  38. src/core/lib/iomgr/executor.c (247)
  39. src/core/lib/iomgr/executor.h (9)
  40. src/core/lib/iomgr/iomgr.c (7)
  41. src/core/lib/iomgr/iomgr.h (4)
  42. src/core/lib/iomgr/resource_quota.c (31)
  43. src/core/lib/iomgr/tcp_posix.c (19)
  44. src/core/lib/iomgr/workqueue.h (87)
  45. src/core/lib/iomgr/workqueue_uv.c (65)
  46. src/core/lib/iomgr/workqueue_uv.h (37)
  47. src/core/lib/iomgr/workqueue_windows.c (63)
  48. src/core/lib/iomgr/workqueue_windows.h (37)
  49. src/core/lib/security/transport/client_auth_filter.c (36)
  50. src/core/lib/security/transport/secure_endpoint.c (6)
  51. src/core/lib/surface/call.c (5)
  52. src/core/lib/surface/init.c (11)
  53. src/core/lib/transport/transport.h (4)
  54. src/core/lib/transport/transport_op_string.c (3)
  55. src/python/grpcio/grpc_core_dependencies.py (2)
  56. test/core/client_channel/resolvers/dns_resolver_connectivity_test.c (11)
  57. test/core/client_channel/resolvers/dns_resolver_test.c (2)
  58. test/core/client_channel/resolvers/fake_resolver_test.c (22)
  59. test/core/client_channel/resolvers/sockaddr_resolver_test.c (2)
  60. test/core/end2end/fake_resolver.c (7)
  61. test/core/end2end/fixtures/http_proxy_fixture.c (16)
  62. test/core/end2end/fuzzers/api_fuzzer.c (6)
  63. test/core/end2end/fuzzers/client_fuzzer.c (2)
  64. test/core/end2end/fuzzers/server_fuzzer.c (2)
  65. test/core/iomgr/combiner_test.c (62)
  66. test/core/iomgr/endpoint_tests.c (17)
  67. test/core/iomgr/ev_epollsig_linux_test.c (94)
  68. test/core/iomgr/fd_conservation_posix_test.c (15)
  69. test/core/iomgr/fd_posix_test.c (4)
  70. test/core/iomgr/pollset_set_test.c (4)
  71. test/core/iomgr/resolve_address_posix_test.c (15)
  72. test/core/iomgr/resolve_address_test.c (17)
  73. test/core/iomgr/resource_quota_test.c (345)
  74. test/core/iomgr/tcp_posix_test.c (1)
  75. test/core/util/mock_endpoint.c (13)
  76. test/core/util/passthru_endpoint.c (13)
  77. test/core/util/trickle_endpoint.c (19)
  78. test/cpp/microbenchmarks/bm_chttp2_transport.cc (14)
  79. test/cpp/microbenchmarks/bm_closure.cc (66)
  80. tools/doxygen/Doxyfile.core.internal (5)
  81. tools/run_tests/generated/sources_and_headers.json (8)
  82. vsprojects/vcxproj/grpc/grpc.vcxproj (7)
  83. vsprojects/vcxproj/grpc/grpc.vcxproj.filters (15)
  84. vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj (7)
  85. vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters (15)
  86. vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj (7)
  87. vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters (15)

@@ -536,8 +536,6 @@ grpc_cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_uv.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
"src/core/lib/json/json_string.c",
@@ -655,9 +653,6 @@ grpc_cc_library(
"src/core/lib/iomgr/wakeup_fd_cv.h",
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_uv.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
"src/core/lib/json/json_reader.h",

@@ -1003,8 +1003,6 @@ add_library(grpc
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_uv.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
src/core/lib/json/json_string.c
@@ -1342,8 +1340,6 @@ add_library(grpc_cronet
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_uv.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
src/core/lib/json/json_string.c
@@ -1661,8 +1657,6 @@ add_library(grpc_test_util
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_uv.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
src/core/lib/json/json_string.c
@@ -1925,8 +1919,6 @@ add_library(grpc_unsecure
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_uv.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
src/core/lib/json/json_string.c
@@ -2579,8 +2571,6 @@ add_library(grpc++_cronet
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_uv.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
src/core/lib/json/json_string.c

@@ -2983,8 +2983,6 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_uv.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
src/core/lib/json/json_string.c \
@@ -3320,8 +3318,6 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_uv.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
src/core/lib/json/json_string.c \
@@ -3638,8 +3634,6 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_uv.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
src/core/lib/json/json_string.c \
@@ -3874,8 +3868,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_uv.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
src/core/lib/json/json_string.c \
@@ -4514,8 +4506,6 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_uv.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
src/core/lib/json/json_string.c \

@@ -735,8 +735,6 @@
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',

@@ -254,9 +254,6 @@ filegroups:
- src/core/lib/iomgr/wakeup_fd_cv.h
- src/core/lib/iomgr/wakeup_fd_pipe.h
- src/core/lib/iomgr/wakeup_fd_posix.h
- src/core/lib/iomgr/workqueue.h
- src/core/lib/iomgr/workqueue_uv.h
- src/core/lib/iomgr/workqueue_windows.h
- src/core/lib/json/json.h
- src/core/lib/json/json_common.h
- src/core/lib/json/json_reader.h
@@ -375,8 +372,6 @@ filegroups:
- src/core/lib/iomgr/wakeup_fd_nospecial.c
- src/core/lib/iomgr/wakeup_fd_pipe.c
- src/core/lib/iomgr/wakeup_fd_posix.c
- src/core/lib/iomgr/workqueue_uv.c
- src/core/lib/iomgr/workqueue_windows.c
- src/core/lib/json/json.c
- src/core/lib/json/json_reader.c
- src/core/lib/json/json_string.c

@@ -167,8 +167,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_uv.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
src/core/lib/json/json_string.c \

@@ -144,8 +144,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\iomgr\\wakeup_fd_nospecial.c " +
"src\\core\\lib\\iomgr\\wakeup_fd_pipe.c " +
"src\\core\\lib\\iomgr\\wakeup_fd_posix.c " +
"src\\core\\lib\\iomgr\\workqueue_uv.c " +
"src\\core\\lib\\iomgr\\workqueue_windows.c " +
"src\\core\\lib\\json\\json.c " +
"src\\core\\lib\\json\\json_reader.c " +
"src\\core\\lib\\json\\json_string.c " +

@@ -333,9 +333,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_uv.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',
'src/core/lib/json/json_reader.h',
@@ -560,8 +557,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
@@ -819,9 +814,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_uv.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',
'src/core/lib/json/json_reader.h',

@@ -249,9 +249,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/wakeup_fd_cv.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.h )
s.files += %w( src/core/lib/iomgr/workqueue.h )
s.files += %w( src/core/lib/iomgr/workqueue_uv.h )
s.files += %w( src/core/lib/iomgr/workqueue_windows.h )
s.files += %w( src/core/lib/json/json.h )
s.files += %w( src/core/lib/json/json_common.h )
s.files += %w( src/core/lib/json/json_reader.h )
@@ -476,8 +473,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/wakeup_fd_nospecial.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.c )
s.files += %w( src/core/lib/iomgr/workqueue_uv.c )
s.files += %w( src/core/lib/iomgr/workqueue_windows.c )
s.files += %w( src/core/lib/json/json.c )
s.files += %w( src/core/lib/json/json_reader.c )
s.files += %w( src/core/lib/json/json_string.c )

@@ -263,9 +263,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_cv.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_uv.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_common.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_reader.h" role="src" />
@@ -490,8 +487,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_nospecial.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_uv.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_reader.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_string.c" role="src" />

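Note: every build-file hunk above is the same deletion of the workqueue_* sources (and, in the header lists, workqueue.h as well), mirroring the removal of the workqueue abstraction. The C hunks that follow all track one API change: grpc_combiner_create() loses its optional-workqueue argument, and grpc_combiner_scheduler() / grpc_combiner_finally_scheduler() lose their covered_by_poller flag. A minimal before/after sketch of a caller follows; do_work_locked and its NULL arg are hypothetical, only the grpc_* names come from this diff, and the two halves target different revisions of the API, so treat this as an illustration rather than one compilable unit:

#include "src/core/lib/iomgr/combiner.h"

/* Hypothetical closure callback, for illustration only. */
static void do_work_locked(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {}

static grpc_closure my_closure;

static void before_this_commit(void) {
  /* Old API: combiners could carry a workqueue, and every scheduled
     closure had to declare whether a poller covered it. */
  grpc_combiner *c = grpc_combiner_create(NULL /* optional_workqueue */);
  grpc_closure_init(&my_closure, do_work_locked, NULL,
                    grpc_combiner_scheduler(c, false /* covered_by_poller */));
}

static void after_this_commit(void) {
  /* New API: no workqueue and no poller hint; offloading is now the
     combiner's own business (it targets the executor). */
  grpc_combiner *c = grpc_combiner_create();
  grpc_closure_init(&my_closure, do_work_locked, NULL,
                    grpc_combiner_scheduler(c));
}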
@@ -284,7 +284,7 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
w->chand = chand;
grpc_closure_init(&w->on_changed, on_lb_policy_state_changed_locked, w,
grpc_combiner_scheduler(chand->combiner, false));
grpc_combiner_scheduler(chand->combiner));
w->state = current_state;
w->lb_policy = lb_policy;
grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
@@ -628,7 +628,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_closure_sched(
exec_ctx,
grpc_closure_init(&op->handler_private.closure, start_transport_op_locked,
op, grpc_combiner_scheduler(chand->combiner, false)),
op, grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
@@ -659,12 +659,12 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
chand->combiner = grpc_combiner_create(NULL);
chand->combiner = grpc_combiner_create();
gpr_mu_init(&chand->info_mu);
chand->owning_stack = args->channel_stack;
grpc_closure_init(&chand->on_resolver_result_changed,
on_resolver_result_changed_locked, chand,
grpc_combiner_scheduler(chand->combiner, false));
grpc_combiner_scheduler(chand->combiner));
chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
@@ -723,9 +723,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
grpc_closure_sched(
exec_ctx,
grpc_closure_create(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner, false)),
exec_ctx, grpc_closure_create(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
@@ -755,11 +754,6 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
* PER-CALL FUNCTIONS
*/
#define GET_CALL(call_data) \
((grpc_subchannel_call *)(gpr_atm_acq_load(&(call_data)->subchannel_call)))
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
Handles queueing of stream ops until a call object is ready, waiting
@@ -780,11 +774,9 @@ typedef struct client_channel_call_data {
grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
grpc_error *cancel_error;
/** either 0 for no call, 1 for cancelled, or a pointer to a
grpc_subchannel_call */
gpr_atm subchannel_call;
/** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
bit is 0), or a pointer to an error (if the lowest bit is 1) */
gpr_atm subchannel_call_or_error;
gpr_arena *arena;
bool pick_pending;
@@ -806,10 +798,43 @@ typedef struct client_channel_call_data {
grpc_closure *original_on_complete;
} call_data;
typedef struct {
grpc_subchannel_call *subchannel_call;
grpc_error *error;
} call_or_error;
static call_or_error get_call_or_error(call_data *p) {
gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
if (c == 0)
return (call_or_error){NULL, NULL};
else if (c & 1)
return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
else
return (call_or_error){(grpc_subchannel_call *)c, NULL};
}
static bool set_call_or_error(call_data *p, call_or_error coe) {
// this should always be under a lock
call_or_error existing = get_call_or_error(p);
if (existing.error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(coe.error);
return false;
}
GPR_ASSERT(existing.subchannel_call == NULL);
if (coe.error != GRPC_ERROR_NONE) {
GPR_ASSERT(coe.subchannel_call == NULL);
gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
} else {
GPR_ASSERT(coe.subchannel_call != NULL);
gpr_atm_rel_store(&p->subchannel_call_or_error,
(gpr_atm)coe.subchannel_call);
}
return true;
}
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *call_elem) {
grpc_subchannel_call *scc = GET_CALL((call_data *)call_elem->call_data);
return scc == CANCELLED_CALL ? NULL : scc;
return get_call_or_error(call_elem->call_data).subchannel_call;
}
static void add_waiting_locked(call_data *calld,
@@ -841,18 +866,18 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
return;
}
grpc_subchannel_call *call = GET_CALL(calld);
call_or_error call = get_call_or_error(calld);
grpc_transport_stream_op_batch **ops = calld->waiting_ops;
size_t nops = calld->waiting_ops_count;
if (call == CANCELLED_CALL) {
fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
if (call.error != GRPC_ERROR_NONE) {
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(call.error));
return;
}
calld->waiting_ops = NULL;
calld->waiting_ops_count = 0;
calld->waiting_ops_capacity = 0;
for (size_t i = 0; i < nops; i++) {
grpc_subchannel_call_process_op(exec_ctx, call, ops[i]);
grpc_subchannel_call_process_op(exec_ctx, call.subchannel_call, ops[i]);
}
gpr_free(ops);
}
@@ -913,19 +938,23 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
calld->pick_pending = false;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
call_or_error coe = get_call_or_error(calld);
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, (gpr_atm)CANCELLED_CALL);
fail_locked(exec_ctx, calld,
error == GRPC_ERROR_NONE
? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy")
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1));
} else if (GET_CALL(calld) == CANCELLED_CALL) {
grpc_error *failure =
error == GRPC_ERROR_NONE
? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy")
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1);
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
fail_locked(exec_ctx, calld, failure);
} else if (coe.error != GRPC_ERROR_NONE) {
/* already cancelled before subchannel became ready */
grpc_error *child_errors[] = {error, coe.error};
grpc_error *cancellation_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Cancelled before creating subchannel", &error, 1);
"Cancelled before creating subchannel", child_errors,
GPR_ARRAY_SIZE(child_errors));
/* if due to deadline, attach the deadline exceeded status to the error */
if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
cancellation_error =
@@ -945,8 +974,8 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
.context = calld->subchannel_call_context};
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
gpr_atm_rel_store(&calld->subchannel_call,
(gpr_atm)(uintptr_t)subchannel_call);
GPR_ASSERT(set_call_or_error(
calld, (call_or_error){.subchannel_call = subchannel_call}));
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
fail_locked(exec_ctx, calld, new_error);
@@ -959,8 +988,9 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call = GET_CALL(calld);
if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
grpc_subchannel_call *subchannel_call =
get_call_or_error(calld).subchannel_call;
if (subchannel_call == NULL) {
return NULL;
} else {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
@@ -1102,7 +1132,7 @@ static bool pick_subchannel_locked(
cpa->on_ready = on_ready;
cpa->elem = elem;
grpc_closure_init(&cpa->closure, continue_picking_locked, cpa,
grpc_combiner_scheduler(chand->combiner, true));
grpc_combiner_scheduler(chand->combiner));
grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
GRPC_ERROR_NONE);
} else {
@@ -1119,58 +1149,45 @@ static void start_transport_stream_op_batch_locked_inner(
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_subchannel_call *call;
/* need to recheck that another thread hasn't set the call */
call = GET_CALL(calld);
if (call == CANCELLED_CALL) {
call_or_error coe = get_call_or_error(calld);
if (coe.error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
exec_ctx, op, GRPC_ERROR_REF(coe.error));
/* early out */
return;
}
if (call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, call, op);
if (coe.subchannel_call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, op);
/* early out */
return;
}
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_stream) {
if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
(gpr_atm)(uintptr_t)CANCELLED_CALL)) {
/* recurse to retry */
start_transport_stream_op_batch_locked_inner(exec_ctx, op, elem);
/* early out */
return;
grpc_error *error = op->payload->cancel_stream.cancel_error;
/* Stash a copy of cancel_error in our call data, so that we can use
it for subsequent operations. This ensures that if the call is
cancelled before any ops are passed down (e.g., if the deadline
is in the past when the call starts), we can return the right
error to the caller when the first op does get passed down. */
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
if (calld->pick_pending) {
cancel_pick_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} else {
/* Stash a copy of cancel_error in our call data, so that we can use
it for subsequent operations. This ensures that if the call is
cancelled before any ops are passed down (e.g., if the deadline
is in the past when the call starts), we can return the right
error to the caller when the first op does get passed down. */
calld->cancel_error =
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error);
if (calld->pick_pending) {
cancel_pick_locked(
exec_ctx, elem,
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
} else {
fail_locked(exec_ctx, calld,
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
}
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op,
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
/* early out */
return;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
}
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op,
GRPC_ERROR_REF(error));
/* early out */
return;
}
/* if we don't have a subchannel, try to get one */
if (!calld->pick_pending && calld->connected_subchannel == NULL &&
op->send_initial_metadata) {
calld->pick_pending = true;
grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem,
grpc_combiner_scheduler(chand->combiner, true));
grpc_combiner_scheduler(chand->combiner));
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
@@ -1184,12 +1201,13 @@ static void start_transport_stream_op_batch_locked_inner(
calld->pick_pending = false;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call,
(gpr_atm)CANCELLED_CALL);
grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
set_call_or_error(calld,
(call_or_error){.error = GRPC_ERROR_REF(error)});
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(error));
return; // Early out.
}
} else {
@@ -1209,8 +1227,8 @@ static void start_transport_stream_op_batch_locked_inner(
.context = calld->subchannel_call_context};
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
gpr_atm_rel_store(&calld->subchannel_call,
(gpr_atm)(uintptr_t)subchannel_call);
GPR_ASSERT(set_call_or_error(
calld, (call_or_error){.subchannel_call = subchannel_call}));
if (error != GRPC_ERROR_NONE) {
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
@@ -1289,17 +1307,17 @@ static void cc_start_transport_stream_op_batch(
op);
}
/* try to (atomically) get the call */
grpc_subchannel_call *call = GET_CALL(calld);
call_or_error coe = get_call_or_error(calld);
GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
if (call == CANCELLED_CALL) {
if (coe.error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
exec_ctx, op, GRPC_ERROR_REF(coe.error));
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
/* early out */
return;
}
if (call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, call, op);
if (coe.subchannel_call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, op);
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
/* early out */
return;
@@ -1308,10 +1326,9 @@ static void cc_start_transport_stream_op_batch(
GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
op->handler_private.extra_arg = elem;
grpc_closure_sched(
exec_ctx,
grpc_closure_init(&op->handler_private.closure,
start_transport_stream_op_batch_locked, op,
grpc_combiner_scheduler(chand->combiner, false)),
exec_ctx, grpc_closure_init(&op->handler_private.closure,
start_transport_stream_op_batch_locked, op,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
@@ -1348,12 +1365,14 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params);
}
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
grpc_subchannel_call_set_cleanup_closure(call, then_schedule_closure);
call_or_error coe = get_call_or_error(calld);
GRPC_ERROR_UNREF(coe.error);
if (coe.subchannel_call != NULL) {
grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
then_schedule_closure);
then_schedule_closure = NULL;
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(!calld->pick_pending);
GPR_ASSERT(calld->waiting_ops_count == 0);
@@ -1423,9 +1442,8 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
grpc_closure_sched(
exec_ctx,
grpc_closure_create(try_to_connect_locked, chand,
grpc_combiner_scheduler(chand->combiner, false)),
exec_ctx, grpc_closure_create(try_to_connect_locked, chand,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
return out;
@@ -1475,6 +1493,6 @@ void grpc_client_channel_watch_connectivity_state(
grpc_closure_sched(
exec_ctx,
grpc_closure_init(&w->my_closure, watch_connectivity_state_locked, w,
grpc_combiner_scheduler(chand->combiner, true)),
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}

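Note: the client_channel.c hunks above retire the CANCELLED_CALL sentinel ((grpc_subchannel_call *)1) in favor of a tagged pointer: a single gpr_atm now holds either a grpc_subchannel_call * (low bit 0) or a grpc_error * (low bit 1), so the precise cancellation error is preserved for later batches instead of being collapsed into a flag. A standalone sketch of the same idiom, with stand-in types rather than the real gRPC ones:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct subchannel_call { int id; } subchannel_call;
typedef struct call_error { const char *msg; } call_error;

/* Pointers to these structs are at least 2-byte aligned, so the low
   bit is free to use as a type tag. */
static uintptr_t encode_call(subchannel_call *c) {
  assert(((uintptr_t)c & 1) == 0);
  return (uintptr_t)c;
}
static uintptr_t encode_error(call_error *e) {
  assert(((uintptr_t)e & 1) == 0);
  return (uintptr_t)e | 1;
}

int main(void) {
  subchannel_call call = {42};
  call_error err = {"cancelled"};
  uintptr_t slot = encode_call(&call); /* 0 would mean "no call yet" */
  if (slot & 1) {
    printf("error: %s\n", ((call_error *)(slot & ~(uintptr_t)1))->msg);
  } else if (slot != 0) {
    printf("call id: %d\n", ((subchannel_call *)slot)->id);
  }
  slot = encode_error(&err);
  assert((slot & 1) && (call_error *)(slot & ~(uintptr_t)1) == &err);
  return 0;
}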
@@ -89,11 +89,10 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
grpc_closure_sched(
exec_ctx,
grpc_closure_create(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner, false)),
GRPC_ERROR_NONE);
grpc_closure_sched(exec_ctx, grpc_closure_create(
shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));

@@ -767,7 +767,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
gpr_zalloc(sizeof(rr_connectivity_data));
grpc_closure_init(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_combiner_scheduler(glb_policy->base.combiner));
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = new_rr_state;
@@ -1222,7 +1222,7 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
gpr_time_add(now, glb_policy->client_stats_report_interval);
grpc_closure_init(&glb_policy->client_load_report_closure,
send_client_load_report_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
next_client_load_report_time,
&glb_policy->client_load_report_closure, now);
@@ -1250,7 +1250,7 @@ static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
op.data.send_message.send_message = glb_policy->client_load_report_payload;
grpc_closure_init(&glb_policy->client_load_report_closure,
client_load_report_done_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, &op, 1,
&glb_policy->client_load_report_closure);
@@ -1355,13 +1355,13 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
lb_on_sent_initial_request_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_closure_init(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_closure_init(&glb_policy->lb_on_response_received,
lb_on_response_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_combiner_scheduler(glb_policy->base.combiner));
gpr_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
@@ -1658,9 +1658,9 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
grpc_closure_init(
&glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_closure_init(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
}

@@ -453,7 +453,7 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed_locked, p,
grpc_combiner_scheduler(args->combiner, false));
grpc_combiner_scheduler(args->combiner));
return &p->base;
}

@@ -641,7 +641,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
}
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed_locked, sd,
grpc_combiner_scheduler(args->combiner, false));
grpc_combiner_scheduler(args->combiner));
++subchannel_index;
}
}

@@ -284,10 +284,10 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
grpc_closure_init(&r->dns_ares_on_retry_timer_locked,
dns_ares_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner, false));
grpc_combiner_scheduler(r->base.combiner));
grpc_closure_init(&r->dns_ares_on_resolved_locked,
dns_ares_on_resolved_locked, r,
grpc_combiner_scheduler(r->base.combiner, false));
grpc_combiner_scheduler(r->base.combiner));
return &r->base;
}

@@ -194,7 +194,7 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG, "retrying immediately");
}
grpc_closure_init(&r->on_retry, dns_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner, false));
grpc_combiner_scheduler(r->base.combiner));
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
}
if (r->resolved_result != NULL) {
@@ -216,7 +216,7 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
grpc_resolve_address(
exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
grpc_closure_create(dns_on_resolved_locked, r,
grpc_combiner_scheduler(r->base.combiner, false)),
grpc_combiner_scheduler(r->base.combiner)),
&r->addresses);
}

@@ -50,7 +50,6 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -267,7 +266,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
t->combiner = grpc_combiner_create(grpc_endpoint_get_workqueue(ep));
t->combiner = grpc_combiner_create();
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
@@ -288,29 +287,29 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_closure_init(&t->write_action, write_action, t,
grpc_schedule_on_exec_ctx);
grpc_closure_init(&t->read_action_locked, read_action_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->destructive_reclaimer_locked,
destructive_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->retry_initiate_ping_locked, retry_initiate_ping_locked,
t, grpc_combiner_scheduler(t->combiner, false));
t, grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->start_bdp_ping_locked, start_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
t, grpc_combiner_scheduler(t->combiner, false));
t, grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_closure_init(&t->keepalive_watchdog_fired_locked,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
grpc_bdp_estimator_init(&t->bdp_estimator, t->peer_string);
t->last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC);
@@ -353,7 +352,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
GRPC_CHTTP2_CLIENT_CONNECT_STRING));
grpc_chttp2_initiate_write(exec_ctx, t, false, "initial_write");
grpc_chttp2_initiate_write(exec_ctx, t, "initial_write");
}
/* configure http2 the way we like it */
@@ -565,7 +564,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
grpc_chttp2_initiate_write(exec_ctx, t, false, "init");
grpc_chttp2_initiate_write(exec_ctx, t, "init");
post_benign_reclaimer(exec_ctx, t);
}
@@ -583,9 +582,9 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_closure_sched(exec_ctx, grpc_closure_create(
destroy_transport_locked, t,
grpc_combiner_scheduler(t->combiner, false)),
grpc_closure_sched(exec_ctx,
grpc_closure_create(destroy_transport_locked, t,
grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
}
@@ -678,7 +677,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_slice_buffer_init(&s->frame_storage);
s->pending_byte_stream = false;
grpc_closure_init(&s->reset_byte_stream, reset_byte_stream, s,
grpc_combiner_scheduler(t->combiner, false));
grpc_combiner_scheduler(t->combiner));
GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
@@ -762,7 +761,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->destroy_stream_arg = then_schedule_closure;
grpc_closure_sched(
exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s,
grpc_combiner_scheduler(t->combiner, false)),
grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("destroy_stream", 0);
}
@@ -800,8 +799,6 @@ static const char *write_state_name(grpc_chttp2_write_state st) {
return "WRITING";
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
return "WRITING+MORE";
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
return "WRITING+MORE+COVERED";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
@@ -824,8 +821,7 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason) {
grpc_chttp2_transport *t, const char *reason) {
GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
switch (t->write_state) {
@@ -834,28 +830,16 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
grpc_closure_sched(
exec_ctx,
grpc_closure_init(
&t->write_action_begin_locked, write_action_begin_locked, t,
grpc_combiner_finally_scheduler(t->combiner, covered_by_poller)),
grpc_closure_init(&t->write_action_begin_locked,
write_action_begin_locked, t,
grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING:
set_write_state(
exec_ctx, t,
covered_by_poller
? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER
: GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
reason);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
reason);
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
if (covered_by_poller) {
set_write_state(
exec_ctx, t,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
reason);
}
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
break;
}
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
@@ -871,10 +855,10 @@ void grpc_chttp2_become_writable(
case GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK:
break;
case GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED:
grpc_chttp2_initiate_write(exec_ctx, t, true, reason);
grpc_chttp2_initiate_write(exec_ctx, t, reason);
break;
case GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED:
grpc_chttp2_initiate_write(exec_ctx, t, false, reason);
grpc_chttp2_initiate_write(exec_ctx, t, reason);
break;
}
}
@@ -911,7 +895,7 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t,
grpc_combiner_scheduler(t->combiner, false)));
grpc_combiner_scheduler(t->combiner)));
GPR_TIMER_END("write_action", 0);
}
@@ -945,23 +929,11 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [!covered]");
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
grpc_closure_run(
exec_ctx,
grpc_closure_init(
&t->write_action_begin_locked, write_action_begin_locked, t,
grpc_combiner_finally_scheduler(t->combiner, false)),
GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [covered]");
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
grpc_closure_run(
exec_ctx,
grpc_closure_init(&t->write_action_begin_locked,
write_action_begin_locked, t,
grpc_combiner_finally_scheduler(t->combiner, true)),
grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE);
break;
}
@@ -984,7 +956,7 @@ static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (use_value != t->settings[GRPC_LOCAL_SETTINGS][id]) {
t->settings[GRPC_LOCAL_SETTINGS][id] = use_value;
t->dirtied_local_settings = 1;
grpc_chttp2_initiate_write(exec_ctx, t, false, "push_setting");
grpc_chttp2_initiate_write(exec_ctx, t, "push_setting");
}
}
@@ -1380,7 +1352,6 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
s->next_message_end_offset = s->flow_controlled_bytes_written +
(int64_t)s->flow_controlled_buffer.length +
(int64_t)len;
s->complete_fetch_covered_by_poller = op->covered_by_poller;
if (flags & GRPC_WRITE_BUFFER_HINT) {
s->next_message_end_offset -= t->write_buffer_size;
s->write_buffering = true;
@@ -1502,9 +1473,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
grpc_closure_sched(
exec_ctx,
grpc_closure_init(
&op->handler_private.closure, perform_stream_op_locked, op,
grpc_combiner_scheduler(t->combiner, op->covered_by_poller)),
grpc_closure_init(&op->handler_private.closure, perform_stream_op_locked,
op, grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("perform_stream_op", 0);
}
@@ -1531,7 +1501,7 @@ static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_NONE);
if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
GRPC_ERROR_NONE)) {
grpc_chttp2_initiate_write(exec_ctx, t, false, "send_ping");
grpc_chttp2_initiate_write(exec_ctx, t, "send_ping");
}
}
@@ -1539,7 +1509,7 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
t->ping_state.is_delayed_ping_timer_set = false;
grpc_chttp2_initiate_write(exec_ctx, t, false, "retry_send_ping");
grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
}
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -1554,7 +1524,7 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
grpc_closure_list_sched(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
grpc_chttp2_initiate_write(exec_ctx, t, false, "continue_pings");
grpc_chttp2_initiate_write(exec_ctx, t, "continue_pings");
}
}
@@ -1567,7 +1537,7 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&slice, &http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
grpc_slice_ref_internal(slice), &t->qbuf);
grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
grpc_chttp2_initiate_write(exec_ctx, t, "goaway_sent");
GRPC_ERROR_UNREF(error);
}
@@ -1593,12 +1563,6 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t = op->handler_private.extra_arg;
grpc_error *close_transport = op->disconnect_with_error;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
op->on_connectivity_state_change);
}
if (op->goaway_error) {
send_goaway(exec_ctx, t, op->goaway_error);
}
@@ -1622,6 +1586,12 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
op->send_ping);
}
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
op->on_connectivity_state_change);
}
if (close_transport != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, close_transport);
}
@@ -1638,11 +1608,11 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_free(msg);
op->handler_private.extra_arg = gt;
GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
grpc_closure_sched(
exec_ctx, grpc_closure_init(&op->handler_private.closure,
perform_transport_op_locked, op,
grpc_combiner_scheduler(t->combiner, false)),
GRPC_ERROR_NONE);
grpc_closure_sched(exec_ctx,
grpc_closure_init(&op->handler_private.closure,
perform_transport_op_locked, op,
grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
}
/*******************************************************************************
@@ -1797,7 +1767,7 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
grpc_chttp2_initiate_write(exec_ctx, t, false, "rst_stream");
grpc_chttp2_initiate_write(exec_ctx, t, "rst_stream");
}
}
if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
@@ -2110,7 +2080,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&s->stats.outgoing));
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
grpc_chttp2_initiate_write(exec_ctx, t, false, "close_from_api");
grpc_chttp2_initiate_write(exec_ctx, t, "close_from_api");
}
typedef struct {
@@ -2641,9 +2611,9 @@ static bool incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
bs->next_action.on_complete = on_complete;
grpc_closure_sched(
exec_ctx,
grpc_closure_init(
&bs->next_action.closure, incoming_byte_stream_next_locked, bs,
grpc_combiner_scheduler(bs->transport->combiner, false)),
grpc_closure_init(&bs->next_action.closure,
incoming_byte_stream_next_locked, bs,
grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_next", 0);
return false;
@@ -2698,10 +2668,9 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_closure_sched(
exec_ctx,
grpc_closure_init(
&bs->destroy_action, incoming_byte_stream_destroy_locked, bs,
grpc_combiner_scheduler(bs->transport->combiner, false)),
exec_ctx, grpc_closure_init(
&bs->destroy_action, incoming_byte_stream_destroy_locked,
bs, grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}

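Note: with covered_by_poller gone, the write path above shrinks from four write states to three (the WRITING_WITH_MORE_AND_COVERED_BY_POLLER case disappears here and from internal.h below). A compressed sketch of the surviving state machine, mirroring grpc_chttp2_initiate_write and write_action_end_locked with all transport plumbing elided:

typedef enum {
  WRITE_STATE_IDLE,
  WRITE_STATE_WRITING,
  WRITE_STATE_WRITING_WITH_MORE,
} write_state;

/* Someone wants bytes flushed (grpc_chttp2_initiate_write). */
static write_state initiate_write(write_state st) {
  switch (st) {
    case WRITE_STATE_IDLE:
      /* take a "writing" transport ref and schedule
         write_action_begin_locked on the combiner */
      return WRITE_STATE_WRITING;
    case WRITE_STATE_WRITING:
      /* a write is already in flight; just note that more work arrived */
      return WRITE_STATE_WRITING_WITH_MORE;
    case WRITE_STATE_WRITING_WITH_MORE:
      return WRITE_STATE_WRITING_WITH_MORE; /* nothing new to record */
  }
  return st; /* unreachable */
}

/* The in-flight write finished (write_action_end_locked). */
static write_state write_done(write_state st) {
  return st == WRITE_STATE_WRITING_WITH_MORE
             ? WRITE_STATE_WRITING /* loop around for the queued work */
             : WRITE_STATE_IDLE;
}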
@@ -132,7 +132,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
}
t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
grpc_chttp2_initiate_write(exec_ctx, t, "ping response");
}
}
}

@@ -124,8 +124,7 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
received_update);
bool is_zero = t->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_initiate_write(exec_ctx, t, false,
"new_global_flow_control");
grpc_chttp2_initiate_write(exec_ctx, t, "new_global_flow_control");
}
}
}

@@ -1664,7 +1664,7 @@ static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
grpc_chttp2_initiate_write(exec_ctx, t, false, "force_rst_stream");
grpc_chttp2_initiate_write(exec_ctx, t, "force_rst_stream");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
@@ -1712,9 +1712,9 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
and can avoid the extra write */
GRPC_CHTTP2_STREAM_REF(s, "final_rst");
grpc_closure_sched(
exec_ctx, grpc_closure_create(force_client_rst_stream, s,
grpc_combiner_finally_scheduler(
t->combiner, false)),
exec_ctx,
grpc_closure_create(force_client_rst_stream, s,
grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,

@@ -73,7 +73,6 @@ typedef enum {
GRPC_CHTTP2_WRITE_STATE_IDLE,
GRPC_CHTTP2_WRITE_STATE_WRITING,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
} grpc_chttp2_write_state;
typedef enum {
@@ -458,7 +457,6 @@ struct grpc_chttp2_stream {
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
bool complete_fetch_covered_by_poller;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
@@ -549,8 +547,7 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
grpc_chttp2_transport *t, const char *reason);
typedef enum {
GRPC_CHTTP2_NOTHING_TO_WRITE,

@@ -433,7 +433,7 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", t, incoming_window,
incoming_frame_size);
if (t->incoming_window <= target_incoming_window / 2) {
grpc_chttp2_initiate_write(exec_ctx, t, false, "flow_control");
grpc_chttp2_initiate_write(exec_ctx, t, "flow_control");
}
return GRPC_ERROR_NONE;

@@ -206,8 +206,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s) &&
stream_ref_if_not_destroyed(&s->refcount->refs)) {
grpc_chttp2_initiate_write(exec_ctx, t, false,
"transport.read_flow_control");
grpc_chttp2_initiate_write(exec_ctx, t, "transport.read_flow_control");
}
}
}

@@ -39,7 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
@@ -56,93 +56,42 @@ grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
grpc_closure_scheduler uncovered_scheduler;
grpc_closure_scheduler covered_scheduler;
grpc_closure_scheduler uncovered_finally_scheduler;
grpc_closure_scheduler covered_finally_scheduler;
grpc_closure_scheduler scheduler;
grpc_closure_scheduler finally_scheduler;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned (STATE_UNORPHANED)
// other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
gpr_atm state;
// number of elements in the list that are covered by a poller: if >0, we can
// offload safely
gpr_atm elements_covered_by_poller;
bool time_to_execute_final_list;
bool final_list_covered_by_poller;
grpc_closure_list final_list;
grpc_closure offload;
gpr_refcount refs;
};
static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error);
static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error);
static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
grpc_closure *closure,
grpc_error *error);
static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
grpc_closure *closure,
grpc_error *error);
static const grpc_closure_scheduler_vtable scheduler_uncovered = {
combiner_exec_uncovered, combiner_exec_uncovered,
"combiner:immediately:uncovered"};
static const grpc_closure_scheduler_vtable scheduler_covered = {
combiner_exec_covered, combiner_exec_covered,
"combiner:immediately:covered"};
static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
"combiner:finally:uncovered"};
static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
combiner_finally_exec_covered, combiner_finally_exec_covered,
"combiner:finally:covered"};
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
typedef struct {
grpc_error *error;
bool covered_by_poller;
} error_data;
static const grpc_closure_scheduler_vtable scheduler = {
combiner_exec, combiner_exec, "combiner:immediately"};
static const grpc_closure_scheduler_vtable finally_scheduler = {
combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
static uintptr_t pack_error_data(error_data d) {
return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
}
static error_data unpack_error_data(uintptr_t p) {
return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
}
static bool is_covered_by_poller(grpc_combiner *lock) {
return lock->final_list_covered_by_poller ||
gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
}
#define IS_COVERED_BY_POLLER_FMT "(final=%d elems=%" PRIdPTR ")->%d"
#define IS_COVERED_BY_POLLER_ARGS(lock) \
(lock)->final_list_covered_by_poller, \
gpr_atm_acq_load(&(lock)->elements_covered_by_poller), \
is_covered_by_poller((lock))
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *grpc_combiner_create(void) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->next_combiner_on_this_exec_ctx = NULL;
lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
lock->final_list_covered_by_poller = false;
lock->uncovered_scheduler.vtable = &scheduler_uncovered;
lock->covered_scheduler.vtable = &scheduler_covered;
lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
lock->scheduler.vtable = &scheduler;
lock->finally_scheduler.vtable = &finally_scheduler;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
grpc_closure_init(&lock->offload, offload, lock,
grpc_workqueue_scheduler(lock->optional_workqueue));
grpc_closure_init(&lock->offload, offload, lock, grpc_executor_scheduler);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -151,7 +100,6 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue);
GRPC_WORKQUEUE_UNREF(exec_ctx, lock->optional_workqueue, "combiner");
gpr_free(lock);
}
@@ -208,21 +156,21 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
}
}
static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error,
bool covered_by_poller) {
#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
((grpc_combiner *)(((char *)((closure)->scheduler)) - \
offsetof(grpc_combiner, scheduler_name)))
static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.execute", 0);
grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
cl, covered_by_poller, last));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last));
GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
assert(cl->cb);
cl->error_data.scratch =
pack_error_data((error_data){error, covered_by_poller});
if (covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
}
cl->error_data.error = error;
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
if (last == 1) {
// first element on this list: add it to the list of combiner locks
@ -232,24 +180,6 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
GPR_TIMER_END("combiner.execute", 0);
}
#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
((grpc_combiner *)(((char *)((closure)->scheduler)) - \
offsetof(grpc_combiner, scheduler_name)))
static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
grpc_error *error) {
combiner_exec(exec_ctx,
COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
error, false);
}
static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
grpc_error *error) {
combiner_exec(exec_ctx,
COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
error, true);
}
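/* Editor's note: COMBINER_FROM_CLOSURE_SCHEDULER is the classic
   "container-of" idiom -- recover the enclosing struct from a pointer to
   one of its members by subtracting the member's offset. A standalone
   sketch with hypothetical types: */
#include <stddef.h>
#define CONTAINER_OF(ptr, type, member) \
  ((type *)((char *)(ptr) - offsetof(type, member)))
typedef struct holder { int other; int member; } holder;
/* For any holder h, CONTAINER_OF(&h.member, holder, member) == &h. */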
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
@ -265,8 +195,7 @@ static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
lock->optional_workqueue));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}
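/* Editor's note: with per-combiner workqueues removed, offload always
   targets the global executor -- lock->offload was bound to
   grpc_executor_scheduler in grpc_combiner_create, so the sched call
   above hops the combiner's remaining work onto an executor thread. A
   restatement of that wiring, using only names from this diff: */
grpc_closure_init(&lock->offload, offload, lock, grpc_executor_scheduler);
grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);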
@ -278,18 +207,14 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
return false;
}
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
"is_covered_by_poller=" IS_COVERED_BY_POLLER_FMT
" exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, lock->optional_workqueue, IS_COVERED_BY_POLLER_ARGS(lock),
grpc_exec_ctx_ready_to_finish(exec_ctx),
lock->time_to_execute_final_list));
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
grpc_exec_ctx_ready_to_finish(exec_ctx)) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_continue_exec_ctx "
"exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, grpc_exec_ctx_ready_to_finish(exec_ctx),
lock->time_to_execute_final_list));
if (grpc_exec_ctx_ready_to_finish(exec_ctx) && grpc_executor_is_threaded()) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
// this execution context wants to move on, and we have a workqueue (and
// so can help the execution context out): schedule remaining work to be
@ -310,29 +235,23 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
queue_offload(exec_ctx, lock);
}
queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
error_data err = unpack_error_data(cl->error_data.scratch);
grpc_error *cl_err = cl->error_data.error;
#ifndef NDEBUG
cl->scheduled = false;
#endif
cl->cb(exec_ctx, cl->cb_arg, err.error);
if (err.covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
}
GRPC_ERROR_UNREF(err.error);
cl->cb(exec_ctx, cl->cb_arg, cl_err);
GRPC_ERROR_UNREF(cl_err);
GPR_TIMER_END("combiner.exec1", 0);
} else {
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->final_list_covered_by_poller = false;
int loops = 0;
while (c != NULL) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
@ -398,20 +317,20 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error);
static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock, grpc_closure *closure,
grpc_error *error,
bool covered_by_poller) {
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
closure, exec_ctx->active_combiner, covered_by_poller));
static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error) {
grpc_combiner *lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_execute_finally c=%p; ac=%p",
lock, closure, exec_ctx->active_combiner));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
grpc_closure_sched(
exec_ctx, grpc_closure_create(enqueue_finally, closure,
grpc_combiner_scheduler(lock, false)),
error);
grpc_closure_sched(exec_ctx,
grpc_closure_create(enqueue_finally, closure,
grpc_combiner_scheduler(lock)),
error);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
@ -419,42 +338,20 @@ static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
}
if (covered_by_poller) {
lock->final_list_covered_by_poller = true;
}
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
GRPC_ERROR_REF(error), false);
}
static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
grpc_closure *cl,
grpc_error *error) {
combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
cl, uncovered_finally_scheduler),
cl, error, false);
}
static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
grpc_closure *cl, grpc_error *error) {
combiner_execute_finally(
exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
cl, error, true);
combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
}
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
bool covered_by_poller) {
return covered_by_poller ? &combiner->covered_scheduler
: &combiner->uncovered_scheduler;
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
return &combiner->scheduler;
}
grpc_closure_scheduler *grpc_combiner_finally_scheduler(
grpc_combiner *combiner, bool covered_by_poller) {
return covered_by_poller ? &combiner->covered_finally_scheduler
: &combiner->uncovered_finally_scheduler;
grpc_combiner *combiner) {
return &combiner->finally_scheduler;
}
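/* Editor's note: sketch of the "finally" variant after this change -- a
   closure initialized against grpc_combiner_finally_scheduler runs under
   the combiner just before the lock is released. Assumes the surrounding
   gRPC-core declarations; on_cleanup and arg are hypothetical: */
grpc_closure cleanup;
grpc_closure_init(&cleanup, on_cleanup /* hypothetical cb */, arg,
                  grpc_combiner_finally_scheduler(combiner));
grpc_closure_sched(exec_ctx, &cleanup, GRPC_ERROR_NONE);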

@ -48,7 +48,7 @@
// Initialize the lock, with an optional workqueue to shift load to when
// necessary
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
grpc_combiner *grpc_combiner_create(void);
//#define GRPC_COMBINER_REFCOUNT_DEBUG
#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
@ -71,11 +71,9 @@ grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
void grpc_combiner_unref(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
// Fetch a scheduler to schedule closures against
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock,
bool covered_by_poller);
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock);
// Scheduler to execute \a action within the lock just prior to unlocking.
grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock,
bool covered_by_poller);
grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);

@ -69,10 +69,6 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
return ep->vtable->get_workqueue(ep);
}
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
return ep->vtable->get_resource_user(ep);
}

@ -52,7 +52,6 @@ struct grpc_endpoint_vtable {
grpc_slice_buffer *slices, grpc_closure *cb);
void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_slice_buffer *slices, grpc_closure *cb);
grpc_workqueue *(*get_workqueue)(grpc_endpoint *ep);
void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset *pollset);
void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@ -78,9 +77,6 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep);
*/
int grpc_endpoint_get_fd(grpc_endpoint *ep);
/* Retrieve a reference to the workqueue associated with this endpoint */
grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep);
/* Write slices out to the socket.
If the connection is ready for more data after the end of the call, it

@ -58,7 +58,6 @@
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@ -130,7 +129,9 @@ struct grpc_pollset {
* Pollset-set Declarations
*/
struct grpc_pollset_set {};
struct grpc_pollset_set {
char unused;
};
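/* Editor's note: ISO C forbids empty struct definitions (an empty
   grpc_pollset_set would be a zero-size GNU extension), so the dummy
   member gives the type a well-defined, nonzero size. Standalone
   illustration: */
struct one_byte_set { char unused; };
_Static_assert(sizeof(struct one_byte_set) == 1,
               "a single char member yields a size-1 struct");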
/*******************************************************************************
* Common helpers
@ -283,10 +284,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
return (grpc_workqueue *)0xb0b51ed;
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
@ -313,8 +310,6 @@ GPR_TLS_DECL(g_current_thread_worker);
static gpr_atm g_active_poller;
static pollset_neighbourhood *g_neighbourhoods;
static size_t g_num_neighbourhoods;
static gpr_mu g_wq_mu;
static grpc_closure_list g_wq_items;
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
@ -363,8 +358,6 @@ static grpc_error *pollset_global_init(void) {
gpr_atm_no_barrier_store(&g_active_poller, 0);
global_wakeup_fd.read_fd = -1;
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
gpr_mu_init(&g_wq_mu);
g_wq_items = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
if (err != GRPC_ERROR_NONE) return err;
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
.data.ptr = &global_wakeup_fd};
@ -383,7 +376,6 @@ static grpc_error *pollset_global_init(void) {
static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
gpr_mu_destroy(&g_wq_mu);
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_destroy(&g_neighbourhoods[i].mu);
@ -507,9 +499,6 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
for (int i = 0; i < r; i++) {
void *data_ptr = events[i].data.ptr;
if (data_ptr == &global_wakeup_fd) {
gpr_mu_lock(&g_wq_mu);
grpc_closure_list_move(&g_wq_items, &exec_ctx->closure_list);
gpr_mu_unlock(&g_wq_mu);
append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else {
@ -791,84 +780,6 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {}
/*******************************************************************************
* Workqueue Definitions
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void wq_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
  // find a neighbourhood to wake up
bool scheduled = false;
size_t initial_neighbourhood = choose_neighbourhood();
for (size_t i = 0; !scheduled && i < g_num_neighbourhoods; i++) {
pollset_neighbourhood *neighbourhood =
&g_neighbourhoods[(initial_neighbourhood + i) % g_num_neighbourhoods];
if (gpr_mu_trylock(&neighbourhood->mu)) {
if (neighbourhood->active_root != NULL) {
grpc_pollset *inspect = neighbourhood->active_root;
do {
if (gpr_mu_trylock(&inspect->mu)) {
if (inspect->root_worker != NULL) {
grpc_pollset_worker *inspect_worker = inspect->root_worker;
do {
if (inspect_worker->kick_state == UNKICKED) {
inspect_worker->kick_state = KICKED;
grpc_closure_list_append(
&inspect_worker->schedule_on_end_work, closure, error);
if (inspect_worker->initialized_cv) {
gpr_cv_signal(&inspect_worker->cv);
}
scheduled = true;
}
inspect_worker = inspect_worker->next;
} while (!scheduled && inspect_worker != inspect->root_worker);
}
gpr_mu_unlock(&inspect->mu);
}
inspect = inspect->next;
} while (!scheduled && inspect != neighbourhood->active_root);
}
gpr_mu_unlock(&neighbourhood->mu);
}
}
if (!scheduled) {
gpr_mu_lock(&g_wq_mu);
grpc_closure_list_append(&g_wq_items, closure, error);
gpr_mu_unlock(&g_wq_mu);
GRPC_LOG_IF_ERROR("workqueue_scheduler",
grpc_wakeup_fd_wakeup(&global_wakeup_fd));
}
}
static const grpc_closure_scheduler_vtable
singleton_workqueue_scheduler_vtable = {wq_sched, wq_sched,
"epoll1_workqueue"};
static grpc_closure_scheduler singleton_workqueue_scheduler = {
&singleton_workqueue_scheduler_vtable};
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
return &singleton_workqueue_scheduler;
}
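/* Editor's note: wq_sched above never blocks -- it probes neighbourhoods
   and pollsets with trylock looking for an UNKICKED worker, and if none
   is found it falls back to a mutex-protected global list plus a
   wakeup-fd kick. A standalone sketch of that "trylock scan, then fall
   back" shape (hypothetical names): */
#include <pthread.h>
#include <stdbool.h>
typedef struct shard { pthread_mutex_t mu; void *idle_worker; } shard;
static bool try_hand_off(shard *shards, int n, void *item) {
  for (int i = 0; i < n; i++) {
    if (pthread_mutex_trylock(&shards[i].mu) != 0) continue; /* never block */
    bool handed_off = false;
    if (shards[i].idle_worker != NULL) {
      (void)item; /* give `item` to the idle worker and signal it (elided) */
      handed_off = true;
    }
    pthread_mutex_unlock(&shards[i].mu);
    if (handed_off) return true;
  }
  return false; /* caller appends to a global list and kicks the wakeup fd */
}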
/*******************************************************************************
* Pollset-set Definitions
*/
@ -920,7 +831,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@ -938,10 +848,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};

@ -61,7 +61,6 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/env.h"
@ -184,13 +183,15 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
//#define PI_REFCOUNT_DEBUG
#ifdef PI_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#else /* defined(PI_REFCOUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
@ -204,8 +205,6 @@ typedef struct worker_node {
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
grpc_closure_scheduler workqueue_scheduler;
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
the refcount.
@ -226,15 +225,6 @@ typedef struct polling_island {
/* Number of threads currently polling on this island */
gpr_atm poller_count;
/* Mutex guarding the read end of the workqueue (must be held to pop from
* workqueue_items) */
gpr_mu workqueue_read_mu;
/* Queue of closures to be executed */
gpr_mpscq workqueue_items;
/* Count of items in workqueue_items */
gpr_atm workqueue_item_count;
/* Wakeup fd used to wake pollers to check the contents of workqueue_items */
grpc_wakeup_fd workqueue_wakeup_fd;
/* The list of workers waiting to do polling on this polling island */
gpr_mu worker_list_mu;
@ -323,8 +313,6 @@ static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@ -337,13 +325,10 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
workqueue_enqueue, workqueue_enqueue, "workqueue"};
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#ifdef PI_REFCOUNT_DEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
@ -359,36 +344,6 @@ static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
if (workqueue != NULL) {
pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue != NULL) {
pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
}
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_add_ref((polling_island *)workqueue);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_unref(exec_ctx, (polling_island *)workqueue);
}
}
#endif
static void pi_add_ref(polling_island *pi) {
@ -592,17 +547,12 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
pi = gpr_malloc(sizeof(*pi));
pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
gpr_mu_init(&pi->workqueue_read_mu);
gpr_mpscq_init(&pi->workqueue_items);
gpr_atm_rel_store(&pi->workqueue_item_count, 0);
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
@ -610,11 +560,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&pi->worker_list_mu);
worker_node_init(&pi->worker_list_head);
if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
err_desc)) {
goto done;
}
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@ -622,8 +567,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
goto done;
}
polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
@ -642,11 +585,7 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
gpr_mu_destroy(&pi->workqueue_read_mu);
gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_mu_destroy(&pi->worker_list_mu);
GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head));
@ -794,45 +733,6 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
static void workqueue_maybe_wakeup(polling_island *pi) {
/* If this thread is the current poller, then it may be that it's about to
decrement the current poller count, so we need to look past this thread */
bool is_current_poller = (g_current_thread_polling_island == pi);
gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
/* Only issue a wakeup if it's likely that some poller could come in and take
it right now. Note that since we do an anticipatory mpscq_pop every poll
loop, it's ok if we miss the wakeup here, as we'll get the work item when
the next poller enters anyway. */
if (current_pollers > min_current_pollers_for_wakeup) {
GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
}
}
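/* Editor's note: the threshold above encodes "wake only if a poller other
   than the caller could take the item right now" -- a caller that is
   itself this island's current poller needs a second poller present
   before kicking the wakeup fd. Standalone restatement: */
#include <stdbool.h>
static bool should_issue_wakeup(bool caller_is_current_poller,
                                long current_pollers) {
  long min_needed = caller_is_current_poller ? 1 : 0;
  return current_pollers > min_needed; /* some *other* poller exists */
}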
static void workqueue_move_items_to_parent(polling_island *q) {
polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
if (p == NULL) {
return;
}
gpr_mu_lock(&q->workqueue_read_mu);
int num_added = 0;
while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
if (n != NULL) {
gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
gpr_mpscq_push(&p->workqueue_items, n);
num_added++;
}
}
gpr_mu_unlock(&q->workqueue_read_mu);
if (num_added > 0) {
workqueue_maybe_wakeup(p);
}
workqueue_move_items_to_parent(p);
}
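/* Editor's note: once islands merge, queued items drain upward along the
   merged_to chain; the tail recursion above repeats the migration at each
   parent until the chain's root is reached. A hedged sketch of that walk,
   with the per-queue locking and wakeups elided (hypothetical types): */
typedef struct island { struct island *merged_to; /* queues elided */ } island;
static void drain_to_root(island *q) {
  while (q->merged_to != NULL) {
    /* move q's queued items onto q->merged_to and wake its pollers */
    q = q->merged_to;
  }
}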
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@ -857,8 +757,6 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
workqueue_move_items_to_parent(p);
}
/* else if p == q, nothing needs to be done */
@ -869,32 +767,6 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
/* take a ref to the workqueue: otherwise it can happen that whatever events
 * this kicks off end up destroying the workqueue before this function
* completes */
GRPC_WORKQUEUE_REF(workqueue, "enqueue");
polling_island *pi = (polling_island *)workqueue;
gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
closure->error_data.error = error;
gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
if (last == 0) {
workqueue_maybe_wakeup(pi);
}
workqueue_move_items_to_parent(pi);
GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
GPR_TIMER_END("workqueue.enqueue", 0);
}
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
polling_island *pi = (polling_island *)workqueue;
return workqueue == NULL ? grpc_schedule_on_exec_ctx
: &pi->workqueue_scheduler;
}
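/* Editor's note: the enqueue path above issues a wakeup only on the
   0 -> 1 item-count transition; later producers rely on that wakeup still
   being pending. Standalone sketch of the pattern with C11 atomics (the
   mpsc queue itself is elided; hypothetical names): */
#include <stdatomic.h>
typedef struct counted_wq { atomic_long item_count; } counted_wq;
static void counted_enqueue(counted_wq *wq, void *item) {
  (void)item; /* push onto the elided mpsc queue */
  if (atomic_fetch_add(&wq->item_count, 1) == 0) {
    /* queue was empty: issue exactly one wakeup, e.g. write the wakeup fd */
  }
}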
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@ -1153,14 +1025,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
gpr_mu_lock(&fd->po.mu);
grpc_workqueue *workqueue =
GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue");
gpr_mu_unlock(&fd->po.mu);
return workqueue;
}
/*******************************************************************************
* Pollset Definitions
*/
@ -1432,33 +1296,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->po.mu);
}
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
polling_island *pi) {
if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
gpr_mu_unlock(&pi->workqueue_read_mu);
if (n != NULL) {
if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
workqueue_maybe_wakeup(pi);
}
grpc_closure *c = (grpc_closure *)n;
grpc_error *error = c->error_data.error;
#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
return true;
} else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
/* n == NULL might mean there's work but it's not available to be popped
* yet - try to ensure another workqueue wakes up to check shortly if so
*/
workqueue_maybe_wakeup(pi);
}
}
return false;
}
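/* Editor's note: this is the consumer half of the enqueue path -- the read
   lock is only trylock'd (mpsc pop must be single-consumer, and a poller
   would rather poll than wait), one closure is run per call, and the
   wakeup is re-armed whenever items remain so another poller keeps
   draining. A hedged standalone sketch (hypothetical names): */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
typedef struct drain_wq {
  pthread_mutex_t read_mu; /* guards the single-consumer pop */
  atomic_long item_count;
} drain_wq;
static bool maybe_drain_one(drain_wq *wq) {
  if (pthread_mutex_trylock(&wq->read_mu) != 0) return false;
  void *n = NULL; /* n = pop from the elided mpsc queue */
  pthread_mutex_unlock(&wq->read_mu);
  if (n == NULL) return false;
  if (atomic_fetch_add(&wq->item_count, -1) > 1) {
    /* items remain: re-arm the wakeup so another poller drains them */
  }
  /* run the popped closure here (elided), then report progress */
  return true;
}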
/* NOTE: This function may modify 'now' */
static bool acquire_polling_lease(grpc_pollset_worker *worker,
polling_island *pi, gpr_timespec deadline,
@ -1594,12 +1431,7 @@ static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
if (data_ptr == &pi->workqueue_wakeup_fd) {
append_error(error,
grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
err_desc);
maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
"%d) got merged",
@ -1675,15 +1507,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->po.mu);
/* If we get some workqueue work to do, it might end up completing an item on
the completion queue, so there's no need to poll... so we skip that and
redo the complete loop to verify */
if (!maybe_do_workqueue_work(exec_ctx, pi)) {
g_current_thread_polling_island = pi;
pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now,
deadline, sig_mask, error);
g_current_thread_polling_island = NULL;
}
g_current_thread_polling_island = pi;
pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline,
sig_mask, error);
g_current_thread_polling_island = NULL;
GPR_ASSERT(pi != NULL);
@ -2036,7 +1863,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@ -2054,10 +1880,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};

@ -61,7 +61,6 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@ -109,23 +108,22 @@ static void fd_global_shutdown(void);
* epoll set Declarations
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
//#define EPS_REFCOUNT_DEBUG
#ifdef EPS_REFCOUNT_DEBUG
#define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define EPS_UNREF(exec_ctx, p, r) \
eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#else /* defined(EPS_REFCOUNT_DEBUG) */
#define EPS_ADD_REF(p, r) eps_add_ref((p))
#define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p))
#endif /* !defined(EPS_REFCOUNT_DEBUG) */
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct epoll_set {
grpc_closure_scheduler workqueue_scheduler;
/* Mutex poller should acquire to poll this. This enforces that only one
* poller can be polling on epoll_set at any time */
gpr_mu mu;
@ -139,15 +137,6 @@ typedef struct epoll_set {
/* Number of threads currently polling on this epoll set*/
gpr_atm poller_count;
/* Mutex guarding the read end of the workqueue (must be held to pop from
* workqueue_items) */
gpr_mu workqueue_read_mu;
/* Queue of closures to be executed */
gpr_mpscq workqueue_items;
/* Count of items in workqueue_items */
gpr_atm workqueue_item_count;
/* Wakeup fd used to wake pollers to check the contents of workqueue_items */
grpc_wakeup_fd workqueue_wakeup_fd;
/* Is the epoll set shutdown */
gpr_atm is_shutdown;
@ -181,7 +170,9 @@ struct grpc_pollset {
/*******************************************************************************
* Pollset-set Declarations
*/
struct grpc_pollset_set {};
struct grpc_pollset_set {
char unused;
};
/*****************************************************************************
* Dedicated polling threads and pollsets - Declarations
@ -235,8 +226,6 @@ static __thread epoll_set *g_current_thread_epoll_set;
/* Forward declaration */
static void epoll_set_delete(epoll_set *eps);
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@ -249,13 +238,10 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
workqueue_enqueue, workqueue_enqueue, "workqueue"};
static void eps_add_ref(epoll_set *eps);
static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#ifdef EPS_REFCOUNT_DEBUG
static void eps_add_ref_dbg(epoll_set *eps, const char *reason,
const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&eps->ref_count);
@ -271,36 +257,6 @@ static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps,
gpr_log(GPR_DEBUG, "Unref eps: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)eps, old_cnt, (old_cnt - 1), reason, file, line);
}
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
if (workqueue != NULL) {
eps_add_ref_dbg((epoll_set *)workqueue, reason, file, line);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue != NULL) {
eps_unref_dbg(exec_ctx, (epoll_set *)workqueue, reason, file, line);
}
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue != NULL) {
eps_add_ref((epoll_set *)workqueue);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (workqueue != NULL) {
eps_unref(exec_ctx, (epoll_set *)workqueue);
}
}
#endif
static void eps_add_ref(epoll_set *eps) {
@ -394,24 +350,15 @@ static epoll_set *epoll_set_create(grpc_error **error) {
*error = GRPC_ERROR_NONE;
eps = gpr_malloc(sizeof(*eps));
eps->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
eps->epoll_fd = -1;
gpr_mu_init(&eps->mu);
gpr_mu_init(&eps->workqueue_read_mu);
gpr_mpscq_init(&eps->workqueue_items);
gpr_atm_rel_store(&eps->workqueue_item_count, 0);
gpr_atm_rel_store(&eps->ref_count, 0);
gpr_atm_rel_store(&eps->poller_count, 0);
gpr_atm_rel_store(&eps->is_shutdown, false);
if (!append_error(error, grpc_wakeup_fd_init(&eps->workqueue_wakeup_fd),
err_desc)) {
goto done;
}
eps->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (eps->epoll_fd < 0) {
@ -419,8 +366,6 @@ static epoll_set *epoll_set_create(grpc_error **error) {
goto done;
}
epoll_set_add_wakeup_fd_locked(eps, &eps->workqueue_wakeup_fd, error);
done:
if (*error != GRPC_ERROR_NONE) {
epoll_set_delete(eps);
@ -434,57 +379,11 @@ static void epoll_set_delete(epoll_set *eps) {
close(eps->epoll_fd);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&eps->workqueue_item_count) == 0);
gpr_mu_destroy(&eps->mu);
gpr_mu_destroy(&eps->workqueue_read_mu);
gpr_mpscq_destroy(&eps->workqueue_items);
grpc_wakeup_fd_destroy(&eps->workqueue_wakeup_fd);
gpr_free(eps);
}
static void workqueue_maybe_wakeup(epoll_set *eps) {
/* If this thread is the current poller, then it may be that it's about to
decrement the current poller count, so we need to look past this thread */
bool is_current_poller = (g_current_thread_epoll_set == eps);
gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
gpr_atm current_pollers = gpr_atm_no_barrier_load(&eps->poller_count);
/* Only issue a wakeup if it's likely that some poller could come in and take
it right now. Note that since we do an anticipatory mpscq_pop every poll
loop, it's ok if we miss the wakeup here, as we'll get the work item when
the next poller enters anyway. */
if (current_pollers > min_current_pollers_for_wakeup) {
GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
grpc_wakeup_fd_wakeup(&eps->workqueue_wakeup_fd));
}
}
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
/* take a ref to the workqueue: otherwise it can happen that whatever events
 * this kicks off end up destroying the workqueue before this function
* completes */
GRPC_WORKQUEUE_REF(workqueue, "enqueue");
epoll_set *eps = (epoll_set *)workqueue;
gpr_atm last = gpr_atm_no_barrier_fetch_add(&eps->workqueue_item_count, 1);
closure->error_data.error = error;
gpr_mpscq_push(&eps->workqueue_items, &closure->next_data.atm_next);
if (last == 0) {
workqueue_maybe_wakeup(eps);
}
GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
GPR_TIMER_END("workqueue.enqueue", 0);
}
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
epoll_set *eps = (epoll_set *)workqueue;
return workqueue == NULL ? grpc_schedule_on_exec_ctx
: &eps->workqueue_scheduler;
}
static grpc_error *epoll_set_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@ -680,8 +579,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
/*******************************************************************************
* Pollset Definitions
*/
@ -865,32 +762,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, epoll_set *eps) {
if (gpr_mu_trylock(&eps->workqueue_read_mu)) {
gpr_mpscq_node *n = gpr_mpscq_pop(&eps->workqueue_items);
gpr_mu_unlock(&eps->workqueue_read_mu);
if (n != NULL) {
if (gpr_atm_full_fetch_add(&eps->workqueue_item_count, -1) > 1) {
workqueue_maybe_wakeup(eps);
}
grpc_closure *c = (grpc_closure *)n;
grpc_error *error = c->error_data.error;
#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
return true;
} else if (gpr_atm_no_barrier_load(&eps->workqueue_item_count) > 0) {
/* n == NULL might mean there's work but it's not available to be popped
* yet - try to ensure another workqueue wakes up to check shortly if so
*/
workqueue_maybe_wakeup(eps);
}
}
return false;
}
/* Blocking call */
static void acquire_epoll_lease(epoll_set *eps) {
if (g_num_threads_per_eps > 1) {
@ -934,12 +805,7 @@ static void do_epoll_wait(grpc_exec_ctx *exec_ctx, int epoll_fd, epoll_set *eps,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
if (data_ptr == &eps->workqueue_wakeup_fd) {
append_error(error,
grpc_wakeup_fd_consume_wakeup(&eps->workqueue_wakeup_fd),
err_desc);
maybe_do_workqueue_work(exec_ctx, eps);
} else if (data_ptr == &epoll_set_wakeup_fd) {
if (data_ptr == &epoll_set_wakeup_fd) {
gpr_atm_rel_store(&eps->is_shutdown, 1);
gpr_log(GPR_INFO, "pollset poller: shutdown set");
} else {
@ -966,18 +832,13 @@ static void epoll_set_work(grpc_exec_ctx *exec_ctx, epoll_set *eps,
epoll set. */
epoll_fd = eps->epoll_fd;
/* If we get some workqueue work to do, it might end up completing an item on
the completion queue, so there's no need to poll... so we skip that and
redo the complete loop to verify */
if (!maybe_do_workqueue_work(exec_ctx, eps)) {
gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
g_current_thread_epoll_set = eps;
gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
g_current_thread_epoll_set = eps;
do_epoll_wait(exec_ctx, epoll_fd, eps, error);
do_epoll_wait(exec_ctx, epoll_fd, eps, error);
g_current_thread_epoll_set = NULL;
gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
}
g_current_thread_epoll_set = NULL;
gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
GPR_TIMER_END("epoll_set_work", 0);
}
@ -1120,7 +981,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@ -1138,10 +998,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};

@ -59,7 +59,6 @@
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/spinlock.h"
@ -139,17 +138,6 @@ struct grpc_fd {
Ref/Unref by two to avoid altering the orphaned bit */
gpr_atm refst;
/* Wakeup fd used to wake pollers to check the contents of workqueue_items */
grpc_wakeup_fd workqueue_wakeup_fd;
grpc_closure_scheduler workqueue_scheduler;
/* Spinlock guarding the read end of the workqueue (must be held to pop from
* workqueue_items) */
gpr_spinlock workqueue_read_mu;
/* Queue of closures to be executed */
gpr_mpscq workqueue_items;
/* Count of items in workqueue_items */
gpr_atm workqueue_item_count;
/* The fd is either closed or we relinquished control of it. In either
case, this indicates that the 'fd' on this structure is no longer
valid */
@ -172,12 +160,6 @@ struct grpc_fd {
static void fd_global_init(void);
static void fd_global_shutdown(void);
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
workqueue_enqueue, workqueue_enqueue, "workqueue"};
/*******************************************************************************
* Pollset Declarations
*/
@ -347,13 +329,6 @@ static grpc_fd *fd_create(int fd, const char *name) {
grpc_lfev_init(&new_fd->write_closure);
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
GRPC_LOG_IF_ERROR("fd_create",
grpc_wakeup_fd_init(&new_fd->workqueue_wakeup_fd));
new_fd->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
new_fd->workqueue_read_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mpscq_init(&new_fd->workqueue_items);
gpr_atm_no_barrier_store(&new_fd->workqueue_item_count, 0);
new_fd->freelist_next = NULL;
new_fd->on_done_closure = NULL;
@ -446,91 +421,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
REF_BY(fd, 2, "return_workqueue");
return (grpc_workqueue *)fd;
}
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
if (workqueue != NULL) {
ref_by((grpc_fd *)workqueue, 2, file, line, reason);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue != NULL) {
unref_by(exec_ctx, (grpc_fd *)workqueue, 2, file, line, reason);
}
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue != NULL) {
ref_by((grpc_fd *)workqueue, 2);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (workqueue != NULL) {
unref_by(exec_ctx, (grpc_fd *)workqueue, 2);
}
}
#endif
static void workqueue_wakeup(grpc_fd *fd) {
GRPC_LOG_IF_ERROR("workqueue_enqueue",
grpc_wakeup_fd_wakeup(&fd->workqueue_wakeup_fd));
}
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
grpc_fd *fd = (grpc_fd *)(((char *)closure->scheduler) -
offsetof(grpc_fd, workqueue_scheduler));
REF_BY(fd, 2, "workqueue_enqueue");
gpr_atm last = gpr_atm_no_barrier_fetch_add(&fd->workqueue_item_count, 1);
closure->error_data.error = error;
gpr_mpscq_push(&fd->workqueue_items, &closure->next_data.atm_next);
if (last == 0) {
workqueue_wakeup(fd);
}
UNREF_BY(exec_ctx, fd, 2, "workqueue_enqueue");
}
static void fd_invoke_workqueue(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
/* handle spurious wakeups */
if (!gpr_spinlock_trylock(&fd->workqueue_read_mu)) return;
gpr_mpscq_node *n = gpr_mpscq_pop(&fd->workqueue_items);
gpr_spinlock_unlock(&fd->workqueue_read_mu);
if (n != NULL) {
if (gpr_atm_full_fetch_add(&fd->workqueue_item_count, -1) > 1) {
workqueue_wakeup(fd);
}
grpc_closure *c = (grpc_closure *)n;
grpc_error *error = c->error_data.error;
#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
} else if (gpr_atm_no_barrier_load(&fd->workqueue_item_count) > 0) {
/* n == NULL might mean there's work but it's not available to be popped
* yet - try to ensure another workqueue wakes up to check shortly if so
*/
workqueue_wakeup(fd);
}
}
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
return &((grpc_fd *)workqueue)->workqueue_scheduler;
}
/*******************************************************************************
* Pollable Definitions
*/
@ -596,22 +486,7 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
.data.ptr = fd};
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
switch (errno) {
case EEXIST: /* if this fd is already in the epoll set, the workqueue fd
must also be - just return */
gpr_mu_unlock(&fd->orphaned_mu);
return GRPC_ERROR_NONE;
default:
append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
}
}
struct epoll_event ev_wq = {
.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE),
.data.ptr = (void *)(1 + (intptr_t)fd)};
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->workqueue_wakeup_fd.read_fd, &ev_wq) !=
0) {
switch (errno) {
case EEXIST: /* if the workqueue fd is already in the epoll set we're ok
- no need to do anything special */
case EEXIST:
break;
default:
append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
@ -874,29 +749,21 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
append_error(&error, grpc_wakeup_fd_consume_wakeup(&p->wakeup), err_desc);
} else {
grpc_fd *fd = (grpc_fd *)(((intptr_t)data_ptr) & ~(intptr_t)1);
bool is_workqueue = (((intptr_t)data_ptr) & 1) != 0;
grpc_fd *fd = (grpc_fd *)data_ptr;
bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (events[i].events & EPOLLOUT) != 0;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p poll %p got fd %p: is_wq=%d cancel=%d read=%d "
"PS:%p poll %p got fd %p: cancel=%d read=%d "
"write=%d",
pollset, p, fd, is_workqueue, cancel, read_ev, write_ev);
pollset, p, fd, cancel, read_ev, write_ev);
}
if (is_workqueue) {
append_error(&error,
grpc_wakeup_fd_consume_wakeup(&fd->workqueue_wakeup_fd),
err_desc);
fd_invoke_workqueue(exec_ctx, fd);
} else {
if (read_ev || cancel) {
fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
}
if (read_ev || cancel) {
fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
}
}
}
@ -1449,7 +1316,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@ -1467,10 +1333,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};

@ -59,7 +59,6 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@ -177,7 +176,9 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
//#define PI_REFCOUNT_DEBUG
#ifdef PI_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
@ -192,8 +193,6 @@ static void fd_global_shutdown(void);
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
grpc_closure_scheduler workqueue_scheduler;
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
the refcount.
@ -214,15 +213,6 @@ typedef struct polling_island {
/* Number of threads currently polling on this island */
gpr_atm poller_count;
/* Mutex guarding the read end of the workqueue (must be held to pop from
* workqueue_items) */
gpr_mu workqueue_read_mu;
/* Queue of closures to be executed */
gpr_mpscq workqueue_items;
/* Count of items in workqueue_items */
gpr_atm workqueue_item_count;
/* Wakeup fd used to wake pollers to check the contents of workqueue_items */
grpc_wakeup_fd workqueue_wakeup_fd;
/* The fd of the underlying epoll set */
int epoll_fd;
@ -297,8 +287,6 @@ static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@ -311,13 +299,10 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
workqueue_enqueue, workqueue_enqueue, "workqueue"};
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#ifdef PI_REFCOUNT_DEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
@ -333,36 +318,6 @@ static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
if (workqueue != NULL) {
pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue != NULL) {
pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
}
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_add_ref((polling_island *)workqueue);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_unref(exec_ctx, (polling_island *)workqueue);
}
}
#endif
static void pi_add_ref(polling_island *pi) {
@ -526,26 +481,16 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
pi = gpr_malloc(sizeof(*pi));
pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
gpr_mu_init(&pi->workqueue_read_mu);
gpr_mpscq_init(&pi->workqueue_items);
gpr_atm_rel_store(&pi->workqueue_item_count, 0);
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
err_desc)) {
goto done;
}
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@ -553,8 +498,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
goto done;
}
polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
@ -573,11 +516,7 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
gpr_mu_destroy(&pi->workqueue_read_mu);
gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_free(pi->fds);
gpr_free(pi);
}
@ -722,45 +661,6 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
static void workqueue_maybe_wakeup(polling_island *pi) {
/* If this thread is the current poller, then it may be that it's about to
decrement the current poller count, so we need to look past this thread */
bool is_current_poller = (g_current_thread_polling_island == pi);
gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
/* Only issue a wakeup if it's likely that some poller could come in and take
it right now. Note that since we do an anticipatory mpscq_pop every poll
loop, it's ok if we miss the wakeup here, as we'll get the work item when
the next poller enters anyway. */
if (current_pollers > min_current_pollers_for_wakeup) {
GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
}
}
static void workqueue_move_items_to_parent(polling_island *q) {
polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
if (p == NULL) {
return;
}
gpr_mu_lock(&q->workqueue_read_mu);
int num_added = 0;
while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
if (n != NULL) {
gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
gpr_mpscq_push(&p->workqueue_items, n);
num_added++;
}
}
gpr_mu_unlock(&q->workqueue_read_mu);
if (num_added > 0) {
workqueue_maybe_wakeup(p);
}
workqueue_move_items_to_parent(p);
}
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@ -785,8 +685,6 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
workqueue_move_items_to_parent(p);
}
/* else if p == q, nothing needs to be done */
@ -797,32 +695,6 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
/* take a ref to the workqueue: otherwise it can happen that whatever events
 * this kicks off end up destroying the workqueue before this function
* completes */
GRPC_WORKQUEUE_REF(workqueue, "enqueue");
polling_island *pi = (polling_island *)workqueue;
gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
closure->error_data.error = error;
gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
if (last == 0) {
workqueue_maybe_wakeup(pi);
}
workqueue_move_items_to_parent(pi);
GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
GPR_TIMER_END("workqueue.enqueue", 0);
}
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
polling_island *pi = (polling_island *)workqueue;
return workqueue == NULL ? grpc_schedule_on_exec_ctx
: &pi->workqueue_scheduler;
}
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@ -1081,14 +953,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
gpr_mu_lock(&fd->po.mu);
grpc_workqueue *workqueue =
GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue");
gpr_mu_unlock(&fd->po.mu);
return workqueue;
}
/*******************************************************************************
* Pollset Definitions
*/
@ -1326,44 +1190,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->po.mu);
}
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
polling_island *pi) {
if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
gpr_mu_unlock(&pi->workqueue_read_mu);
if (n != NULL) {
gpr_atm remaining =
gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) - 1;
GRPC_POLLING_TRACE(
"maybe_do_workqueue_work: pi: %p: got closure %p, remaining = "
"%" PRIdPTR,
pi, n, remaining);
if (remaining > 0) {
workqueue_maybe_wakeup(pi);
}
grpc_closure *c = (grpc_closure *)n;
grpc_error *error = c->error_data.error;
#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
return true;
} else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
/* n == NULL might mean there's work but it's not available to be popped
* yet - try to ensure another workqueue wakes up to check shortly if so
*/
GRPC_POLLING_TRACE(
"maybe_do_workqueue_work: pi: %p: more to do, but not yet", pi);
workqueue_maybe_wakeup(pi);
}
} else {
GRPC_POLLING_TRACE("maybe_do_workqueue_work: pi: %p: read already locked",
pi);
}
return false;
}
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
@ -1419,76 +1245,61 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->po.mu);
/* If we get some workqueue work to do, it might end up completing an item on
the completion queue, so there's no need to poll... so we skip that and
redo the complete loop to verify */
GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker %p, pi %p", pollset,
worker, pi);
if (!maybe_do_workqueue_work(exec_ctx, pi)) {
GRPC_POLLING_TRACE("pollset_work: begins");
gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
g_current_thread_polling_island = pi;
GRPC_SCHEDULING_START_BLOCKING_REGION;
ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
sig_mask);
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (ep_rv < 0) {
if (errno != EINTR) {
gpr_asprintf(&err_msg,
"epoll_wait() epoll fd: %d failed with error: %d (%s)",
epoll_fd, errno, strerror(errno));
append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
} else {
/* We were interrupted. Save an iteration by doing a zero timeout
epoll_wait to see if there are any other events of interest */
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p received kick",
(void *)pollset, (void *)worker);
ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
}
gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
g_current_thread_polling_island = pi;
GRPC_SCHEDULING_START_BLOCKING_REGION;
ep_rv =
epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (ep_rv < 0) {
if (errno != EINTR) {
gpr_asprintf(&err_msg,
"epoll_wait() epoll fd: %d failed with error: %d (%s)",
epoll_fd, errno, strerror(errno));
append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
} else {
/* We were interrupted. Save an iteration by doing a zero-timeout
epoll_wait to see if there are any other events of interest */
GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick",
(void *)pollset, (void *)worker);
ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
}
}
#ifdef GRPC_TSAN
/* See the definition of g_poll_sync for more details */
gpr_atm_acq_load(&g_epoll_sync);
/* See the definition of g_poll_sync for more details */
gpr_atm_acq_load(&g_epoll_sync);
#endif /* defined(GRPC_TSAN) */
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
if (data_ptr == &pi->workqueue_wakeup_fd) {
append_error(error,
grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
err_desc);
maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
"%d) got merged",
(void *)pollset, (void *)worker, epoll_fd);
/* This means that our polling island is merged with a different
island. We do not have to do anything here since the subsequent call
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
grpc_fd *fd = data_ptr;
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
if (read_ev || cancel) {
fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
}
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
"%d) got merged",
(void *)pollset, (void *)worker, epoll_fd);
/* This means that our polling island is merged with a different
island. We do not have to do anything here since the subsequent call
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
grpc_fd *fd = data_ptr;
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
if (read_ev || cancel) {
fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
}
}
g_current_thread_polling_island = NULL;
gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
GRPC_POLLING_TRACE("pollset_work: ends");
}
g_current_thread_polling_island = NULL;
gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
GPR_ASSERT(pi != NULL);
/* Before leaving, release the extra ref we added to the polling island. It
@ -1879,7 +1690,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@ -1897,10 +1707,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};

@ -648,8 +648,6 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
GRPC_FD_UNREF(fd, "poll");
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
/*******************************************************************************
* pollset_posix.c
*/
@ -1288,30 +1286,6 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* workqueue stubs
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
return grpc_schedule_on_exec_ctx;
}
/*******************************************************************************
* Condition Variable polling extensions
*/
@ -1529,7 +1503,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@ -1547,10 +1520,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};

@ -171,10 +171,6 @@ grpc_fd *grpc_fd_create(int fd, const char *name) {
return g_event_engine->fd_create(fd, name);
}
grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd) {
return g_event_engine->fd_get_workqueue(fd);
}
int grpc_fd_wrapped_fd(grpc_fd *fd) {
return g_event_engine->fd_wrapped_fd(fd);
}
@ -276,26 +272,4 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd);
}
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return g_event_engine->workqueue_ref(workqueue, file, line, reason);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
}
#else
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return g_event_engine->workqueue_ref(workqueue);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
g_event_engine->workqueue_unref(exec_ctx, workqueue);
}
#endif
grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
return g_event_engine->workqueue_scheduler(workqueue);
}
#endif // GRPC_POSIX_SOCKET

@ -41,7 +41,6 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
extern grpc_tracer_flag grpc_polling_trace; /* Disabled by default */
@ -60,7 +59,6 @@ typedef struct grpc_event_engine_vtable {
void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure);
bool (*fd_is_shutdown)(grpc_fd *fd);
grpc_workqueue *(*fd_get_workqueue)(grpc_fd *fd);
grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
grpc_fd *fd);
@ -97,17 +95,6 @@ typedef struct grpc_event_engine_vtable {
grpc_pollset_set *pollset_set, grpc_fd *fd);
void (*shutdown_engine)(void);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
int line, const char *reason);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
grpc_closure_scheduler *(*workqueue_scheduler)(grpc_workqueue *workqueue);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);
@ -121,9 +108,6 @@ const char *grpc_get_poll_strategy_name();
This takes ownership of closing the fd. */
grpc_fd *grpc_fd_create(int fd, const char *name);
/* Get a workqueue that's associated with this fd */
grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd);
/* Return the wrapped fd, or -1 if it has been released or closed. */
int grpc_fd_wrapped_fd(grpc_fd *fd);

@ -38,7 +38,6 @@
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {

@ -36,132 +36,175 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"
#define MAX_DEPTH 2
typedef struct grpc_executor_data {
int busy; /**< is the thread currently running? */
int shutting_down; /**< has \a grpc_shutdown() been invoked? */
int pending_join; /**< has the thread finished but not been joined? */
grpc_closure_list closures; /**< collection of pending work */
gpr_thd_id tid; /**< thread id of the thread, only valid if \a busy or \a
pending_join are true */
gpr_thd_options options;
typedef struct {
gpr_mu mu;
} grpc_executor;
gpr_cv cv;
grpc_closure_list elems;
size_t depth;
bool shutdown;
gpr_thd_id id;
} thread_state;
static grpc_executor g_executor;
static thread_state *g_thread_state;
static size_t g_max_threads;
static gpr_atm g_cur_threads;
static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
void grpc_executor_init() {
memset(&g_executor, 0, sizeof(grpc_executor));
gpr_mu_init(&g_executor.mu);
g_executor.options = gpr_thd_options_default();
gpr_thd_options_set_joinable(&g_executor.options);
}
GPR_TLS_DECL(g_this_thread_state);
/* thread body */
static void closure_exec_thread_func(void *ignored) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (1) {
gpr_mu_lock(&g_executor.mu);
if (g_executor.shutting_down != 0) {
gpr_mu_unlock(&g_executor.mu);
break;
}
if (grpc_closure_list_empty(g_executor.closures)) {
/* no more work, time to die */
GPR_ASSERT(g_executor.busy == 1);
g_executor.busy = 0;
gpr_mu_unlock(&g_executor.mu);
break;
} else {
grpc_closure *c = g_executor.closures.head;
grpc_closure_list_init(&g_executor.closures);
gpr_mu_unlock(&g_executor.mu);
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
static void executor_thread(void *arg);
static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
size_t n = 0;
grpc_closure *c = list.head;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
#ifndef NDEBUG
c->scheduled = false;
c->scheduled = false;
#endif
c->cb(&exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
}
grpc_exec_ctx_flush(&exec_ctx);
}
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
}
grpc_exec_ctx_finish(&exec_ctx);
return n;
}
/* Spawn the thread if new work has arrived and no thread is up */
static void maybe_spawn_locked() {
if (grpc_closure_list_empty(g_executor.closures) == 1) {
return;
}
if (g_executor.shutting_down == 1) {
return;
}
bool grpc_executor_is_threaded() {
return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
}
if (g_executor.busy != 0) {
/* Thread still working. New work will be picked up by the already running
 * thread. Not spawning anything. */
return;
} else if (g_executor.pending_join != 0) {
/* Pick up the remains of the previous incarnation of the thread. */
gpr_thd_join(g_executor.tid);
g_executor.pending_join = 0;
void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
if (threading) {
if (cur_threads > 0) return;
g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
gpr_atm_no_barrier_store(&g_cur_threads, 1);
gpr_tls_init(&g_this_thread_state);
g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads);
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_init(&g_thread_state[i].mu);
gpr_cv_init(&g_thread_state[i].cv);
g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
}
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0],
&opt);
} else {
if (cur_threads == 0) return;
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_lock(&g_thread_state[i].mu);
g_thread_state[i].shutdown = true;
gpr_cv_signal(&g_thread_state[i].cv);
gpr_mu_unlock(&g_thread_state[i].mu);
}
/* ensure no thread is adding a new thread... once this is past, then
no thread will try to add a new one either (since shutdown is true) */
gpr_spinlock_lock(&g_adding_thread_lock);
gpr_spinlock_unlock(&g_adding_thread_lock);
for (gpr_atm i = 0; i < g_cur_threads; i++) {
gpr_thd_join(g_thread_state[i].id);
}
gpr_atm_no_barrier_store(&g_cur_threads, 0);
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
run_closures(exec_ctx, g_thread_state[i].elems);
}
gpr_free(g_thread_state);
gpr_tls_destroy(&g_this_thread_state);
}
}
/* All previous instances of the thread should have been joined at this point.
* Spawn time! */
g_executor.busy = 1;
GPR_ASSERT(gpr_thd_new(&g_executor.tid, closure_exec_thread_func, NULL,
&g_executor.options));
g_executor.pending_join = 1;
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
gpr_mu_lock(&g_executor.mu);
if (g_executor.shutting_down == 0) {
grpc_closure_list_append(&g_executor.closures, closure, error);
maybe_spawn_locked();
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
grpc_executor_set_threading(exec_ctx, false);
}
static void executor_thread(void *arg) {
thread_state *ts = arg;
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
size_t subtract_depth = 0;
for (;;) {
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
if (ts->shutdown) {
gpr_mu_unlock(&ts->mu);
break;
}
grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
subtract_depth = run_closures(&exec_ctx, exec);
grpc_exec_ctx_flush(&exec_ctx);
}
gpr_mu_unlock(&g_executor.mu);
grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
int pending_join;
gpr_mu_lock(&g_executor.mu);
pending_join = g_executor.pending_join;
g_executor.shutting_down = 1;
gpr_mu_unlock(&g_executor.mu);
/* we can release the lock at this point despite the access to the closure
* list below because we aren't accepting new work */
/* Execute pending callbacks, some may be performing cleanups */
grpc_closure *c = g_executor.closures.head;
grpc_closure_list_init(&g_executor.closures);
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count == 0) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
return;
}
thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
if (ts == NULL) {
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
}
grpc_exec_ctx_flush(exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
if (pending_join) {
gpr_thd_join(g_executor.tid);
gpr_mu_lock(&ts->mu);
if (grpc_closure_list_empty(ts->elems)) {
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
ts->depth++;
bool try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
gpr_mu_unlock(&ts->mu);
cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count < g_max_threads) {
gpr_log(GPR_DEBUG, "Creating internal grpc thread #%" PRIdPTR,
cur_thread_count + 1);
gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
&g_thread_state[cur_thread_count], &opt);
}
gpr_spinlock_unlock(&g_adding_thread_lock);
} else {
gpr_mu_unlock(&ts->mu);
}
gpr_mu_destroy(&g_executor.mu);
}
static const grpc_closure_scheduler_vtable executor_vtable = {

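Taken together, the executor.c rewrite above replaces the old single lazily-spawned thread with a pool of up to 2 * gpr_cpu_num_cores() threads: executor_push prefers the current executor thread (tracked in TLS), otherwise hashes the exec_ctx pointer to pick a queue, and spawns an additional thread only once a queue's depth exceeds MAX_DEPTH. A minimal caller-side sketch of offloading work (my_offloaded_work is a hypothetical name; grpc_executor_scheduler is the scheduler declared in executor.h):

static void my_offloaded_work(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
  /* runs on an executor pool thread, or inline on this exec_ctx when
     threading has been disabled */
}

/* at the call site: */
grpc_closure_sched(exec_ctx,
                   grpc_closure_create(my_offloaded_work, NULL,
                                       grpc_executor_scheduler),
                   GRPC_ERROR_NONE);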
@ -41,11 +41,18 @@
* This mechanism is meant to outsource work (grpc_closure instances) to a
* thread, for those cases where blocking isn't an option but there isn't a
* non-blocking solution available. */
void grpc_executor_init();
void grpc_executor_init(grpc_exec_ctx *exec_ctx);
extern grpc_closure_scheduler *grpc_executor_scheduler;
/** Shut down the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
/** Is the executor multi-threaded? */
bool grpc_executor_is_threaded();
/** Enable/disable threading - must be called after grpc_executor_init and
    before grpc_executor_shutdown */
void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool enable);
#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */
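A sketch of the intended toggle sequence, mirroring what the fuzzer changes later in this commit do to keep execution deterministic (closures then run inline on the current exec_ctx instead of on pool threads):

grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_executor_set_threading(&exec_ctx, false); /* between init and shutdown */
grpc_exec_ctx_finish(&exec_ctx);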

@ -44,6 +44,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/timer.h"
@ -56,11 +57,12 @@ static gpr_cv g_rcv;
static int g_shutdown;
static grpc_iomgr_object g_root_object;
void grpc_iomgr_init(void) {
void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
g_shutdown = 0;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv);
grpc_exec_ctx_global_init();
grpc_executor_init(exec_ctx);
grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root";
@ -68,7 +70,7 @@ void grpc_iomgr_init(void) {
grpc_iomgr_platform_init();
}
void grpc_iomgr_start(void) { grpc_timer_manager_init(); }
void grpc_iomgr_start(grpc_exec_ctx *exec_ctx) { grpc_timer_manager_init(); }
static size_t count_objects(void) {
grpc_iomgr_object *obj;
@ -93,6 +95,7 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
grpc_timer_manager_shutdown();
grpc_iomgr_platform_flush();
grpc_executor_shutdown(exec_ctx);
gpr_mu_lock(&g_mu);
g_shutdown = 1;

@ -38,10 +38,10 @@
#include "src/core/lib/iomgr/port.h"
/** Initializes the iomgr. */
void grpc_iomgr_init(void);
void grpc_iomgr_init(grpc_exec_ctx *exec_ctx);
/** Starts any background threads for iomgr. */
void grpc_iomgr_start(void);
void grpc_iomgr_start(grpc_exec_ctx *exec_ctx);
/** Signals the intention to shut down the iomgr. Expects to be able to flush
* exec_ctx. */
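With both entry points now taking an exec_ctx, a caller-provided context threads through the whole iomgr lifecycle; a minimal sketch matching the updated tests later in this diff:

grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
/* ... use the iomgr ... */
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);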

@ -581,7 +581,7 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create(NULL);
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
@ -594,12 +594,11 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
(intptr_t)resource_quota);
}
grpc_closure_init(
&resource_quota->rq_step_closure, rq_step, resource_quota,
grpc_combiner_finally_scheduler(resource_quota->combiner, true));
grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota,
grpc_combiner_finally_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
rq_reclamation_done, resource_quota,
grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_combiner_scheduler(resource_quota->combiner));
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_quota->roots[i] = NULL;
}
@ -704,18 +703,18 @@ grpc_resource_user *grpc_resource_user_create(
grpc_resource_quota_ref_internal(resource_quota);
grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
resource_user,
grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->add_to_free_pool_closure,
&ru_add_to_free_pool, resource_user,
grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->post_reclaimer_closure[0],
&ru_post_benign_reclaimer, resource_user,
grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->post_reclaimer_closure[1],
&ru_post_destructive_reclaimer, resource_user,
grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user,
grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_combiner_scheduler(resource_quota->combiner));
gpr_mu_init(&resource_user->mu);
gpr_atm_rel_store(&resource_user->refs, 1);
gpr_atm_rel_store(&resource_user->shutdown, 0);
@ -772,12 +771,12 @@ void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
grpc_closure_sched(exec_ctx,
grpc_closure_create(
ru_shutdown, resource_user,
grpc_combiner_scheduler(
resource_user->resource_quota->combiner, false)),
GRPC_ERROR_NONE);
grpc_closure_sched(
exec_ctx,
grpc_closure_create(
ru_shutdown, resource_user,
grpc_combiner_scheduler(resource_user->resource_quota->combiner)),
GRPC_ERROR_NONE);
}
}
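Every call site above shrinks because grpc_combiner_scheduler and grpc_combiner_finally_scheduler drop their boolean argument. Scheduling onto a combiner is now just (my_cb and my_arg are hypothetical names):

grpc_closure_sched(exec_ctx,
                   grpc_closure_create(my_cb, my_arg,
                                       grpc_combiner_scheduler(combiner)),
                   GRPC_ERROR_NONE);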

@ -558,26 +558,15 @@ static int tcp_get_fd(grpc_endpoint *ep) {
return tcp->fd;
}
static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return grpc_fd_get_workqueue(tcp->em_fd);
}
static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return tcp->resource_user;
}
static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_write,
tcp_get_workqueue,
tcp_add_to_pollset,
tcp_add_to_pollset_set,
tcp_shutdown,
tcp_destroy,
tcp_get_resource_user,
tcp_get_peer,
tcp_get_fd};
static const grpc_endpoint_vtable vtable = {
tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
tcp_shutdown, tcp_destroy, tcp_get_resource_user, tcp_get_peer,
tcp_get_fd};
#define MAX_CHUNK_SIZE 32 * 1024 * 1024

@ -1,87 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_H
#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_H
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/port.h"
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue_windows.h"
#endif
/* grpc_workqueue is forward declared in exec_ctx.h */
/* Reference counting functions. Always use the macros
   (GRPC_WORKQUEUE_{REF,UNREF}).
   Pass in a descriptive reason string for reffing/unreffing as the last
   argument to each macro. When GRPC_WORKQUEUE_REFCOUNT_DEBUG is defined, that
   string will be printed alongside the refcount. When it is not defined, the
   string will be discarded at compile time. */
/*#define GRPC_WORKQUEUE_REFCOUNT_DEBUG*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define GRPC_WORKQUEUE_REF(p, r) \
grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
/** Fetch the workqueue closure scheduler. Items added to a work queue will be
started in approximately the order they were enqueued, on some thread that
may or may not be the current thread. Successive closures enqueued onto a
workqueue MAY be executed concurrently.
It is generally more expensive to add a closure to a workqueue than to the
execution context, both in terms of CPU work and in execution latency.
Use work queues when it's important that other threads be given a chance to
tackle some workload. */
grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue);
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */
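With workqueue.h deleted outright, the use case its comment describes — letting other threads get a chance to tackle a workload — falls to the executor. A plausible migration sketch, not taken verbatim from this commit (c, cb, and arg stand in for the surrounding code):

grpc_closure c;
/* before: grpc_closure_init(&c, cb, arg, grpc_workqueue_scheduler(wq)); */
grpc_closure_init(&c, cb, arg, grpc_executor_scheduler);
grpc_closure_sched(exec_ctx, &c, GRPC_ERROR_NONE);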

@ -1,65 +0,0 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/lib/iomgr/port.h"
#ifdef GRPC_UV
#include "src/core/lib/iomgr/workqueue.h"
// Minimal implementation of grpc_workqueue for libuv
// Works by directly enqueuing workqueue items onto the current execution
// context, which is at least correct, if not performant or in the spirit of
// workqueues.
void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif
grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
return grpc_schedule_on_exec_ctx;
}
#endif /* GRPC_UV */

@ -1,37 +0,0 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H */

@ -1,63 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue.h"
// Minimal implementation of grpc_workqueue for Windows
// Works by directly enqueuing workqueue items onto the current execution
// context, which is at least correct, if not performant or in the spirit of
// workqueues.
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif
grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
return grpc_schedule_on_exec_ctx;
}
#endif /* GPR_WINDOWS */

@ -1,37 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H
#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H */

@ -65,7 +65,8 @@ typedef struct {
*/
grpc_polling_entity *pollent;
grpc_transport_stream_op_batch op;
uint8_t security_context_set;
gpr_atm security_context_set;
gpr_mu security_context_mu;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
} call_data;
@ -253,19 +254,26 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem *l;
grpc_client_security_context *sec_ctx = NULL;
if (!op->cancel_stream && calld->security_context_set == 0) {
calld->security_context_set = 1;
GPR_ASSERT(op->payload->context != NULL);
if (op->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
op->payload->context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create();
op->payload->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
if (!op->cancel_stream) {
/* double checked lock over security context to ensure it's set once */
if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
gpr_mu_lock(&calld->security_context_mu);
if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
GPR_ASSERT(op->payload->context != NULL);
if (op->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
op->payload->context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create();
op->payload->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
sec_ctx = op->payload->context[GRPC_CONTEXT_SECURITY].value;
GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
sec_ctx->auth_context =
GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
gpr_atm_rel_store(&calld->security_context_set, 1);
}
gpr_mu_unlock(&calld->security_context_mu);
}
sec_ctx = op->payload->context[GRPC_CONTEXT_SECURITY].value;
GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
sec_ctx->auth_context =
GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
if (op->send_initial_metadata) {
@ -312,6 +320,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
gpr_mu_init(&calld->security_context_mu);
return GRPC_ERROR_NONE;
}
@ -335,6 +344,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->method);
}
reset_auth_metadata_context(&calld->auth_md_context);
gpr_mu_destroy(&calld->security_context_mu);
}
/* Constructor for channel_data */

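The pattern introduced above is a classic double-checked lock: an acquire-load fast path, a mutex plus re-check on the slow path, and a release-store to publish the initialized state. Distilled into a generic sketch (all names here are hypothetical, not part of the filter):

typedef struct {
  gpr_atm initialized; /* 0 until init has been published */
  gpr_mu mu;
} lazy_slot;

static void ensure_initialized(lazy_slot *slot, void (*init_fn)(void *),
                               void *arg) {
  if (gpr_atm_acq_load(&slot->initialized) == 0) {
    gpr_mu_lock(&slot->mu);
    if (gpr_atm_acq_load(&slot->initialized) == 0) {
      init_fn(arg); /* runs at most once across all threads */
      gpr_atm_rel_store(&slot->initialized, 1);
    }
    gpr_mu_unlock(&slot->mu);
  }
}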
@ -380,11 +380,6 @@ static int endpoint_get_fd(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_fd(ep->wrapped_ep);
}
static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
return grpc_endpoint_get_workqueue(ep->wrapped_ep);
}
static grpc_resource_user *endpoint_get_resource_user(
grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
@ -393,7 +388,6 @@ static grpc_resource_user *endpoint_get_resource_user(
static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_get_workqueue,
endpoint_add_to_pollset,
endpoint_add_to_pollset_set,
endpoint_shutdown,

@ -1468,7 +1468,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op_batch *stream_op = &bctl->op;
grpc_transport_stream_op_batch_payload *stream_op_payload =
&call->stream_op_payload;
stream_op->covered_by_poller = true;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
@ -1657,10 +1656,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
/* IF this is a server, then GRPC_OP_RECV_INITIAL_METADATA *must* come
from server.c. In that case, it's coming from accept_stream, and in
that case we're not necessarily covered by a poller. */
stream_op->covered_by_poller = call->is_client;
call->received_initial_metadata = true;
call->buffered_metadata[0] =
op->data.recv_initial_metadata.recv_initial_metadata;

@ -128,6 +128,7 @@ void grpc_init(void) {
int i;
gpr_once_init(&g_basic_init, do_basic_init);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&g_init_mu);
if (++g_initializations == 1) {
gpr_time_init();
@ -154,8 +155,7 @@ void grpc_init(void) {
grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
#endif
grpc_security_pre_init();
grpc_iomgr_init();
grpc_executor_init();
grpc_iomgr_init(&exec_ctx);
gpr_timers_global_init();
grpc_handshaker_factory_registry_init();
grpc_security_init();
@ -171,19 +171,20 @@ void grpc_init(void) {
grpc_tracer_init("GRPC_TRACE");
/* no more changes to channel init pipelines */
grpc_channel_init_finalize();
grpc_iomgr_start();
grpc_iomgr_start(&exec_ctx);
}
gpr_mu_unlock(&g_init_mu);
grpc_exec_ctx_finish(&exec_ctx);
GRPC_API_TRACE("grpc_init(void)", 0, ());
}
void grpc_shutdown(void) {
int i;
GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
grpc_executor_shutdown(&exec_ctx);
grpc_iomgr_shutdown(&exec_ctx);
gpr_timers_global_destroy();
grpc_tracer_shutdown();

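grpc_shutdown's replacement exec_ctx is the load-bearing detail here: built with grpc_never_ready_to_finish, it keeps flushing — and thus keeps draining executor and iomgr work — until the work is actually gone, rather than stopping at the first readiness check. The shutdown shape, condensed from the diff above:

grpc_exec_ctx exec_ctx =
    GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
grpc_executor_shutdown(&exec_ctx);
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);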
@ -127,10 +127,6 @@ typedef struct grpc_transport_stream_op_batch {
/** Values for the stream op (fields set are determined by flags above) */
grpc_transport_stream_op_batch_payload *payload;
/** Is the completion of this op covered by a poller (if false: the op should
complete independently of some pollset being polled) */
bool covered_by_poller : 1;
/** Send initial metadata to the peer, from the provided metadata batch. */
bool send_initial_metadata : 1;

@ -79,9 +79,6 @@ char *grpc_transport_stream_op_batch_string(
gpr_strvec b;
gpr_strvec_init(&b);
gpr_strvec_add(
&b, gpr_strdup(op->covered_by_poller ? "[COVERED]" : "[UNCOVERED]"));
if (op->send_initial_metadata) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));

@ -158,8 +158,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',

@ -126,11 +126,10 @@ static void call_resolver_next_after_locking(grpc_exec_ctx *exec_ctx,
a->resolver = resolver;
a->result = result;
a->on_complete = on_complete;
grpc_closure_sched(
exec_ctx,
grpc_closure_create(call_resolver_next_now_lock_taken, a,
grpc_combiner_scheduler(resolver->combiner, false)),
GRPC_ERROR_NONE);
grpc_closure_sched(exec_ctx, grpc_closure_create(
call_resolver_next_now_lock_taken, a,
grpc_combiner_scheduler(resolver->combiner)),
GRPC_ERROR_NONE);
}
int main(int argc, char **argv) {
@ -138,7 +137,7 @@ int main(int argc, char **argv) {
grpc_init();
gpr_mu_init(&g_mu);
g_combiner = grpc_combiner_create(NULL);
g_combiner = grpc_combiner_create();
grpc_resolve_address = my_resolve_address;
grpc_channel_args *result = (grpc_channel_args *)1;

@ -81,7 +81,7 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
g_combiner = grpc_combiner_create(NULL);
g_combiner = grpc_combiner_create();
dns = grpc_resolver_factory_lookup("dns");

@ -67,12 +67,11 @@ static grpc_resolver *build_fake_resolver(
typedef struct on_resolution_arg {
grpc_channel_args *resolver_result;
grpc_channel_args *expected_resolver_result;
bool was_called;
gpr_event ev;
} on_resolution_arg;
void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
on_resolution_arg *res = arg;
res->was_called = true;
// We only check the addresses channel arg because that's the only one
// explicitly set by the test via
// grpc_fake_resolver_response_generator_set_response.
@ -84,11 +83,12 @@ void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_lb_addresses_cmp(actual_lb_addresses, expected_lb_addresses) == 0);
grpc_channel_args_destroy(exec_ctx, res->resolver_result);
grpc_channel_args_destroy(exec_ctx, res->expected_resolver_result);
gpr_event_set(&res->ev, (void *)1);
}
static void test_fake_resolver() {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_combiner *combiner = grpc_combiner_create(NULL);
grpc_combiner *combiner = grpc_combiner_create();
// Create resolver.
grpc_fake_resolver_response_generator *response_generator =
grpc_fake_resolver_response_generator_create();
@ -115,8 +115,9 @@ static void test_fake_resolver() {
on_resolution_arg on_res_arg;
memset(&on_res_arg, 0, sizeof(on_res_arg));
on_res_arg.expected_resolver_result = results;
gpr_event_init(&on_res_arg.ev);
grpc_closure *on_resolution = grpc_closure_create(
on_resolution_cb, &on_res_arg, grpc_combiner_scheduler(combiner, false));
on_resolution_cb, &on_res_arg, grpc_combiner_scheduler(combiner));
// Set resolver results and trigger first resolution. on_resolution_cb
// performs the checks.
@ -125,7 +126,8 @@ static void test_fake_resolver() {
grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
on_resolution);
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(on_res_arg.was_called);
GPR_ASSERT(gpr_event_wait(&on_res_arg.ev,
grpc_timeout_seconds_to_deadline(5)) != NULL);
// Setup update.
grpc_uri *uris_update[] = {
@ -150,8 +152,9 @@ static void test_fake_resolver() {
on_resolution_arg on_res_arg_update;
memset(&on_res_arg_update, 0, sizeof(on_res_arg_update));
on_res_arg_update.expected_resolver_result = results_update;
gpr_event_init(&on_res_arg_update.ev);
on_resolution = grpc_closure_create(on_resolution_cb, &on_res_arg_update,
grpc_combiner_scheduler(combiner, false));
grpc_combiner_scheduler(combiner));
// Set updated resolver results and trigger a second resolution.
grpc_fake_resolver_response_generator_set_response(
@ -159,7 +162,8 @@ static void test_fake_resolver() {
grpc_resolver_next_locked(&exec_ctx, resolver,
&on_res_arg_update.resolver_result, on_resolution);
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(on_res_arg.was_called);
GPR_ASSERT(gpr_event_wait(&on_res_arg_update.ev,
grpc_timeout_seconds_to_deadline(5)) != NULL);
// Requesting a new resolution without re-sending the response shouldn't
// trigger the resolution callback.
@ -167,7 +171,9 @@ static void test_fake_resolver() {
grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
on_resolution);
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(!on_res_arg.was_called);
GPR_ASSERT(gpr_event_wait(&on_res_arg.ev,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
GRPC_COMBINER_UNREF(&exec_ctx, combiner, "test_fake_resolver");
GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_fake_resolver");

@ -104,7 +104,7 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
g_combiner = grpc_combiner_create(NULL);
g_combiner = grpc_combiner_create();
ipv4 = grpc_resolver_factory_lookup("ipv4");
ipv6 = grpc_resolver_factory_lookup("ipv6");

@ -173,10 +173,9 @@ void grpc_fake_resolver_response_generator_set_response(
GPR_ASSERT(generator->resolver != NULL);
generator->next_response = grpc_channel_args_copy(next_response);
grpc_closure_sched(
exec_ctx,
grpc_closure_create(
set_response_cb, generator,
grpc_combiner_scheduler(generator->resolver->base.combiner, false)),
exec_ctx, grpc_closure_create(set_response_cb, generator,
grpc_combiner_scheduler(
generator->resolver->base.combiner)),
GRPC_ERROR_NONE);
}

@ -403,19 +403,19 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset);
grpc_endpoint_add_to_pollset_set(exec_ctx, endpoint, conn->pollset_set);
grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn,
grpc_combiner_scheduler(conn->proxy->combiner, false));
grpc_combiner_scheduler(conn->proxy->combiner));
grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, conn,
grpc_combiner_scheduler(conn->proxy->combiner, false));
grpc_combiner_scheduler(conn->proxy->combiner));
grpc_closure_init(&conn->on_write_response_done, on_write_response_done, conn,
grpc_combiner_scheduler(conn->proxy->combiner, false));
grpc_combiner_scheduler(conn->proxy->combiner));
grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn,
grpc_combiner_scheduler(conn->proxy->combiner, false));
grpc_combiner_scheduler(conn->proxy->combiner));
grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn,
grpc_combiner_scheduler(conn->proxy->combiner, false));
grpc_combiner_scheduler(conn->proxy->combiner));
grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn,
grpc_combiner_scheduler(conn->proxy->combiner, false));
grpc_combiner_scheduler(conn->proxy->combiner));
grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn,
grpc_combiner_scheduler(conn->proxy->combiner, false));
grpc_combiner_scheduler(conn->proxy->combiner));
grpc_slice_buffer_init(&conn->client_read_buffer);
grpc_slice_buffer_init(&conn->client_deferred_write_buffer);
grpc_slice_buffer_init(&conn->client_write_buffer);
@ -456,7 +456,7 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
grpc_end2end_http_proxy* proxy =
(grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy));
memset(proxy, 0, sizeof(*proxy));
proxy->combiner = grpc_combiner_create(NULL);
proxy->combiner = grpc_combiner_create();
gpr_ref_init(&proxy->users, 1);
// Construct proxy address.
const int proxy_port = grpc_pick_unused_port_or_die();

@ -41,6 +41,7 @@
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/iomgr/timer.h"
@ -724,6 +725,11 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
gpr_now_impl = now_impl;
grpc_init();
grpc_timer_manager_set_threading(false);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_executor_set_threading(&exec_ctx, false);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resolve_address = my_resolve_address;
GPR_ASSERT(g_channel == NULL);

@ -37,6 +37,7 @@
#include <grpc/support/alloc.h>
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel.h"
#include "test/core/util/memory_counters.h"
@ -58,6 +59,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
if (leak_check) grpc_memory_counters_init();
grpc_init();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_executor_set_threading(&exec_ctx, false);
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("client_fuzzer");

@ -34,6 +34,7 @@
#include <grpc/grpc.h>
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/server.h"
#include "test/core/util/memory_counters.h"
@ -56,6 +57,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
if (leak_check) grpc_memory_counters_init();
grpc_init();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_executor_set_threading(&exec_ctx, false);
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("server_fuzzer");

@ -44,27 +44,29 @@
static void test_no_op(void) {
gpr_log(GPR_DEBUG, "test_no_op");
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_COMBINER_UNREF(&exec_ctx, grpc_combiner_create(NULL), "test_no_op");
GRPC_COMBINER_UNREF(&exec_ctx, grpc_combiner_create(), "test_no_op");
grpc_exec_ctx_finish(&exec_ctx);
}
static void set_bool_to_true(grpc_exec_ctx *exec_ctx, void *value,
grpc_error *error) {
*(bool *)value = true;
static void set_event_to_true(grpc_exec_ctx *exec_ctx, void *value,
grpc_error *error) {
gpr_event_set(value, (void *)1);
}
static void test_execute_one(void) {
gpr_log(GPR_DEBUG, "test_execute_one");
grpc_combiner *lock = grpc_combiner_create(NULL);
bool done = false;
grpc_combiner *lock = grpc_combiner_create();
gpr_event done;
gpr_event_init(&done);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure_sched(&exec_ctx,
grpc_closure_create(set_bool_to_true, &done,
grpc_combiner_scheduler(lock, false)),
grpc_closure_create(set_event_to_true, &done,
grpc_combiner_scheduler(lock)),
GRPC_ERROR_NONE);
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&done, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_one");
grpc_exec_ctx_finish(&exec_ctx);
}
@ -72,6 +74,7 @@ static void test_execute_one(void) {
typedef struct {
size_t ctr;
grpc_combiner *lock;
gpr_event done;
} thd_args;
typedef struct {
@ -95,23 +98,27 @@ static void execute_many_loop(void *a) {
ex_args *c = gpr_malloc(sizeof(*c));
c->ctr = &args->ctr;
c->value = n++;
grpc_closure_sched(
&exec_ctx, grpc_closure_create(check_one, c, grpc_combiner_scheduler(
args->lock, false)),
GRPC_ERROR_NONE);
grpc_closure_sched(&exec_ctx,
grpc_closure_create(
check_one, c, grpc_combiner_scheduler(args->lock)),
GRPC_ERROR_NONE);
grpc_exec_ctx_flush(&exec_ctx);
}
// sleep for a little bit, to test a combiner draining and another thread
// picking it up
gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100));
}
grpc_closure_sched(&exec_ctx,
grpc_closure_create(set_event_to_true, &args->done,
grpc_combiner_scheduler(args->lock)),
GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
static void test_execute_many(void) {
gpr_log(GPR_DEBUG, "test_execute_many");
grpc_combiner *lock = grpc_combiner_create(NULL);
grpc_combiner *lock = grpc_combiner_create();
gpr_thd_id thds[100];
thd_args ta[GPR_ARRAY_SIZE(thds)];
for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
@ -119,9 +126,12 @@ static void test_execute_many(void) {
gpr_thd_options_set_joinable(&options);
ta[i].ctr = 0;
ta[i].lock = lock;
gpr_event_init(&ta[i].done);
GPR_ASSERT(gpr_thd_new(&thds[i], execute_many_loop, &ta[i], &options));
}
for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
GPR_ASSERT(gpr_event_wait(&ta[i].done,
gpr_inf_future(GPR_CLOCK_REALTIME)) != NULL);
gpr_thd_join(thds[i]);
}
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -129,30 +139,32 @@ static void test_execute_many(void) {
grpc_exec_ctx_finish(&exec_ctx);
}
static bool got_in_finally = false;
static gpr_event got_in_finally;
static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
got_in_finally = true;
gpr_event_set(&got_in_finally, (void *)1);
}
static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_closure_sched(exec_ctx, grpc_closure_create(
in_finally, NULL,
grpc_combiner_finally_scheduler(arg, false)),
grpc_closure_sched(exec_ctx,
grpc_closure_create(in_finally, arg,
grpc_combiner_finally_scheduler(arg)),
GRPC_ERROR_NONE);
}
static void test_execute_finally(void) {
gpr_log(GPR_DEBUG, "test_execute_finally");
grpc_combiner *lock = grpc_combiner_create(NULL);
grpc_combiner *lock = grpc_combiner_create();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure_sched(&exec_ctx,
grpc_closure_create(add_finally, lock,
grpc_combiner_scheduler(lock, false)),
GRPC_ERROR_NONE);
gpr_event_init(&got_in_finally);
grpc_closure_sched(
&exec_ctx,
grpc_closure_create(add_finally, lock, grpc_combiner_scheduler(lock)),
GRPC_ERROR_NONE);
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(got_in_finally);
GPR_ASSERT(gpr_event_wait(&got_in_finally,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_finally");
grpc_exec_ctx_finish(&exec_ctx);
}
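Because combiner closures can now land on executor threads, the tests above replace plain bool flags with gpr_event, which gives cross-thread signaling plus a bounded wait. The shape used throughout:

gpr_event done;
gpr_event_init(&done);
/* ... schedule a closure whose callback calls
   gpr_event_set(&done, (void *)1); ... */
GPR_ASSERT(gpr_event_wait(&done, grpc_timeout_seconds_to_deadline(5)) !=
           NULL);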

@ -265,25 +265,30 @@ static void read_and_write_test(grpc_endpoint_test_config config,
static void inc_on_failure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
gpr_mu_lock(g_mu);
*(int *)arg += (error != GRPC_ERROR_NONE);
GPR_ASSERT(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, NULL)));
gpr_mu_unlock(g_mu);
}
static void wait_for_fail_count(grpc_exec_ctx *exec_ctx, int *fail_count,
int want_fail_count) {
grpc_exec_ctx_flush(exec_ctx);
for (int i = 0; i < 5 && *fail_count < want_fail_count; i++) {
gpr_mu_lock(g_mu);
gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 &&
*fail_count < want_fail_count) {
grpc_pollset_worker *worker = NULL;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
gpr_timespec deadline =
gpr_time_add(now, gpr_time_from_seconds(1, GPR_TIMESPAN));
gpr_mu_lock(g_mu);
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(exec_ctx, g_pollset, &worker, now, deadline)));
grpc_pollset_work(exec_ctx, g_pollset, &worker,
gpr_now(deadline.clock_type), deadline)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(g_mu);
}
GPR_ASSERT(*fail_count == want_fail_count);
gpr_mu_unlock(g_mu);
}
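wait_for_fail_count now polls until a deadline instead of flushing a fixed five times. The same poll-until-condition shape, generalized (condition_met is a hypothetical stand-in for the predicate being awaited):

gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
gpr_mu_lock(g_mu);
while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 &&
       !condition_met()) {
  grpc_pollset_worker *worker = NULL;
  GPR_ASSERT(GRPC_LOG_IF_ERROR(
      "pollset_work", grpc_pollset_work(exec_ctx, g_pollset, &worker,
                                        gpr_now(deadline.clock_type),
                                        deadline)));
  gpr_mu_unlock(g_mu);
  grpc_exec_ctx_flush(exec_ctx); /* run closures outside the pollset mu */
  gpr_mu_lock(g_mu);
}
gpr_mu_unlock(g_mu);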
static void multiple_shutdown_test(grpc_endpoint_test_config config) {

@ -47,7 +47,6 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "test/core/util/test_config.h"
typedef struct test_pollset {
@ -131,86 +130,6 @@ static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx,
}
}
static void increment(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
++*(int *)arg;
}
/*
* Validate that merging two workqueues preserves the closures in each queue.
* This is a regression test for a bug in
* polling_island_merge()[ev_epoll_linux.c], where the parent relationship was
* inverted.
*/
#define NUM_FDS 2
#define NUM_POLLSETS 2
#define NUM_CLOSURES 4
static void test_pollset_queue_merge_items() {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
test_fd tfds[NUM_FDS];
int fds[NUM_FDS];
test_pollset pollsets[NUM_POLLSETS];
grpc_closure closures[NUM_CLOSURES];
int i;
int result = 0;
test_fd_init(tfds, fds, NUM_FDS);
test_pollset_init(pollsets, NUM_POLLSETS);
/* Two distinct polling islands, each with its own FD and pollset. */
for (i = 0; i < NUM_FDS; i++) {
grpc_pollset_add_fd(&exec_ctx, pollsets[i].pollset, tfds[i].fd);
grpc_exec_ctx_flush(&exec_ctx);
}
/* Enqueue the closures: three to polling island 0 and one to polling island 1. */
grpc_closure_init(
closures, increment, &result,
grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[0].fd)));
grpc_closure_init(
closures + 1, increment, &result,
grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[0].fd)));
grpc_closure_init(
closures + 2, increment, &result,
grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[0].fd)));
grpc_closure_init(
closures + 3, increment, &result,
grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[1].fd)));
for (i = 0; i < NUM_CLOSURES; ++i) {
grpc_closure_sched(&exec_ctx, closures + i, GRPC_ERROR_NONE);
}
/* Merge the two polling islands. */
grpc_pollset_add_fd(&exec_ctx, pollsets[0].pollset, tfds[1].fd);
grpc_exec_ctx_flush(&exec_ctx);
/*
 * Drain the closures and verify that each one runs when we perform work on
 * the merged polling island.
 */
grpc_pollset_worker *worker = NULL;
for (i = 0; i < NUM_CLOSURES; ++i) {
const gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(2, GPR_TIMESPAN));
gpr_mu_lock(pollsets[1].mu);
GRPC_LOG_IF_ERROR(
"grpc_pollset_work",
grpc_pollset_work(&exec_ctx, pollsets[1].pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline));
gpr_mu_unlock(pollsets[1].mu);
}
GPR_ASSERT(result == NUM_CLOSURES);
test_fd_cleanup(&exec_ctx, tfds, NUM_FDS);
test_pollset_cleanup(&exec_ctx, pollsets, NUM_POLLSETS);
grpc_exec_ctx_finish(&exec_ctx);
}
#undef NUM_FDS
#undef NUM_POLLSETS
#undef NUM_CLOSURES
/*
* Cases to test:
* case 1) Polling islands of both fd and pollset are NULL
@ -402,13 +321,13 @@ static void test_threading(void) {
int main(int argc, char **argv) {
const char *poll_strategy = NULL;
grpc_test_init(argc, argv);
grpc_iomgr_init();
grpc_iomgr_start();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
poll_strategy = grpc_get_poll_strategy_name();
if (poll_strategy != NULL && strcmp(poll_strategy, "epollsig") == 0) {
test_add_fd_to_pollset();
test_pollset_queue_merge_items();
test_threading();
} else {
gpr_log(GPR_INFO,
@ -417,11 +336,8 @@ int main(int argc, char **argv) {
poll_strategy);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
return 0;
}
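
Assembled from the calls in this hunk, the lifecycle these test mains now follow (a sketch; the iomgr entry points take the exec_ctx explicitly):

grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
/* ... test body, reusing the same exec_ctx ... */
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
return 0;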
#else /* defined(GRPC_LINUX_EPOLL) */

@ -45,8 +45,9 @@ int main(int argc, char **argv) {
grpc_endpoint_pair p;
grpc_test_init(argc, argv);
grpc_iomgr_init();
grpc_iomgr_start();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
/* set max # of file descriptors to a low value, and
verify we can create and destroy many more than this number
@ -57,19 +58,15 @@ int main(int argc, char **argv) {
grpc_resource_quota_create("fd_conservation_posix_test");
for (i = 0; i < 100; i++) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
p = grpc_iomgr_create_endpoint_pair("test", NULL);
grpc_endpoint_destroy(&exec_ctx, p.client);
grpc_endpoint_destroy(&exec_ctx, p.server);
grpc_exec_ctx_finish(&exec_ctx);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_resource_quota_unref(resource_quota);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
return 0;
}

@ -542,8 +542,8 @@ int main(int argc, char **argv) {
grpc_closure destroyed;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_iomgr_init();
grpc_iomgr_start();
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
g_pollset = gpr_zalloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
test_grpc_fd();

@ -447,8 +447,8 @@ int main(int argc, char **argv) {
const char *poll_strategy = grpc_get_poll_strategy_name();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_iomgr_init();
grpc_iomgr_start();
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
if (poll_strategy != NULL &&
(strcmp(poll_strategy, "epoll") == 0 ||

@ -174,16 +174,13 @@ static void test_unix_socket_path_name_too_long(void) {
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_executor_init();
grpc_iomgr_init();
grpc_iomgr_start();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
test_unix_socket();
test_unix_socket_path_name_too_long();
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_executor_shutdown(&exec_ctx);
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_executor_shutdown(&exec_ctx);
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
return 0;
}

@ -73,7 +73,9 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) {
grpc_closure do_nothing_cb;
grpc_closure_init(&do_nothing_cb, do_nothing, NULL,
grpc_schedule_on_exec_ctx);
gpr_mu_lock(args->mu);
grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb);
gpr_mu_unlock(args->mu);
// exec_ctx needs to be flushed before calling grpc_pollset_destroy()
grpc_exec_ctx_flush(exec_ctx);
grpc_pollset_destroy(exec_ctx, args->pollset);
@ -263,9 +265,9 @@ static void test_unparseable_hostports(void) {
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_executor_init();
grpc_iomgr_init();
grpc_iomgr_start();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_init(&exec_ctx);
grpc_iomgr_start(&exec_ctx);
test_localhost();
test_default_port();
test_non_numeric_default_port();
@ -274,11 +276,8 @@ int main(int argc, char **argv) {
test_ipv6_without_port();
test_invalid_ip_addresses();
test_unparseable_hostports();
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_executor_shutdown(&exec_ctx);
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_executor_shutdown(&exec_ctx);
grpc_iomgr_shutdown(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
return 0;
}

@ -39,15 +39,30 @@
#include "src/core/lib/slice/slice_internal.h"
#include "test/core/util/test_config.h"
gpr_mu g_mu;
gpr_cv g_cv;
static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
gpr_mu_lock(&g_mu);
++*(int *)a;
gpr_cv_signal(&g_cv);
gpr_mu_unlock(&g_mu);
}
static void assert_counter_becomes(int *ctr, int value) {
gpr_mu_lock(&g_mu);
gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
while (*ctr != value) {
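    /* gpr_cv_wait returns non-zero iff the deadline expires, so this asserts
       that the counter reached the target (and g_cv was signalled) in time. */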
GPR_ASSERT(!gpr_cv_wait(&g_cv, &g_mu, deadline));
}
gpr_mu_unlock(&g_mu);
}
static void set_bool_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
*(bool *)a = true;
static void set_event_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
gpr_event_set((gpr_event *)a, (void *)1);
}
grpc_closure *set_bool(bool *p) {
return grpc_closure_create(set_bool_cb, p, grpc_schedule_on_exec_ctx);
grpc_closure *set_event(gpr_event *ev) {
return grpc_closure_create(set_event_cb, ev, grpc_schedule_on_exec_ctx);
}
typedef struct {
@ -154,11 +169,13 @@ static void test_simple_async_alloc(void) {
grpc_resource_quota_resize(q, 1024 * 1024);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -175,15 +192,18 @@ static void test_async_alloc_blocked_by_size(void) {
grpc_resource_quota_create("test_async_alloc_blocked_by_size");
grpc_resource_quota_resize(q, 1);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!done);
GPR_ASSERT(gpr_event_wait(
&ev, grpc_timeout_milliseconds_to_deadline(100)) == NULL);
}
grpc_resource_quota_resize(q, 1024);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != NULL);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, usr, 1024);
@ -200,11 +220,14 @@ static void test_scavenge(void) {
grpc_resource_user *usr1 = grpc_resource_user_create(q, "usr1");
grpc_resource_user *usr2 = grpc_resource_user_create(q, "usr2");
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -212,11 +235,14 @@ static void test_scavenge(void) {
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -234,26 +260,31 @@ static void test_scavenge_blocked(void) {
grpc_resource_quota_resize(q, 1024);
grpc_resource_user *usr1 = grpc_resource_user_create(q, "usr1");
grpc_resource_user *usr2 = grpc_resource_user_create(q, "usr2");
bool done;
gpr_event ev;
{
done = false;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
done = false;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!done);
GPR_ASSERT(gpr_event_wait(
&ev, grpc_timeout_milliseconds_to_deadline(100)) == NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, usr1, 1024);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -272,27 +303,35 @@ static void test_blocked_until_scheduled_reclaim(void) {
grpc_resource_quota_resize(q, 1024);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
bool reclaim_done = false;
gpr_event reclaim_done;
gpr_event_init(&reclaim_done);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, false,
make_reclaimer(usr, 1024, set_bool(&reclaim_done)));
make_reclaimer(usr, 1024, set_event(&reclaim_done)));
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(reclaim_done);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&reclaim_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -311,27 +350,35 @@ static void test_blocked_until_scheduled_reclaim_and_scavenge(void) {
grpc_resource_user *usr1 = grpc_resource_user_create(q, "usr1");
grpc_resource_user *usr2 = grpc_resource_user_create(q, "usr2");
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
bool reclaim_done = false;
gpr_event reclaim_done;
gpr_event_init(&reclaim_done);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr1, false,
make_reclaimer(usr1, 1024, set_bool(&reclaim_done)));
make_reclaimer(usr1, 1024, set_event(&reclaim_done)));
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(reclaim_done);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&reclaim_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -350,27 +397,35 @@ static void test_blocked_until_scheduled_destructive_reclaim(void) {
grpc_resource_quota_resize(q, 1024);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
bool reclaim_done = false;
gpr_event reclaim_done;
gpr_event_init(&reclaim_done);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, true,
make_reclaimer(usr, 1024, set_bool(&reclaim_done)));
make_reclaimer(usr, 1024, set_event(&reclaim_done)));
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(reclaim_done);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&reclaim_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -387,23 +442,31 @@ static void test_unused_reclaim_is_cancelled(void) {
grpc_resource_quota_create("test_unused_reclaim_is_cancelled");
grpc_resource_quota_resize(q, 1024);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
bool benign_done = false;
bool destructive_done = false;
gpr_event benign_done;
gpr_event_init(&benign_done);
gpr_event destructive_done;
gpr_event_init(&destructive_done);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, false, make_unused_reclaimer(set_bool(&benign_done)));
&exec_ctx, usr, false, make_unused_reclaimer(set_event(&benign_done)));
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, true,
make_unused_reclaimer(set_bool(&destructive_done)));
make_unused_reclaimer(set_event(&destructive_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!benign_done);
GPR_ASSERT(!destructive_done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
}
grpc_resource_quota_unref(q);
destroy_user(usr);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
}
static void test_benign_reclaim_is_preferred(void) {
@ -412,35 +475,49 @@ static void test_benign_reclaim_is_preferred(void) {
grpc_resource_quota_create("test_benign_reclaim_is_preferred");
grpc_resource_quota_resize(q, 1024);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
bool benign_done = false;
bool destructive_done = false;
gpr_event benign_done;
gpr_event_init(&benign_done);
gpr_event destructive_done;
gpr_event_init(&destructive_done);
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, false,
make_reclaimer(usr, 1024, set_bool(&benign_done)));
make_reclaimer(usr, 1024, set_event(&benign_done)));
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, true,
make_unused_reclaimer(set_bool(&destructive_done)));
make_unused_reclaimer(set_event(&destructive_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!benign_done);
GPR_ASSERT(!destructive_done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
}
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(benign_done);
GPR_ASSERT(!destructive_done);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -449,8 +526,10 @@ static void test_benign_reclaim_is_preferred(void) {
}
grpc_resource_quota_unref(q);
destroy_user(usr);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
}
static void test_multiple_reclaims_can_be_triggered(void) {
@ -459,35 +538,49 @@ static void test_multiple_reclaims_can_be_triggered(void) {
grpc_resource_quota_create("test_multiple_reclaims_can_be_triggered");
grpc_resource_quota_resize(q, 1024);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
bool benign_done = false;
bool destructive_done = false;
gpr_event benign_done;
gpr_event_init(&benign_done);
gpr_event destructive_done;
gpr_event_init(&destructive_done);
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, false,
make_reclaimer(usr, 512, set_bool(&benign_done)));
make_reclaimer(usr, 512, set_event(&benign_done)));
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, true,
make_reclaimer(usr, 512, set_bool(&destructive_done)));
make_reclaimer(usr, 512, set_event(&destructive_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!benign_done);
GPR_ASSERT(!destructive_done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
}
{
bool done = false;
gpr_event ev;
gpr_event_init(&ev);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
GPR_ASSERT(done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -496,8 +589,10 @@ static void test_multiple_reclaims_can_be_triggered(void) {
}
grpc_resource_quota_unref(q);
destroy_user(usr);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
GPR_ASSERT(gpr_event_wait(&benign_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&destructive_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
}
static void test_resource_user_stays_allocated_until_memory_released(void) {
@ -538,34 +633,44 @@ test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_released(
grpc_resource_quota_resize(q, 1024);
for (int i = 0; i < 10; i++) {
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
bool reclaimer_cancelled = false;
gpr_event reclaimer_cancelled;
gpr_event_init(&reclaimer_cancelled);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, false,
make_unused_reclaimer(set_bool(&reclaimer_cancelled)));
make_unused_reclaimer(set_event(&reclaimer_cancelled)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!reclaimer_cancelled);
GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
}
{
bool allocated = false;
gpr_event allocated;
gpr_event_init(&allocated);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&allocated));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(allocated);
GPR_ASSERT(!reclaimer_cancelled);
GPR_ASSERT(gpr_event_wait(&allocated,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_unref(&exec_ctx, usr);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!reclaimer_cancelled);
GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(reclaimer_cancelled);
GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
grpc_timeout_seconds_to_deadline(5)) != NULL);
}
}
grpc_resource_quota_unref(q);
@ -578,29 +683,37 @@ static void test_reclaimers_can_be_posted_repeatedly(void) {
grpc_resource_quota_resize(q, 1024);
grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
{
bool allocated = false;
gpr_event allocated;
gpr_event_init(&allocated);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&allocated));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(allocated);
GPR_ASSERT(gpr_event_wait(&allocated,
grpc_timeout_seconds_to_deadline(5)) != NULL);
}
for (int i = 0; i < 10; i++) {
bool reclaimer_done = false;
gpr_event reclaimer_done;
gpr_event_init(&reclaimer_done);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, usr, false,
make_reclaimer(usr, 1024, set_bool(&reclaimer_done)));
make_reclaimer(usr, 1024, set_event(&reclaimer_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!reclaimer_done);
GPR_ASSERT(gpr_event_wait(&reclaimer_done,
grpc_timeout_milliseconds_to_deadline(100)) ==
NULL);
}
{
bool allocated = false;
gpr_event allocated;
gpr_event_init(&allocated);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&allocated));
grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(allocated);
GPR_ASSERT(reclaimer_done);
GPR_ASSERT(gpr_event_wait(&allocated,
grpc_timeout_seconds_to_deadline(5)) != NULL);
GPR_ASSERT(gpr_event_wait(&reclaimer_done,
grpc_timeout_seconds_to_deadline(5)) != NULL);
}
}
{
@ -632,7 +745,7 @@ static void test_one_slice(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(num_allocs == start_allocs + 1);
assert_counter_becomes(&num_allocs, start_allocs + 1);
}
{
@ -665,7 +778,7 @@ static void test_one_slice_deleted_late(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(num_allocs == start_allocs + 1);
assert_counter_becomes(&num_allocs, start_allocs + 1);
}
{
@ -709,7 +822,7 @@ static void test_negative_rq_free_pool(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(num_allocs == start_allocs + 1);
assert_counter_becomes(&num_allocs, start_allocs + 1);
}
grpc_resource_quota_resize(q, 512);
@ -735,6 +848,8 @@ static void test_negative_rq_free_pool(void) {
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
gpr_mu_init(&g_mu);
gpr_cv_init(&g_cv);
test_no_op();
test_resize_then_destroy();
test_resource_user_no_op();
@ -757,6 +872,8 @@ int main(int argc, char **argv) {
test_one_slice_deleted_late();
test_resize_to_zero();
test_negative_rq_free_pool();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_cv);
grpc_shutdown();
return 0;
}

@ -162,6 +162,7 @@ static void read_cb(grpc_exec_ctx *exec_ctx, void *user_data,
gpr_log(GPR_INFO, "Read %" PRIuPTR " bytes of %" PRIuPTR, read_bytes,
state->target_read_bytes);
if (state->read_bytes >= state->target_read_bytes) {
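    /* Target reached: kick the pollset so a thread blocked in
       grpc_pollset_work wakes up and observes the final byte count. */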
GPR_ASSERT(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, NULL)));
gpr_mu_unlock(g_mu);
} else {
grpc_endpoint_read(exec_ctx, state->ep, &state->incoming, &state->read_cb);

@ -117,18 +117,9 @@ static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) {
static int me_get_fd(grpc_endpoint *ep) { return -1; }
static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }
static const grpc_endpoint_vtable vtable = {
me_read,
me_write,
me_get_workqueue,
me_add_to_pollset,
me_add_to_pollset_set,
me_shutdown,
me_destroy,
me_get_resource_user,
me_get_peer,
me_read, me_write, me_add_to_pollset, me_add_to_pollset_set,
me_shutdown, me_destroy, me_get_resource_user, me_get_peer,
me_get_fd,
};
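
For reference, the trimmed vtable layout these brace initializers assume, with get_workqueue removed (a reconstruction from this hunk; the authoritative definition lives in src/core/lib/iomgr/endpoint.h, and the pointer signatures below are my best reading of the era's API):

typedef struct grpc_endpoint_vtable {
  void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
               grpc_slice_buffer *slices, grpc_closure *cb);
  void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                grpc_slice_buffer *slices, grpc_closure *cb);
  void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                         grpc_pollset *pollset);
  void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                             grpc_pollset_set *pollset_set);
  void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                   grpc_error *why);
  void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
  grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
  char *(*get_peer)(grpc_endpoint *ep);
  int (*get_fd)(grpc_endpoint *ep);
} grpc_endpoint_vtable;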

@ -169,23 +169,14 @@ static char *me_get_peer(grpc_endpoint *ep) {
static int me_get_fd(grpc_endpoint *ep) { return -1; }
static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }
static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) {
half *m = (half *)ep;
return m->resource_user;
}
static const grpc_endpoint_vtable vtable = {
me_read,
me_write,
me_get_workqueue,
me_add_to_pollset,
me_add_to_pollset_set,
me_shutdown,
me_destroy,
me_get_resource_user,
me_get_peer,
me_read, me_write, me_add_to_pollset, me_add_to_pollset_set,
me_shutdown, me_destroy, me_get_resource_user, me_get_peer,
me_get_fd,
};

@ -92,11 +92,6 @@ static void te_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
gpr_mu_unlock(&te->mu);
}
static grpc_workqueue *te_get_workqueue(grpc_endpoint *ep) {
trickle_endpoint *te = (trickle_endpoint *)ep;
return grpc_endpoint_get_workqueue(te->wrapped);
}
static void te_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset *pollset) {
trickle_endpoint *te = (trickle_endpoint *)ep;
@ -155,16 +150,10 @@ static void te_finish_write(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&te->mu);
}
static const grpc_endpoint_vtable vtable = {te_read,
te_write,
te_get_workqueue,
te_add_to_pollset,
te_add_to_pollset_set,
te_shutdown,
te_destroy,
te_get_resource_user,
te_get_peer,
te_get_fd};
static const grpc_endpoint_vtable vtable = {
te_read, te_write, te_add_to_pollset, te_add_to_pollset_set,
te_shutdown, te_destroy, te_get_resource_user, te_get_peer,
te_get_fd};
grpc_endpoint *grpc_trickle_endpoint_create(grpc_endpoint *wrap,
double bytes_per_second) {

@ -60,16 +60,10 @@ auto &force_library_initialization = Library::get();
class DummyEndpoint : public grpc_endpoint {
public:
DummyEndpoint() {
static const grpc_endpoint_vtable my_vtable = {read,
write,
get_workqueue,
add_to_pollset,
add_to_pollset_set,
shutdown,
destroy,
get_resource_user,
get_peer,
get_fd};
static const grpc_endpoint_vtable my_vtable = {
read, write, add_to_pollset, add_to_pollset_set,
shutdown, destroy, get_resource_user, get_peer,
get_fd};
grpc_endpoint::vtable = &my_vtable;
ru_ = grpc_resource_user_create(Library::get().rq(), "dummy_endpoint");
}

@ -84,12 +84,12 @@ BENCHMARK(BM_ClosureInitAgainstExecCtx);
static void BM_ClosureInitAgainstCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_combiner* combiner = grpc_combiner_create(NULL);
grpc_combiner* combiner = grpc_combiner_create();
grpc_closure c;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
benchmark::DoNotOptimize(grpc_closure_init(
&c, DoNothing, NULL, grpc_combiner_scheduler(combiner, false)));
&c, DoNothing, NULL, grpc_combiner_scheduler(combiner)));
}
GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
grpc_exec_ctx_finish(&exec_ctx);
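
Distilled from the benchmarks in this file, minimal combiner usage under the updated API (a sketch; DoNothing is the no-op callback these benchmarks already use):

grpc_combiner *combiner = grpc_combiner_create(); /* workqueue arg is gone */
grpc_closure c;
grpc_closure_init(&c, DoNothing, NULL,
                  grpc_combiner_scheduler(combiner)); /* bool flag is gone */
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure_sched(&exec_ctx, &c, GRPC_ERROR_NONE);
GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
grpc_exec_ctx_finish(&exec_ctx);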
@ -259,10 +259,9 @@ BENCHMARK(BM_TryAcquireSpinlock);
static void BM_ClosureSchedOnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_combiner* combiner = grpc_combiner_create(NULL);
grpc_combiner* combiner = grpc_combiner_create();
grpc_closure c;
grpc_closure_init(&c, DoNothing, NULL,
grpc_combiner_scheduler(combiner, false));
grpc_closure_init(&c, DoNothing, NULL, grpc_combiner_scheduler(combiner));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_closure_sched(&exec_ctx, &c, GRPC_ERROR_NONE);
@ -276,13 +275,11 @@ BENCHMARK(BM_ClosureSchedOnCombiner);
static void BM_ClosureSched2OnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_combiner* combiner = grpc_combiner_create(NULL);
grpc_combiner* combiner = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
grpc_closure_init(&c1, DoNothing, NULL,
grpc_combiner_scheduler(combiner, false));
grpc_closure_init(&c2, DoNothing, NULL,
grpc_combiner_scheduler(combiner, false));
grpc_closure_init(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner));
grpc_closure_init(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
@ -297,16 +294,13 @@ BENCHMARK(BM_ClosureSched2OnCombiner);
static void BM_ClosureSched3OnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_combiner* combiner = grpc_combiner_create(NULL);
grpc_combiner* combiner = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
grpc_closure c3;
grpc_closure_init(&c1, DoNothing, NULL,
grpc_combiner_scheduler(combiner, false));
grpc_closure_init(&c2, DoNothing, NULL,
grpc_combiner_scheduler(combiner, false));
grpc_closure_init(&c3, DoNothing, NULL,
grpc_combiner_scheduler(combiner, false));
grpc_closure_init(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner));
grpc_closure_init(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner));
grpc_closure_init(&c3, DoNothing, NULL, grpc_combiner_scheduler(combiner));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
@ -322,14 +316,12 @@ BENCHMARK(BM_ClosureSched3OnCombiner);
static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) {
TrackCounters track_counters;
grpc_combiner* combiner1 = grpc_combiner_create(NULL);
grpc_combiner* combiner2 = grpc_combiner_create(NULL);
grpc_combiner* combiner1 = grpc_combiner_create();
grpc_combiner* combiner2 = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
grpc_closure_init(&c1, DoNothing, NULL,
grpc_combiner_scheduler(combiner1, false));
grpc_closure_init(&c2, DoNothing, NULL,
grpc_combiner_scheduler(combiner2, false));
grpc_closure_init(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner1));
grpc_closure_init(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner2));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
@ -345,20 +337,16 @@ BENCHMARK(BM_ClosureSched2OnTwoCombiners);
static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) {
TrackCounters track_counters;
grpc_combiner* combiner1 = grpc_combiner_create(NULL);
grpc_combiner* combiner2 = grpc_combiner_create(NULL);
grpc_combiner* combiner1 = grpc_combiner_create();
grpc_combiner* combiner2 = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
grpc_closure c3;
grpc_closure c4;
grpc_closure_init(&c1, DoNothing, NULL,
grpc_combiner_scheduler(combiner1, false));
grpc_closure_init(&c2, DoNothing, NULL,
grpc_combiner_scheduler(combiner2, false));
grpc_closure_init(&c3, DoNothing, NULL,
grpc_combiner_scheduler(combiner1, false));
grpc_closure_init(&c4, DoNothing, NULL,
grpc_combiner_scheduler(combiner2, false));
grpc_closure_init(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner1));
grpc_closure_init(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner2));
grpc_closure_init(&c3, DoNothing, NULL, grpc_combiner_scheduler(combiner1));
grpc_closure_init(&c4, DoNothing, NULL, grpc_combiner_scheduler(combiner2));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
@ -418,8 +406,8 @@ BENCHMARK(BM_ClosureReschedOnExecCtx);
static void BM_ClosureReschedOnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_combiner* combiner = grpc_combiner_create(NULL);
Rescheduler r(state, grpc_combiner_scheduler(combiner, false));
grpc_combiner* combiner = grpc_combiner_create();
Rescheduler r(state, grpc_combiner_scheduler(combiner));
r.ScheduleFirst(&exec_ctx);
grpc_exec_ctx_flush(&exec_ctx);
GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
@ -431,10 +419,10 @@ BENCHMARK(BM_ClosureReschedOnCombiner);
static void BM_ClosureReschedOnCombinerFinally(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_combiner* combiner = grpc_combiner_create(NULL);
Rescheduler r(state, grpc_combiner_finally_scheduler(combiner, false));
r.ScheduleFirstAgainstDifferentScheduler(
&exec_ctx, grpc_combiner_scheduler(combiner, false));
grpc_combiner* combiner = grpc_combiner_create();
Rescheduler r(state, grpc_combiner_finally_scheduler(combiner));
r.ScheduleFirstAgainstDifferentScheduler(&exec_ctx,
grpc_combiner_scheduler(combiner));
grpc_exec_ctx_flush(&exec_ctx);
GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
grpc_exec_ctx_finish(&exec_ctx);

@ -1204,11 +1204,6 @@ src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_pipe.h \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/wakeup_fd_posix.h \
src/core/lib/iomgr/workqueue.h \
src/core/lib/iomgr/workqueue_uv.c \
src/core/lib/iomgr/workqueue_uv.h \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/iomgr/workqueue_windows.h \
src/core/lib/json/json.c \
src/core/lib/json/json.h \
src/core/lib/json/json_common.h \

@ -7885,9 +7885,6 @@
"src/core/lib/iomgr/wakeup_fd_cv.h",
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_uv.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
"src/core/lib/json/json_reader.h",
@ -8094,11 +8091,6 @@
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_uv.c",
"src/core/lib/iomgr/workqueue_uv.h",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.c",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",

@ -374,9 +374,6 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_cv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_common.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_reader.h" />
@ -685,10 +682,6 @@
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_reader.c">

@ -247,12 +247,6 @@
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
<Filter>src\core\lib\json</Filter>
</ClCompile>
@ -1067,15 +1061,6 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h">
<Filter>src\core\lib\json</Filter>
</ClInclude>

@ -269,9 +269,6 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_cv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_common.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_reader.h" />
@ -513,10 +510,6 @@
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_reader.c">

@ -304,12 +304,6 @@
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
<Filter>src\core\lib\json</Filter>
</ClCompile>
@ -806,15 +800,6 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h">
<Filter>src\core\lib\json</Filter>
</ClInclude>

@ -364,9 +364,6 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_cv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_common.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_reader.h" />
@ -652,10 +649,6 @@
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_reader.c">

@ -250,12 +250,6 @@
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
<Filter>src\core\lib\iomgr</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
<Filter>src\core\lib\json</Filter>
</ClCompile>
@ -977,15 +971,6 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_uv.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h">
<Filter>src\core\lib\json</Filter>
</ClInclude>
