Merge pull request #6160 from ctiller/api_fuzzer

Core API fuzzer
pull/6285/head
Nicolas Noble 9 years ago
commit c3d869ef58
  1. 72
      Makefile
  2. 16
      build.yaml
  3. 38
      src/core/ext/client_config/client_channel.c
  4. 7
      src/core/ext/client_config/subchannel.c
  5. 6
      src/core/ext/client_config/subchannel_call_holder.c
  6. 14
      src/core/ext/resolver/dns/native/dns_resolver.c
  7. 6
      src/core/ext/resolver/zookeeper/zookeeper_resolver.c
  8. 4
      src/core/ext/transport/chttp2/client/insecure/channel_create.c
  9. 2
      src/core/ext/transport/chttp2/transport/hpack_parser.c
  10. 10
      src/core/lib/channel/compress_filter.c
  11. 2
      src/core/lib/http/httpcli.c
  12. 6
      src/core/lib/iomgr/closure.c
  13. 3
      src/core/lib/iomgr/closure.h
  14. 2
      src/core/lib/iomgr/exec_ctx.h
  15. 5
      src/core/lib/iomgr/resolve_address.h
  16. 9
      src/core/lib/iomgr/resolve_address_posix.c
  17. 9
      src/core/lib/iomgr/resolve_address_windows.c
  18. 25
      src/core/lib/iomgr/tcp_client_posix.c
  19. 15
      src/core/lib/iomgr/timer.c
  20. 10
      src/core/lib/support/time_posix.c
  21. 95
      src/core/lib/surface/call.c
  22. 3
      src/core/lib/surface/lame_client.c
  23. 2
      src/core/lib/surface/validate_metadata.c
  24. 1
      src/core/lib/transport/metadata.h
  25. 2
      templates/tools/fuzzer/runners.template
  26. 896
      test/core/end2end/fuzzers/api_fuzzer.c
  27. 27
      test/core/end2end/fuzzers/api_fuzzer.dictionary
  28. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/00.bin
  29. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/01.bin
  30. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0159f564d91869bc07239f5551a493c2845a4524
  31. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/02.bin
  32. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0211f960c2da343c3cde6406e650d73278e01e47
  33. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0236f28708dcc2e044d67ecf93539ce6c33a727a
  34. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/02434dcdaca96b9eacee76eb351e99f015eaa05e
  35. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/03.bin
  36. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0302b90625ac9f61f45b45d043fda23b5472d711
  37. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/04.bin
  38. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0433cabb8c28820bda0a6eac35d17d120f1b6865
  39. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0452ea591951af85724608917fda16926dad7451
  40. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0468ab4bf4f7e10b680f43efae4bf9686834d220
  41. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/04e01f399f194434b2b724877df64828e8f52c14
  42. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/05.bin
  43. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0539bf31b2310091ce30d0123142d63589939105
  44. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0542a0e5aeb1658cc965724bfced56770569263b
  45. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/056e56878b249c7fd0b95576b352ab2f4d46582e
  46. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/05dee1c3847f2bca29bd14ed701ce64999b298b2
  47. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/06.bin
  48. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/064d3beeef29a647deb1b345426ea7212de71cfe
  49. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/067298a97640cc5e212647864d21bc1fa6bb7e75
  50. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/07.bin
  51. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/070c7005e63abba72c6bc1a0ee6d44e340f2d2be
  52. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/07674d39538e07c29342cb2ee8856bc71fc06638
  53. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/07aa7d6c71878eb78b25ca12d79082f70ae7f64c
  54. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/07ae5ed3dedbd83e376c892a9546cc0cd733c26f
  55. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/07cc8b298d1502d0c30f3f160871e66e5a1f3fe1
  56. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/08.bin
  57. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/085865a209776911782f592c9f30ffe0ad3814a0
  58. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/09.bin
  59. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/092b85d1f5c922287e476e6e75ad8a0a80c779a6
  60. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/09923e3ef02243b1902406c637f9516cbe99d7cb
  61. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/0a.bin
  62. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0a71ae781345f9ee2b08008a81f9055e6c1d5256
  63. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0a7aad5682c304b0cbda31445b221238e0293a9f
  64. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/0b.bin
  65. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0b6f0ea99a329e054032e6c292b99c3bcad0c9f2
  66. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0bbd89b21cfd192174c25803c7f1afeec88e6524
  67. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/0c.bin
  68. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/0d.bin
  69. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0d16d6c2c128ac4ee7b596b763822b4194968533
  70. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0d8bd296d63a5aca5f80d7a7d00387048babda36
  71. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0d9d8241c5568fea586d21f91ae1891dac31ba24
  72. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/0e.bin
  73. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0e2a9ad3aacba320563095a874768a9e546a3db2
  74. 1
      test/core/end2end/fuzzers/api_fuzzer_corpus/0f.bin
  75. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0f2831e0f73521a0991e11115c16847afca16bb3
  76. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/0fa216ec645b3973b5e6d28baedd5acc1542e69e
  77. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/10302aa7598eb36d0ac22d0478eb0f2a6b010ea6
  78. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/1109cb814fd134862a3f5ef5c9b2244585882b8f
  79. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/119410315423e5f37919886ced7f03235e5792aa
  80. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/12083209096187575021a775826b08b70b39ed4c
  81. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/1254c9256157e6362003c97c8c93d8cd67a28772
  82. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/12a97827d0f817e3ffd8d9cf1bdba0f945b6fda4
  83. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/12ef45f6beba92677a2a7508fc5e1bfef30ded66
  84. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/130c41e2dd87c36b4079c8e5bd380dbe3e0a2b38
  85. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/13c409dcf7752c25b2b51ac5fad9201b505d7059
  86. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/143789594154049441d565b65ce725fc4f8c12bc
  87. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/149044286608a7945721c61f12196bebd5adb2ee
  88. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/157586c7c0ba8fd0dc9bfc2426229a7da934cec2
  89. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/15c37fe5be9f23c0f0e59e12ee7666007acdb3c5
  90. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/1661d0799cbf2015fd64e9f648ebb49281d41c6d
  91. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/1671cf01e5baf796c5572b7b0e15d226a5c93f23
  92. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/16a9beb811f836a444172a5da9290b47d77c32ef
  93. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/16d52016278caebf92ba455f7ac8a8c7482c3563
  94. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/173ebf4139ee6d7a574b6767059d82375674bbf4
  95. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/17cfb281eaa8a17d77e08c3648bb93f3b5aa5297
  96. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/183c878064b6a0ddf6a22dc4a2aa0d33a2d802d0
  97. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/1887558eb48d6a4341610fd0395cef8e87744044
  98. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/18c856af1e2ebb934401e523043eaf80aecc8363
  99. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/18f2d7626b6ad4859e735e448b00b6916f1d3e2e
  100. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/19dcc3082c76b85177ce6a56d195473aaa285268
  101. Some files were not shown because too many files have changed in this diff Show More

@ -881,6 +881,7 @@ alarm_test: $(BINDIR)/$(CONFIG)/alarm_test
algorithm_test: $(BINDIR)/$(CONFIG)/algorithm_test
alloc_test: $(BINDIR)/$(CONFIG)/alloc_test
alpn_test: $(BINDIR)/$(CONFIG)/alpn_test
api_fuzzer: $(BINDIR)/$(CONFIG)/api_fuzzer
bin_encoder_test: $(BINDIR)/$(CONFIG)/bin_encoder_test
census_context_test: $(BINDIR)/$(CONFIG)/census_context_test
channel_create_test: $(BINDIR)/$(CONFIG)/channel_create_test
@ -1119,6 +1120,7 @@ h2_sockpair_nosec_test: $(BINDIR)/$(CONFIG)/h2_sockpair_nosec_test
h2_sockpair+trace_nosec_test: $(BINDIR)/$(CONFIG)/h2_sockpair+trace_nosec_test
h2_sockpair_1byte_nosec_test: $(BINDIR)/$(CONFIG)/h2_sockpair_1byte_nosec_test
h2_uds_nosec_test: $(BINDIR)/$(CONFIG)/h2_uds_nosec_test
api_fuzzer_one_entry: $(BINDIR)/$(CONFIG)/api_fuzzer_one_entry
client_fuzzer_one_entry: $(BINDIR)/$(CONFIG)/client_fuzzer_one_entry
hpack_parser_fuzzer_test_one_entry: $(BINDIR)/$(CONFIG)/hpack_parser_fuzzer_test_one_entry
http_fuzzer_test_one_entry: $(BINDIR)/$(CONFIG)/http_fuzzer_test_one_entry
@ -1347,6 +1349,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/h2_sockpair+trace_nosec_test \
$(BINDIR)/$(CONFIG)/h2_sockpair_1byte_nosec_test \
$(BINDIR)/$(CONFIG)/h2_uds_nosec_test \
$(BINDIR)/$(CONFIG)/api_fuzzer_one_entry \
$(BINDIR)/$(CONFIG)/client_fuzzer_one_entry \
$(BINDIR)/$(CONFIG)/hpack_parser_fuzzer_test_one_entry \
$(BINDIR)/$(CONFIG)/http_fuzzer_test_one_entry \
@ -2711,6 +2714,7 @@ LIBGRPC_TEST_UTIL_SRC = \
test/core/util/memory_counters.c \
test/core/util/mock_endpoint.c \
test/core/util/parse_hexstring.c \
test/core/util/passthru_endpoint.c \
test/core/util/port_posix.c \
test/core/util/port_server_client.c \
test/core/util/port_windows.c \
@ -2760,6 +2764,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
test/core/util/memory_counters.c \
test/core/util/mock_endpoint.c \
test/core/util/parse_hexstring.c \
test/core/util/passthru_endpoint.c \
test/core/util/port_posix.c \
test/core/util/port_server_client.c \
test/core/util/port_windows.c \
@ -6052,6 +6057,38 @@ endif
endif
API_FUZZER_SRC = \
test/core/end2end/fuzzers/api_fuzzer.c \
API_FUZZER_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(API_FUZZER_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/api_fuzzer: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/api_fuzzer: $(API_FUZZER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(API_FUZZER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -lFuzzer -o $(BINDIR)/$(CONFIG)/api_fuzzer
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/fuzzers/api_fuzzer.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_api_fuzzer: $(API_FUZZER_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(API_FUZZER_OBJS:.o=.dep)
endif
endif
BIN_ENCODER_TEST_SRC = \
test/core/transport/chttp2/bin_encoder_test.c \
@ -13848,6 +13885,41 @@ ifneq ($(NO_DEPS),true)
endif
API_FUZZER_ONE_ENTRY_SRC = \
test/core/end2end/fuzzers/api_fuzzer.c \
test/core/util/one_corpus_entry_fuzzer.c \
API_FUZZER_ONE_ENTRY_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(API_FUZZER_ONE_ENTRY_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/api_fuzzer_one_entry: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/api_fuzzer_one_entry: $(API_FUZZER_ONE_ENTRY_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(API_FUZZER_ONE_ENTRY_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/api_fuzzer_one_entry
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/fuzzers/api_fuzzer.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/core/util/one_corpus_entry_fuzzer.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_api_fuzzer_one_entry: $(API_FUZZER_ONE_ENTRY_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(API_FUZZER_ONE_ENTRY_OBJS:.o=.dep)
endif
endif
CLIENT_FUZZER_ONE_ENTRY_SRC = \
test/core/end2end/fuzzers/client_fuzzer.c \
test/core/util/one_corpus_entry_fuzzer.c \

@ -441,6 +441,7 @@ filegroups:
- test/core/util/memory_counters.h
- test/core/util/mock_endpoint.h
- test/core/util/parse_hexstring.h
- test/core/util/passthru_endpoint.h
- test/core/util/port.h
- test/core/util/port_server_client.h
- test/core/util/slice_splitter.h
@ -452,6 +453,7 @@ filegroups:
- test/core/util/memory_counters.c
- test/core/util/mock_endpoint.c
- test/core/util/parse_hexstring.c
- test/core/util/passthru_endpoint.c
- test/core/util/port_posix.c
- test/core/util/port_server_client.c
- test/core/util/port_windows.c
@ -1116,6 +1118,20 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: api_fuzzer
build: fuzzer
language: c
src:
- test/core/end2end/fuzzers/api_fuzzer.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
corpus_dirs:
- test/core/end2end/fuzzers/api_fuzzer_corpus
dict: test/core/end2end/fuzzers/api_fuzzer.dictionary
maxlen: 2048
- name: bin_encoder_test
build: test
language: c

@ -205,7 +205,11 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&chand->mu_config);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
if (lb_policy != NULL || chand->resolver == NULL /* disconnected */) {
if (lb_policy != NULL) {
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
} else if (chand->resolver == NULL /* disconnected */) {
grpc_closure_list_fail_all(&chand->waiting_for_config_closures);
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
}
@ -293,6 +297,11 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_resolver_shutdown(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
if (!chand->started_resolving) {
grpc_closure_list_fail_all(&chand->waiting_for_config_closures);
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
@ -321,10 +330,10 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
continue_picking_args *cpa = arg;
if (!success) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
} else if (cpa->connected_subchannel == NULL) {
if (cpa->connected_subchannel == NULL) {
/* cancelled, do nothing */
} else if (!success) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
} else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags,
cpa->connected_subchannel, cpa->on_ready)) {
@ -381,14 +390,19 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
&chand->incoming_configuration,
&chand->on_config_changed);
}
cpa = gpr_malloc(sizeof(*cpa));
cpa->initial_metadata = initial_metadata;
cpa->initial_metadata_flags = initial_metadata_flags;
cpa->connected_subchannel = connected_subchannel;
cpa->on_ready = on_ready;
cpa->elem = elem;
grpc_closure_init(&cpa->closure, continue_picking, cpa);
grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure, 1);
if (chand->resolver != NULL) {
cpa = gpr_malloc(sizeof(*cpa));
cpa->initial_metadata = initial_metadata;
cpa->initial_metadata_flags = initial_metadata_flags;
cpa->connected_subchannel = connected_subchannel;
cpa->on_ready = on_ready;
cpa->elem = elem;
grpc_closure_init(&cpa->closure, continue_picking, cpa);
grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure,
1);
} else {
grpc_exec_ctx_enqueue(exec_ctx, on_ready, false, NULL);
}
gpr_mu_unlock(&chand->mu_config);
return 0;
}

@ -135,8 +135,6 @@ struct grpc_subchannel {
int have_alarm;
/** our alarm */
grpc_timer alarm;
/** current random value */
uint32_t random;
};
struct grpc_subchannel_call {
@ -297,10 +295,6 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
}
}
static uint32_t random_seed() {
return (uint32_t)(gpr_time_to_millis(gpr_now(GPR_CLOCK_MONOTONIC)));
}
grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
grpc_connector *connector,
grpc_subchannel_args *args) {
@ -332,7 +326,6 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
grpc_set_initial_connect_string(&c->addr, &c->addr_len,
&c->initial_connect_string);
c->args = grpc_channel_args_copy(args->args);
c->random = random_seed();
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
grpc_closure_init(&c->connected, subchannel_connected, c);

@ -252,9 +252,9 @@ char *grpc_subchannel_call_holder_get_peer(
grpc_exec_ctx *exec_ctx, grpc_subchannel_call_holder *holder) {
grpc_subchannel_call *subchannel_call = GET_CALL(holder);
if (subchannel_call) {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
} else {
if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
return NULL;
} else {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
}
}

@ -86,7 +86,8 @@ typedef struct {
static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_start_resolving_locked(dns_resolver *r);
static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
@ -119,7 +120,7 @@ static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&r->mu);
if (!r->resolving) {
gpr_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(r);
dns_start_resolving_locked(exec_ctx, r);
}
gpr_mu_unlock(&r->mu);
}
@ -134,7 +135,7 @@ static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
r->target_config = target_config;
if (r->resolved_version == 0 && !r->resolving) {
gpr_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(r);
dns_start_resolving_locked(exec_ctx, r);
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
}
@ -149,7 +150,7 @@ static void dns_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
r->have_retry_timer = false;
if (success) {
if (!r->resolving) {
dns_start_resolving_locked(r);
dns_start_resolving_locked(exec_ctx, r);
}
}
gpr_mu_unlock(&r->mu);
@ -201,11 +202,12 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
static void dns_start_resolving_locked(dns_resolver *r) {
static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r) {
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
GPR_ASSERT(!r->resolving);
r->resolving = 1;
grpc_resolve_address(r->name, r->default_port, dns_on_resolved, r);
grpc_resolve_address(exec_ctx, r->name, r->default_port, dns_on_resolved, r);
}
static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,

@ -299,7 +299,7 @@ static void zookeeper_get_children_node_completion(int rc, const char *value,
address = zookeeper_parse_address(value, (size_t)value_len);
if (address != NULL) {
/** Further resolves address by DNS */
grpc_resolve_address(address, NULL, zookeeper_dns_resolved, r);
grpc_resolve_address(&exec_ctx, address, NULL, zookeeper_dns_resolved, r);
gpr_free(address);
} else {
gpr_log(GPR_ERROR, "Error in resolving a child node of %s", r->name);
@ -375,8 +375,10 @@ static void zookeeper_get_node_completion(int rc, const char *value,
r->resolved_addrs->naddrs = 0;
r->resolved_total = 1;
/** Further resolves address by DNS */
grpc_resolve_address(address, NULL, zookeeper_dns_resolved, r);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resolve_address(&exec_ctx, address, NULL, zookeeper_dns_resolved, r);
gpr_free(address);
grpc_exec_ctx_finish(&exec_ctx);
return;
}

@ -235,5 +235,7 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
grpc_exec_ctx_finish(&exec_ctx);
return channel; /* may be NULL */
return channel != NULL ? channel : grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,
"Failed to create client channel");
}

@ -639,7 +639,7 @@ static int on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
}
}
if (p->on_header == NULL) {
grpc_mdelem_unref(md);
GRPC_MDELEM_UNREF(md);
return 0;
}
p->on_header(p->on_header_user_data, md);

@ -268,8 +268,14 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
channeld->default_compression_algorithm =
grpc_channel_args_get_compression_algorithm(args->channel_args);
/* Make sure the default isn't disabled. */
GPR_ASSERT(grpc_compression_options_is_algorithm_enabled(
&channeld->compression_options, channeld->default_compression_algorithm));
if (!grpc_compression_options_is_algorithm_enabled(
&channeld->compression_options,
channeld->default_compression_algorithm)) {
gpr_log(GPR_DEBUG,
"compression algorithm %d not enabled: switching to none",
channeld->default_compression_algorithm);
channeld->default_compression_algorithm = GRPC_COMPRESS_NONE;
}
channeld->compression_options.default_compression_algorithm =
channeld->default_compression_algorithm;

@ -246,7 +246,7 @@ static void internal_request_begin(
grpc_pollset_set_add_pollset(exec_ctx, req->context->pollset_set,
req->pollset);
grpc_resolve_address(request->host, req->handshaker->default_port,
grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port,
on_resolved, req);
}

@ -54,6 +54,12 @@ void grpc_closure_list_add(grpc_closure_list *closure_list,
closure_list->tail = closure;
}
void grpc_closure_list_fail_all(grpc_closure_list *list) {
for (grpc_closure *c = list->head; c != NULL; c = grpc_closure_next(c)) {
c->final_data &= ~(uintptr_t)1;
}
}
bool grpc_closure_list_empty(grpc_closure_list closure_list) {
return closure_list.head == NULL;
}

@ -86,6 +86,9 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
void grpc_closure_list_add(grpc_closure_list *list, grpc_closure *closure,
bool success);
/** force all success bits in \a list to false */
void grpc_closure_list_fail_all(grpc_closure_list *list);
/** append all closures from \a src to \a dst and empty \a src. */
void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);

@ -92,6 +92,8 @@ void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
grpc_closure_list *list,
grpc_workqueue *offload_target_or_null);
void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_shutdown(void);

@ -59,8 +59,9 @@ typedef void (*grpc_resolve_cb)(grpc_exec_ctx *exec_ctx, void *arg,
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
/* TODO(ctiller): add a timeout here */
void grpc_resolve_address(const char *addr, const char *default_port,
grpc_resolve_cb cb, void *arg);
extern void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *addr,
const char *default_port,
grpc_resolve_cb cb, void *arg);
/* Destroy resolved addresses */
void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addresses);

@ -164,8 +164,9 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
gpr_free(addrs);
}
void grpc_resolve_address(const char *name, const char *default_port,
grpc_resolve_cb cb, void *arg) {
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
const char *default_port, grpc_resolve_cb cb,
void *arg) {
request *r = gpr_malloc(sizeof(request));
grpc_closure_init(&r->request_closure, do_request_thread, r);
r->name = gpr_strdup(name);
@ -175,4 +176,8 @@ void grpc_resolve_address(const char *name, const char *default_port,
grpc_executor_enqueue(&r->request_closure, 1);
}
void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name,
const char *default_port, grpc_resolve_cb cb,
void *arg) = resolve_address_impl;
#endif

@ -155,8 +155,9 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
gpr_free(addrs);
}
void grpc_resolve_address(const char *name, const char *default_port,
grpc_resolve_cb cb, void *arg) {
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
const char *default_port, grpc_resolve_cb cb,
void *arg) {
request *r = gpr_malloc(sizeof(request));
grpc_closure_init(&r->request_closure, do_request_thread, r);
r->name = gpr_strdup(name);
@ -166,4 +167,8 @@ void grpc_resolve_address(const char *name, const char *default_port,
grpc_executor_enqueue(&r->request_closure, 1);
}
void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name,
const char *default_port, grpc_resolve_cb cb,
void *arg) = resolve_address_impl;
#endif

@ -211,11 +211,11 @@ finish:
grpc_exec_ctx_enqueue(exec_ctx, closure, *ep != NULL, NULL);
}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline) {
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const struct sockaddr *addr,
size_t addr_len, gpr_timespec deadline) {
int fd;
grpc_dualstack_mode dsmode;
int err;
@ -303,4 +303,19 @@ done:
gpr_free(addr_str);
}
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const struct sockaddr *addr,
size_t addr_len, gpr_timespec deadline) = tcp_client_connect_impl;
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
addr_len, deadline);
}
#endif

@ -70,6 +70,7 @@ static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */
static shard_type *g_shard_queue[NUM_SHARDS];
static bool g_initialized = false;
static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
gpr_timespec *next, int success);
@ -83,6 +84,7 @@ static gpr_timespec compute_min_deadline(shard_type *shard) {
void grpc_timer_list_init(gpr_timespec now) {
uint32_t i;
g_initialized = true;
gpr_mu_init(&g_mu);
gpr_mu_init(&g_checker_mu);
g_clock_type = now.clock_type;
@ -111,6 +113,7 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
}
gpr_mu_destroy(&g_mu);
gpr_mu_destroy(&g_checker_mu);
g_initialized = false;
}
/* This is a cheap, but good enough, pointer hash for sharding the tasks: */
@ -180,6 +183,18 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->deadline = deadline;
timer->triggered = 0;
if (!g_initialized) {
timer->triggered = 1;
grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, false, NULL);
return;
}
if (gpr_time_cmp(deadline, now) <= 0) {
timer->triggered = 1;
grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, true, NULL);
return;
}
/* TODO(ctiller): check deadline expired */
gpr_mu_lock(&shard->mu);

@ -78,7 +78,7 @@ static const clockid_t clockid_for_gpr_clock[] = {CLOCK_MONOTONIC,
void gpr_time_init(void) { gpr_precise_clock_init(); }
gpr_timespec gpr_now(gpr_clock_type clock_type) {
static gpr_timespec now_impl(gpr_clock_type clock_type) {
struct timespec now;
GPR_ASSERT(clock_type != GPR_TIMESPAN);
if (clock_type == GPR_CLOCK_PRECISE) {
@ -114,7 +114,7 @@ void gpr_time_init(void) {
g_time_start = mach_absolute_time();
}
gpr_timespec gpr_now(gpr_clock_type clock) {
static gpr_timespec now_impl(gpr_clock_type clock) {
gpr_timespec now;
struct timeval now_tv;
double now_dbl;
@ -142,6 +142,12 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
}
#endif
gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type) = now_impl;
gpr_timespec gpr_now(gpr_clock_type clock_type) {
return gpr_now_impl(clock_type);
}
void gpr_sleep_until(gpr_timespec until) {
gpr_timespec now;
gpr_timespec delta;

@ -142,22 +142,23 @@ struct grpc_call {
gpr_mu mu;
/* client or server call */
uint8_t is_client;
bool is_client;
/* is the alarm set */
uint8_t have_alarm;
bool have_alarm;
/** has grpc_call_destroy been called */
uint8_t destroy_called;
bool destroy_called;
/** flag indicating that cancellation is inherited */
uint8_t cancellation_is_inherited;
bool cancellation_is_inherited;
/** bitmask of live batches */
uint8_t used_batches;
/** which ops are in-flight */
uint8_t sent_initial_metadata;
uint8_t sending_message;
uint8_t sent_final_op;
uint8_t received_initial_metadata;
uint8_t receiving_message;
uint8_t received_final_op;
bool sent_initial_metadata;
bool sending_message;
bool sent_final_op;
bool received_initial_metadata;
bool receiving_message;
bool requested_final_op;
bool received_final_op;
/* have we received initial metadata */
bool has_initial_md_been_received;
@ -220,10 +221,7 @@ struct grpc_call {
} server;
} final_op;
struct {
void *bctlp;
bool success;
} saved_receiving_stream_ready_ctx;
void *saved_receiving_stream_ready_bctlp;
};
#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))
@ -554,21 +552,6 @@ static int prepare_application_metadata(grpc_call *call, int count,
int i;
grpc_metadata_batch *batch =
&call->metadata_batch[0 /* is_receiving */][is_trailing];
if (prepend_extra_metadata) {
if (call->send_extra_metadata_count == 0) {
prepend_extra_metadata = 0;
} else {
for (i = 0; i < call->send_extra_metadata_count; i++) {
GRPC_MDELEM_REF(call->send_extra_metadata[i].md);
}
for (i = 1; i < call->send_extra_metadata_count; i++) {
call->send_extra_metadata[i].prev = &call->send_extra_metadata[i - 1];
}
for (i = 0; i < call->send_extra_metadata_count - 1; i++) {
call->send_extra_metadata[i].next = &call->send_extra_metadata[i + 1];
}
}
}
for (i = 0; i < count; i++) {
grpc_metadata *md = &metadata[i];
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
@ -579,14 +562,37 @@ static int prepare_application_metadata(grpc_call *call, int count,
GRPC_MDSTR_LENGTH(l->md->key))) {
gpr_log(GPR_ERROR, "attempt to send invalid metadata key: %s",
grpc_mdstr_as_c_string(l->md->key));
return 0;
break;
} else if (!grpc_is_binary_header(grpc_mdstr_as_c_string(l->md->key),
GRPC_MDSTR_LENGTH(l->md->key)) &&
!grpc_header_nonbin_value_is_legal(
grpc_mdstr_as_c_string(l->md->value),
GRPC_MDSTR_LENGTH(l->md->value))) {
gpr_log(GPR_ERROR, "attempt to send invalid metadata value");
return 0;
break;
}
}
if (i != count) {
for (int j = 0; j <= i; j++) {
grpc_metadata *md = &metadata[j];
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
GRPC_MDELEM_UNREF(l->md);
}
return 0;
}
if (prepend_extra_metadata) {
if (call->send_extra_metadata_count == 0) {
prepend_extra_metadata = 0;
} else {
for (i = 0; i < call->send_extra_metadata_count; i++) {
GRPC_MDELEM_REF(call->send_extra_metadata[i].md);
}
for (i = 1; i < call->send_extra_metadata_count; i++) {
call->send_extra_metadata[i].prev = &call->send_extra_metadata[i - 1];
}
for (i = 0; i < call->send_extra_metadata_count - 1; i++) {
call->send_extra_metadata[i].next = &call->send_extra_metadata[i + 1];
}
}
}
for (i = 1; i < count; i++) {
@ -1057,12 +1063,12 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_call *call = bctl->call;
gpr_mu_lock(&bctl->call->mu);
if (bctl->call->has_initial_md_been_received) {
if (bctl->call->has_initial_md_been_received || !success ||
call->receiving_stream == NULL) {
gpr_mu_unlock(&bctl->call->mu);
process_data_after_md(exec_ctx, bctlp, success);
} else {
call->saved_receiving_stream_ready_ctx.bctlp = bctlp;
call->saved_receiving_stream_ready_ctx.success = success;
call->saved_receiving_stream_ready_bctlp = bctlp;
gpr_mu_unlock(&bctl->call->mu);
}
}
@ -1091,13 +1097,11 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
}
call->has_initial_md_been_received = true;
if (call->saved_receiving_stream_ready_ctx.bctlp != NULL) {
if (call->saved_receiving_stream_ready_bctlp != NULL) {
grpc_closure *saved_rsr_closure = grpc_closure_create(
receiving_stream_ready, call->saved_receiving_stream_ready_ctx.bctlp);
grpc_exec_ctx_enqueue(
exec_ctx, saved_rsr_closure,
call->saved_receiving_stream_ready_ctx.success && success, NULL);
call->saved_receiving_stream_ready_ctx.bctlp = NULL;
receiving_stream_ready, call->saved_receiving_stream_ready_bctlp);
call->saved_receiving_stream_ready_bctlp = NULL;
grpc_exec_ctx_enqueue(exec_ctx, saved_rsr_closure, success, NULL);
}
gpr_mu_unlock(&call->mu);
@ -1133,6 +1137,7 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, bool success) {
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
grpc_metadata_batch_filter(md, recv_trailing_filter, call);
call->received_final_op = true;
if (call->have_alarm) {
grpc_timer_cancel(exec_ctx, &call->alarm);
}
@ -1377,11 +1382,11 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_NOT_ON_SERVER;
goto done_with_error;
}
if (call->received_final_op) {
if (call->requested_final_op) {
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
call->received_final_op = 1;
call->requested_final_op = 1;
call->buffered_metadata[1] =
op->data.recv_status_on_client.trailing_metadata;
call->final_op.client.status = op->data.recv_status_on_client.status;
@ -1404,11 +1409,11 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
goto done_with_error;
}
if (call->received_final_op) {
if (call->requested_final_op) {
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
call->received_final_op = 1;
call->requested_final_op = 1;
call->final_op.server.cancelled =
op->data.recv_close_on_server.cancelled;
bctl->recv_final_op = 1;
@ -1457,7 +1462,7 @@ done_with_error:
call->receiving_message = 0;
}
if (bctl->recv_final_op) {
call->received_final_op = 0;
call->requested_final_op = 0;
}
gpr_mu_unlock(&call->mu);
goto done;

@ -99,6 +99,9 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->on_consumed != NULL) {
op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 1);
}
if (op->send_ping != NULL) {
op->send_ping->cb(exec_ctx, op->send_ping->cb_arg, 0);
}
}
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,

@ -40,7 +40,7 @@ static int conforms_to(const char *s, size_t len, const uint8_t *legal_bits) {
const char *p = s;
const char *e = s + len;
for (; p != e; p++) {
int idx = *p;
int idx = (uint8_t)*p;
int byte = idx / 8;
int bit = idx % 8;
if ((legal_bits[byte] & (1 << bit)) == 0) return 0;

@ -120,6 +120,7 @@ void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data);
/* Reference counting */
//#define GRPC_METADATA_REFCOUNT_DEBUG
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
#define GRPC_MDSTR_REF(s) grpc_mdstr_ref((s), __FILE__, __LINE__)
#define GRPC_MDSTR_UNREF(s) grpc_mdstr_unref((s), __FILE__, __LINE__)

@ -35,7 +35,7 @@ template: |
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
flags="-max_total_time=$runtime -artifact_prefix=fuzzer_output/ -max_len=${selected.maxlen}"
flags="-max_total_time=$runtime -artifact_prefix=fuzzer_output/ -max_len=${selected.maxlen} -timeout=120"
%if selected.get('dict'):
flags="$flags -dict=${selected.dict}"

@ -0,0 +1,896 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/transport/metadata.h"
#include "test/core/util/passthru_endpoint.h"
////////////////////////////////////////////////////////////////////////////////
// logging
// Suppress all gRPC logging while fuzzing; flip to false to debug a repro.
static const bool squelch = true;
// No-op gpr log sink installed when 'squelch' is set.
static void dont_log(gpr_log_func_args *args) {}
////////////////////////////////////////////////////////////////////////////////
// input_stream: allows easy access to input bytes, and allows reading a little
// past the end (avoiding needing to check everywhere)
// Cursor over the fuzzer's raw input. Reading past 'end' is permitted and
// yields 0, so callers never need their own bounds checks.
typedef struct {
  const uint8_t *cur;
  const uint8_t *end;
} input_stream;

// Pop one byte from the stream; returns 0 once the input is exhausted.
static uint8_t next_byte(input_stream *inp) {
  return (inp->cur == inp->end) ? 0 : *inp->cur++;
}
static void end(input_stream *inp) { inp->cur = inp->end; }
// Read a NUL-terminated string from the stream into a fresh gpr allocation
// (amortized growth: x1.5, minimum +8). The caller owns the returned buffer;
// end-of-input produces an implicit terminating 0.
static char *read_string(input_stream *inp) {
  char *buf = NULL;
  size_t capacity = 0;
  size_t len = 0;
  for (;;) {
    if (len == capacity) {
      capacity = GPR_MAX(3 * capacity / 2, capacity + 8);
      buf = gpr_realloc(buf, capacity);
    }
    char ch = (char)next_byte(inp);
    buf[len++] = ch;
    if (ch == 0) {
      return buf;
    }
  }
}
// Read a binary blob prefixed by a one-byte length (0..255). The caller owns
// *buffer; *length receives the byte count.
static void read_buffer(input_stream *inp, char **buffer, size_t *length) {
  size_t n = next_byte(inp);
  char *bytes = gpr_malloc(n);
  for (size_t i = 0; i < n; i++) {
    bytes[i] = (char)next_byte(inp);
  }
  *length = n;
  *buffer = bytes;
}
// Decode a variable-length unsigned integer of at most 22 bits:
// up to two 7-bit groups with continuation flags, then one full final byte.
static uint32_t read_uint22(input_stream *inp) {
  uint8_t byte = next_byte(inp);
  uint32_t value = byte & 0x7f;
  if ((byte & 0x80) == 0) return value;
  byte = next_byte(inp);
  value = (value << 7) | (byte & 0x7f);
  if ((byte & 0x80) == 0) return value;
  return (value << 8) | next_byte(inp);
}
// Decode a variable-length 32-bit unsigned integer: up to four 7-bit groups
// with continuation flags, then a final 4-bit group (7*4 + 4 = 32 bits).
static uint32_t read_uint32(input_stream *inp) {
  uint32_t value = 0;
  for (int group = 0; group < 4; group++) {
    uint8_t byte = next_byte(inp);
    value = (value << 7) | (byte & 0x7f);
    if ((byte & 0x80) == 0) {
      return value;
    }
  }
  return (value << 4) | (next_byte(inp) & 0x0f);
}
// Build a zero-filled message whose length (up to 22 bits) is chosen by the
// fuzzer input. The returned byte buffer is owned by the caller.
static grpc_byte_buffer *read_message(input_stream *inp) {
  uint32_t len = read_uint22(inp);
  gpr_slice slice = gpr_slice_malloc(len);
  memset(GPR_SLICE_START_PTR(slice), 0, GPR_SLICE_LENGTH(slice));
  grpc_byte_buffer *bb = grpc_raw_byte_buffer_create(&slice, 1);
  gpr_slice_unref(slice);
  return bb;
}
static int read_int(input_stream *inp) { return (int)read_uint32(inp); }
// Read a fuzzer-chosen grpc_channel_args: a one-byte count followed by, per
// arg, a type bit (string vs integer), a key, and a value. The caller frees
// the result with grpc_channel_args_destroy.
static grpc_channel_args *read_args(input_stream *inp) {
  size_t num = next_byte(inp);
  grpc_arg *arg_array = gpr_malloc(sizeof(*arg_array) * num);
  for (size_t i = 0; i < num; i++) {
    grpc_arg *arg = &arg_array[i];
    bool is_string = next_byte(inp) & 1;
    arg->type = is_string ? GRPC_ARG_STRING : GRPC_ARG_INTEGER;
    arg->key = read_string(inp);
    if (is_string) {
      arg->value.string = read_string(inp);
    } else {
      arg->value.integer = read_int(inp);
    }
  }
  grpc_channel_args *result = gpr_malloc(sizeof(*result));
  result->args = arg_array;
  result->num_args = num;
  return result;
}
static bool is_eof(input_stream *inp) { return inp->cur == inp->end; }
////////////////////////////////////////////////////////////////////////////////
// global state
// Virtual clock, advanced only by explicit fuzzer opcodes; installed via
// gpr_now_impl so the whole library observes deterministic time.
static gpr_timespec g_now;
// At most one server and one channel exist at a time (see opcodes 2-7).
static grpc_server *g_server;
static grpc_channel *g_channel;
extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
// Replacement gpr_now: every clock type reads the same virtual instant.
static gpr_timespec now_impl(gpr_clock_type clock_type) {
  GPR_ASSERT(clock_type != GPR_TIMESPAN);
  return g_now;
}
////////////////////////////////////////////////////////////////////////////////
// dns resolution
// Pending fake DNS request; resolution is deferred through a one-second
// (virtual time) timer so asynchronous resolver paths are exercised.
typedef struct addr_req {
  grpc_timer timer;
  char *addr;  // name being resolved; owned here, freed in finish_resolve
  grpc_resolve_cb cb;
  void *arg;
} addr_req;
// Timer callback: only the literal name "server" resolves (to one
// zero-length address); anything else reports failure via cb(..., NULL).
static void finish_resolve(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  addr_req *r = arg;
  if (success && 0 == strcmp(r->addr, "server")) {
    grpc_resolved_addresses *addrs = gpr_malloc(sizeof(*addrs));
    addrs->naddrs = 1;
    addrs->addrs = gpr_malloc(sizeof(*addrs->addrs));
    addrs->addrs[0].len = 0;
    // NOTE(review): presumably the callback takes ownership of 'addrs' —
    // confirm against the grpc_resolve_cb contract.
    r->cb(exec_ctx, r->arg, addrs);
  } else {
    r->cb(exec_ctx, r->arg, NULL);
  }
  gpr_free(r->addr);
  gpr_free(r);
}
// Replacement for grpc_resolve_address (installed in LLVMFuzzerTestOneInput):
// schedules finish_resolve one simulated second in the future.
void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr,
                        const char *default_port, grpc_resolve_cb cb,
                        void *arg) {
  addr_req *r = gpr_malloc(sizeof(*r));
  r->addr = gpr_strdup(addr);
  r->cb = cb;
  r->arg = arg;
  grpc_timer_init(exec_ctx, &r->timer,
                  gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                               gpr_time_from_seconds(1, GPR_TIMESPAN)),
                  finish_resolve, r, gpr_now(GPR_CLOCK_MONOTONIC));
}
////////////////////////////////////////////////////////////////////////////////
// client connection
// defined in tcp_client_posix.c
extern void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const struct sockaddr *addr,
size_t addr_len, gpr_timespec deadline);
static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_endpoint **ep, gpr_timespec deadline);
// One deferred connect attempt; retried via sched_connect until a server
// exists to accept it or the deadline passes.
typedef struct {
  grpc_timer timer;
  grpc_closure *closure;  // completion to signal when the connect resolves
  grpc_endpoint **ep;     // out-param: client endpoint, or NULL on failure
  gpr_timespec deadline;
} future_connect;
static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  future_connect *fc = arg;
  if (!success) {
    // Timer cancelled (e.g. shutdown): fail the connect.
    *fc->ep = NULL;
    grpc_exec_ctx_enqueue(exec_ctx, fc->closure, false, NULL);
  } else if (g_server != NULL) {
    // Wire the client directly to the in-process server through an in-memory
    // passthru endpoint pair; no real sockets are involved.
    grpc_endpoint *client;
    grpc_endpoint *server;
    grpc_passthru_endpoint_create(&client, &server);
    *fc->ep = client;
    grpc_transport *transport =
        grpc_create_chttp2_transport(exec_ctx, NULL, server, 0);
    grpc_server_setup_transport(exec_ctx, g_server, transport, NULL);
    grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
    // NOTE(review): the closure is enqueued with success=false even on this
    // connected path — confirm whether the caller keys off *ep instead.
    grpc_exec_ctx_enqueue(exec_ctx, fc->closure, false, NULL);
  } else {
    // No server yet: retry later (sched_connect re-checks the deadline).
    sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline);
  }
  gpr_free(fc);
}
// Schedule a connect attempt ~1ms of simulated time in the future, failing
// immediately (NULL endpoint, unsuccessful closure) if the deadline has
// already expired.
static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_endpoint **ep, gpr_timespec deadline) {
  if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) {
    *ep = NULL;
    grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
    return;
  }
  future_connect *fc = gpr_malloc(sizeof(*fc));
  fc->closure = closure;
  fc->ep = ep;
  fc->deadline = deadline;
  grpc_timer_init(exec_ctx, &fc->timer,
                  gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                               gpr_time_from_millis(1, GPR_TIMESPAN)),
                  do_connect, fc, gpr_now(GPR_CLOCK_MONOTONIC));
}
// Replacement for grpc_tcp_client_connect_impl: ignores the real address and
// pollset arguments and routes every connect through the simulated path.
static void my_tcp_client_connect(grpc_exec_ctx *exec_ctx,
                                  grpc_closure *closure, grpc_endpoint **ep,
                                  grpc_pollset_set *interested_parties,
                                  const struct sockaddr *addr, size_t addr_len,
                                  gpr_timespec deadline) {
  sched_connect(exec_ctx, closure, ep, deadline);
}
////////////////////////////////////////////////////////////////////////////////
// test driver
// A completion-queue tag: when its event is dequeued, validate(arg, success)
// runs and the dequeuer frees the validator.
typedef struct validator {
  void (*validate)(void *arg, bool success);
  void *arg;
} validator;

static validator *create_validator(void (*validate)(void *arg, bool success),
                                   void *arg) {
  validator *result = gpr_malloc(sizeof(validator));
  result->validate = validate;
  result->arg = arg;
  return result;
}
// Tag callback for operations that must succeed (e.g. server shutdown):
// aborts on failure, otherwise decrements the pending counter.
static void assert_success_and_decrement(void *counter, bool success) {
  GPR_ASSERT(success);
  int *pending = counter;
  --*pending;
}
static void decrement(void *counter, bool success) { --*(int *)counter; }
// State for one grpc_channel_watch_connectivity_state call.
typedef struct connectivity_watch {
  int *counter;
  gpr_timespec deadline;
} connectivity_watch;

static connectivity_watch *make_connectivity_watch(gpr_timespec s,
                                                   int *counter) {
  connectivity_watch *watch = gpr_malloc(sizeof(*watch));
  watch->deadline = s;
  watch->counter = counter;
  return watch;
}

// A watch may only fail because its deadline expired: verify that, then
// account for its completion and release the watch state.
static void validate_connectivity_watch(void *p, bool success) {
  connectivity_watch *watch = p;
  if (!success) {
    GPR_ASSERT(gpr_time_cmp(gpr_now(watch->deadline.clock_type),
                            watch->deadline) >= 0);
  }
  --*watch->counter;
  gpr_free(watch);
}
// Free a pointer that is required to be non-NULL; catches APIs unexpectedly
// returning NULL (e.g. grpc_call_get_peer in opcode 14).
static void free_non_null(void *p) {
  GPR_ASSERT(p != NULL);
  gpr_free(p);
}
// Role of a tracked call: ROOT is the sentinel head of the call ring,
// CLIENT/SERVER are live calls, PENDING_SERVER is a grpc_server_request_call
// slot not yet matched to an incoming call.
typedef enum { ROOT, CLIENT, SERVER, PENDING_SERVER } call_state_type;
typedef struct call_state {
  call_state_type type;
  grpc_call *call;
  grpc_byte_buffer *recv_message;
  grpc_status_code status;
  grpc_metadata_array recv_initial_metadata;
  grpc_metadata_array recv_trailing_metadata;
  char *recv_status_details;
  size_t recv_status_details_capacity;
  int cancelled;
  // batches / request_call operations still outstanding on this call
  int pending_ops;
  grpc_call_details call_details;
  // array of pointers to free later
  size_t num_to_free;
  size_t cap_to_free;
  void **to_free;
  // intrusive circular doubly-linked list of all tracked calls
  struct call_state *next;
  struct call_state *prev;
} call_state;
static call_state *g_active_call;
// Allocate a zeroed call_state of the given type and link it into the
// circular list just before 'sibling', or as a self-linked singleton when
// sibling is NULL.
static call_state *new_call(call_state *sibling, call_state_type type) {
  call_state *cs = gpr_malloc(sizeof(*cs));
  memset(cs, 0, sizeof(*cs));
  if (sibling == NULL) {
    cs->next = cs->prev = cs;
  } else {
    cs->next = sibling;
    cs->prev = sibling->prev;
    cs->prev->next = cs;
    cs->next->prev = cs;
  }
  cs->type = type;
  return cs;
}
// Unlink and free 'call' iff it is fully finished (no live grpc_call and no
// pending ops); returns the next node in the ring either way.
static call_state *maybe_delete_call_state(call_state *call) {
  call_state *next = call->next;
  if (call->call != NULL) return next;
  if (call->pending_ops != 0) return next;
  if (call == g_active_call) {
    // Move the active-call cursor off the node being freed; assert the ring
    // still contains at least one other node.
    g_active_call = call->next;
    GPR_ASSERT(call != g_active_call);
  }
  call->prev->next = call->next;
  call->next->prev = call->prev;
  grpc_metadata_array_destroy(&call->recv_initial_metadata);
  grpc_metadata_array_destroy(&call->recv_trailing_metadata);
  gpr_free(call->recv_status_details);
  grpc_call_details_destroy(&call->call_details);
  // NOTE(review): call->recv_message is not destroyed here — confirm a
  // completed RECV_MESSAGE cannot leak its byte buffer.
  for (size_t i = 0; i < call->num_to_free; i++) {
    gpr_free(call->to_free[i]);
  }
  gpr_free(call->to_free);
  gpr_free(call);
  return next;
}
// Register 'p' to be released when this call_state is destroyed
// (amortized growth: doubling, minimum capacity 8).
static void add_to_free(call_state *call, void *p) {
  if (call->num_to_free == call->cap_to_free) {
    size_t new_cap = GPR_MAX(8, 2 * call->cap_to_free);
    call->to_free =
        gpr_realloc(call->to_free, sizeof(*call->to_free) * new_cap);
    call->cap_to_free = new_cap;
  }
  call->to_free[call->num_to_free++] = p;
}
// Read a fuzzer-chosen metadata array: a one-byte count, then per element a
// key string, a length-prefixed value buffer, and flags. Every allocation is
// registered on 'cs' so it is freed when the call_state dies.
static void read_metadata(input_stream *inp, size_t *count,
                          grpc_metadata **metadata, call_state *cs) {
  size_t n = next_byte(inp);
  grpc_metadata *md = gpr_malloc(n * sizeof(*md));
  memset(md, 0, n * sizeof(*md));
  for (size_t i = 0; i < n; i++) {
    md[i].key = read_string(inp);
    read_buffer(inp, (char **)&md[i].value, &md[i].value_length);
    md[i].flags = read_uint32(inp);
    add_to_free(cs, (void *)md[i].key);
    add_to_free(cs, (void *)md[i].value);
  }
  add_to_free(cs, md);
  *count = n;
  *metadata = md;
}
// Destroy the underlying grpc_call and reap the call_state if nothing else
// is pending on it; returns the next node in the call ring.
static call_state *destroy_call(call_state *call) {
  grpc_call_destroy(call->call);
  call->call = NULL;
  return maybe_delete_call_state(call);
}
// Completion for grpc_server_request_call: on success the PENDING_SERVER
// slot becomes a live SERVER call; on failure the slot is reaped.
static void finished_request_call(void *csp, bool success) {
  call_state *cs = csp;
  GPR_ASSERT(cs->pending_ops > 0);
  --cs->pending_ops;
  if (!success) {
    maybe_delete_call_state(cs);
  } else {
    GPR_ASSERT(cs->call != NULL);
    cs->type = SERVER;
  }
}
// Completion for grpc_call_start_batch: drop one pending-op reference and
// reap the call_state if this was the last thing keeping it alive.
static void finished_batch(void *csp, bool success) {
  call_state *cs = csp;
  // Guard against counter underflow, matching finished_request_call above.
  GPR_ASSERT(cs->pending_ops > 0);
  --cs->pending_ops;
  maybe_delete_call_state(cs);
}
// Fuzzer entry point: interprets 'data' as a small opcode program driving
// the public gRPC core API (channels, servers, calls, batches) on a virtual
// clock, then drains and tears everything down, asserting no state leaks.
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  grpc_test_only_set_metadata_hash_seed(0);
  if (squelch) gpr_set_log_function(dont_log);
  input_stream inp = {data, data + size};
  // Install deterministic replacements for DNS, TCP connects and the clock.
  grpc_resolve_address = my_resolve_address;
  grpc_tcp_client_connect_impl = my_tcp_client_connect;
  gpr_now_impl = now_impl;
  grpc_init();
  GPR_ASSERT(g_channel == NULL);
  GPR_ASSERT(g_server == NULL);
  bool server_shutdown = false;
  int pending_server_shutdowns = 0;
  int pending_channel_watches = 0;
  int pending_pings = 0;
  g_active_call = new_call(NULL, ROOT);
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  // Run until the input is exhausted AND all global state has drained.
  while (!is_eof(&inp) || g_channel != NULL || g_server != NULL ||
         pending_channel_watches > 0 || pending_pings > 0 ||
         g_active_call->type != ROOT || g_active_call->next != g_active_call) {
    if (is_eof(&inp)) {
      // Out of opcodes: wind everything down, advancing the virtual clock a
      // second per pass so timers and deadlines can fire.
      if (g_channel != NULL) {
        grpc_channel_destroy(g_channel);
        g_channel = NULL;
      }
      if (g_server != NULL) {
        if (!server_shutdown) {
          grpc_server_shutdown_and_notify(
              g_server, cq, create_validator(assert_success_and_decrement,
                                             &pending_server_shutdowns));
          server_shutdown = true;
          pending_server_shutdowns++;
        } else if (pending_server_shutdowns == 0) {
          grpc_server_destroy(g_server);
          g_server = NULL;
        }
      }
      call_state *s = g_active_call;
      do {
        if (s->type != PENDING_SERVER && s->call != NULL) {
          s = destroy_call(s);
        } else {
          s = s->next;
        }
      } while (s != g_active_call);
      g_now = gpr_time_add(g_now, gpr_time_from_seconds(1, GPR_TIMESPAN));
    }
    switch (next_byte(&inp)) {
      // terminate on bad bytes
      default:
        end(&inp);
        break;
      // tickle completion queue
      case 0: {
        grpc_event ev = grpc_completion_queue_next(
            cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL);
        switch (ev.type) {
          case GRPC_OP_COMPLETE: {
            validator *v = ev.tag;
            v->validate(v->arg, ev.success);
            gpr_free(v);
            break;
          }
          case GRPC_QUEUE_TIMEOUT:
            break;
          case GRPC_QUEUE_SHUTDOWN:
            abort();
            break;
        }
        break;
      }
      // increment global time
      case 1: {
        g_now = gpr_time_add(
            g_now, gpr_time_from_micros(read_uint32(&inp), GPR_TIMESPAN));
        break;
      }
      // create an insecure channel
      case 2: {
        if (g_channel == NULL) {
          char *target = read_string(&inp);
          char *target_uri;
          gpr_asprintf(&target_uri, "dns:%s", target);
          grpc_channel_args *args = read_args(&inp);
          g_channel = grpc_insecure_channel_create(target_uri, args, NULL);
          GPR_ASSERT(g_channel != NULL);
          grpc_channel_args_destroy(args);
          gpr_free(target_uri);
          gpr_free(target);
        } else {
          end(&inp);
        }
        break;
      }
      // destroy a channel
      case 3: {
        if (g_channel != NULL) {
          grpc_channel_destroy(g_channel);
          g_channel = NULL;
        } else {
          end(&inp);
        }
        break;
      }
      // bring up a server
      case 4: {
        if (g_server == NULL) {
          grpc_channel_args *args = read_args(&inp);
          g_server = grpc_server_create(args, NULL);
          GPR_ASSERT(g_server != NULL);
          grpc_channel_args_destroy(args);
          grpc_server_register_completion_queue(g_server, cq, NULL);
          grpc_server_start(g_server);
          server_shutdown = false;
          GPR_ASSERT(pending_server_shutdowns == 0);
        } else {
          end(&inp);
        }
        // BUG FIX: this case previously had no break and fell through into
        // case 5, immediately scheduling shutdown for every new server.
        break;
      }
      // begin server shutdown
      case 5: {
        if (g_server != NULL) {
          grpc_server_shutdown_and_notify(
              g_server, cq, create_validator(assert_success_and_decrement,
                                             &pending_server_shutdowns));
          pending_server_shutdowns++;
          server_shutdown = true;
        } else {
          end(&inp);
        }
        break;
      }
      // cancel all calls if shutdown
      case 6: {
        if (g_server != NULL && server_shutdown) {
          grpc_server_cancel_all_calls(g_server);
        } else {
          end(&inp);
        }
        break;
      }
      // destroy server
      case 7: {
        if (g_server != NULL && server_shutdown &&
            pending_server_shutdowns == 0) {
          grpc_server_destroy(g_server);
          g_server = NULL;
        } else {
          end(&inp);
        }
        break;
      }
      // check connectivity
      case 8: {
        if (g_channel != NULL) {
          uint8_t try_to_connect = next_byte(&inp);
          if (try_to_connect == 0 || try_to_connect == 1) {
            grpc_channel_check_connectivity_state(g_channel, try_to_connect);
          } else {
            end(&inp);
          }
        } else {
          end(&inp);
        }
        break;
      }
      // watch connectivity
      case 9: {
        if (g_channel != NULL) {
          grpc_connectivity_state st =
              grpc_channel_check_connectivity_state(g_channel, 0);
          if (st != GRPC_CHANNEL_FATAL_FAILURE) {
            gpr_timespec deadline = gpr_time_add(
                gpr_now(GPR_CLOCK_REALTIME),
                gpr_time_from_micros(read_uint32(&inp), GPR_TIMESPAN));
            grpc_channel_watch_connectivity_state(
                g_channel, st, deadline, cq,
                create_validator(validate_connectivity_watch,
                                 make_connectivity_watch(
                                     deadline, &pending_channel_watches)));
            pending_channel_watches++;
          }
        } else {
          end(&inp);
        }
        break;
      }
      // create a call
      case 10: {
        bool ok = true;
        if (g_channel == NULL) ok = false;
        grpc_call *parent_call = NULL;
        if (g_active_call->type != ROOT) {
          if (g_active_call->call == NULL || g_active_call->type == CLIENT) {
            end(&inp);
            break;
          }
          parent_call = g_active_call->call;
        }
        uint32_t propagation_mask = read_uint32(&inp);
        char *method = read_string(&inp);
        char *host = read_string(&inp);
        gpr_timespec deadline =
            gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                         gpr_time_from_micros(read_uint32(&inp), GPR_TIMESPAN));
        if (ok) {
          call_state *cs = new_call(g_active_call, CLIENT);
          cs->call =
              grpc_channel_create_call(g_channel, parent_call, propagation_mask,
                                       cq, method, host, deadline, NULL);
        } else {
          end(&inp);
        }
        gpr_free(method);
        gpr_free(host);
        break;
      }
      // switch the 'current' call
      case 11: {
        g_active_call = g_active_call->next;
        break;
      }
      // queue some ops on a call
      case 12: {
        if (g_active_call->type == PENDING_SERVER ||
            g_active_call->type == ROOT || g_active_call->call == NULL) {
          end(&inp);
          break;
        }
        size_t num_ops = next_byte(&inp);
        if (num_ops > 6) {
          end(&inp);
          break;
        }
        grpc_op *ops = gpr_malloc(sizeof(grpc_op) * num_ops);
        bool ok = true;
        size_t i;
        grpc_op *op;
        for (i = 0; i < num_ops; i++) {
          op = &ops[i];
          switch (next_byte(&inp)) {
            default:
              /* invalid value */
              op->op = (grpc_op_type)-1;
              ok = false;
              break;
            case GRPC_OP_SEND_INITIAL_METADATA:
              op->op = GRPC_OP_SEND_INITIAL_METADATA;
              read_metadata(&inp, &op->data.send_initial_metadata.count,
                            &op->data.send_initial_metadata.metadata,
                            g_active_call);
              break;
            case GRPC_OP_SEND_MESSAGE:
              op->op = GRPC_OP_SEND_MESSAGE;
              op->data.send_message = read_message(&inp);
              break;
            case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
              op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
              break;
            case GRPC_OP_SEND_STATUS_FROM_SERVER:
              op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
              read_metadata(
                  &inp,
                  &op->data.send_status_from_server.trailing_metadata_count,
                  &op->data.send_status_from_server.trailing_metadata,
                  g_active_call);
              op->data.send_status_from_server.status = next_byte(&inp);
              op->data.send_status_from_server.status_details =
                  read_string(&inp);
              break;
            case GRPC_OP_RECV_INITIAL_METADATA:
              op->op = GRPC_OP_RECV_INITIAL_METADATA;
              op->data.recv_initial_metadata =
                  &g_active_call->recv_initial_metadata;
              break;
            case GRPC_OP_RECV_MESSAGE:
              op->op = GRPC_OP_RECV_MESSAGE;
              op->data.recv_message = &g_active_call->recv_message;
              break;
            case GRPC_OP_RECV_STATUS_ON_CLIENT:
              op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
              op->data.recv_status_on_client.status = &g_active_call->status;
              op->data.recv_status_on_client.trailing_metadata =
                  &g_active_call->recv_trailing_metadata;
              op->data.recv_status_on_client.status_details =
                  &g_active_call->recv_status_details;
              op->data.recv_status_on_client.status_details_capacity =
                  &g_active_call->recv_status_details_capacity;
              break;
            case GRPC_OP_RECV_CLOSE_ON_SERVER:
              op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
              op->data.recv_close_on_server.cancelled =
                  &g_active_call->cancelled;
              break;
          }
          op->reserved = NULL;
          op->flags = read_uint32(&inp);
        }
        if (ok) {
          validator *v = create_validator(finished_batch, g_active_call);
          g_active_call->pending_ops++;
          grpc_call_error error =
              grpc_call_start_batch(g_active_call->call, ops, num_ops, v, NULL);
          if (error != GRPC_CALL_OK) {
            // The batch was rejected synchronously: run the validator now,
            // since no completion event will ever be delivered for it.
            v->validate(v->arg, false);
            gpr_free(v);
          }
        } else {
          end(&inp);
        }
        // Free only the send-side resources we allocated above; recv-side
        // pointers reference call_state storage reclaimed elsewhere.
        for (i = 0; i < num_ops; i++) {
          op = &ops[i];
          switch (op->op) {
            case GRPC_OP_SEND_INITIAL_METADATA:
              break;
            case GRPC_OP_SEND_MESSAGE:
              grpc_byte_buffer_destroy(op->data.send_message);
              break;
            case GRPC_OP_SEND_STATUS_FROM_SERVER:
              gpr_free((void *)op->data.send_status_from_server.status_details);
              break;
            case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
            case GRPC_OP_RECV_INITIAL_METADATA:
            case GRPC_OP_RECV_MESSAGE:
            case GRPC_OP_RECV_STATUS_ON_CLIENT:
            case GRPC_OP_RECV_CLOSE_ON_SERVER:
              break;
          }
        }
        gpr_free(ops);
        break;
      }
      // cancel current call
      case 13: {
        if (g_active_call->type != ROOT && g_active_call->call != NULL) {
          grpc_call_cancel(g_active_call->call, NULL);
        } else {
          end(&inp);
        }
        break;
      }
      // get a calls peer
      case 14: {
        if (g_active_call->type != ROOT && g_active_call->call != NULL) {
          free_non_null(grpc_call_get_peer(g_active_call->call));
        } else {
          end(&inp);
        }
        break;
      }
      // get a channels target
      case 15: {
        if (g_channel != NULL) {
          free_non_null(grpc_channel_get_target(g_channel));
        } else {
          end(&inp);
        }
        break;
      }
      // send a ping on a channel
      case 16: {
        if (g_channel != NULL) {
          pending_pings++;
          grpc_channel_ping(g_channel, cq,
                            create_validator(decrement, &pending_pings), NULL);
        } else {
          end(&inp);
        }
        break;
      }
      // enable a tracer
      case 17: {
        char *tracer = read_string(&inp);
        grpc_tracer_set_enabled(tracer, 1);
        gpr_free(tracer);
        break;
      }
      // disable a tracer
      case 18: {
        char *tracer = read_string(&inp);
        grpc_tracer_set_enabled(tracer, 0);
        gpr_free(tracer);
        break;
      }
      // request a server call
      case 19: {
        if (g_server == NULL) {
          end(&inp);
          break;
        }
        call_state *cs = new_call(g_active_call, PENDING_SERVER);
        cs->pending_ops++;
        validator *v = create_validator(finished_request_call, cs);
        grpc_call_error error =
            grpc_server_request_call(g_server, &cs->call, &cs->call_details,
                                     &cs->recv_initial_metadata, cq, cq, v);
        if (error != GRPC_CALL_OK) {
          v->validate(v->arg, false);
          gpr_free(v);
        }
        break;
      }
      // destroy a call
      case 20: {
        if (g_active_call->type != ROOT &&
            g_active_call->type != PENDING_SERVER &&
            g_active_call->call != NULL) {
          destroy_call(g_active_call);
        } else {
          end(&inp);
        }
        break;
      }
    }
  }
  GPR_ASSERT(g_channel == NULL);
  GPR_ASSERT(g_server == NULL);
  GPR_ASSERT(g_active_call->type == ROOT);
  GPR_ASSERT(g_active_call->next == g_active_call);
  gpr_free(g_active_call);
  grpc_completion_queue_shutdown(cq);
  GPR_ASSERT(
      grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL)
          .type == GRPC_QUEUE_SHUTDOWN);
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}

@ -0,0 +1,27 @@
# tracers
"api\x00"
"channel\x00"
"channel_stack_builder\x00"
"connectivity_state\x00"
"flowctl\x00"
"http\x00"
"http1\x00"
"round_robin\x00"
"secure_endpoint\x00"
"tcp\x00"
"transport_security\x00"
# channel args
"\x00grpc.census\x00"
"\x00grpc.max_concurrent_streams\x00"
"\x00grpc.max_message_length\x00"
"\x00grpc.http2.initial_sequence_number\x00"
"\x00grpc.http2.lookahead_bytes\x00"
"\x00grpc.http2.hpack_table_size.decoder\x00"
"\x00grpc.http2.hpack_table_size.encoder\x00"
"\x01grpc.default_authority\x00"
"\x01grpc.primary_user_agent\x00"
"\x01grpc.secondary_user_agent\x00"
"\x00grpc.max_reconnect_backoff_ms\x00"
"\x01grpc.ssl_target_name_override\x00"

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save