Merge github.com:grpc/grpc into tsan-c++

Branch: pull/10765/head
Author: Craig Tiller
Commit: c4c296e2d8
Files changed (changed-line counts in parentheses):

  1. CMakeLists.txt (2)
  2. Makefile (18)
  3. WORKSPACE (6)
  4. binding.gyp (4)
  5. build.yaml (12)
  6. config.m4 (57)
  7. grpc.def (4)
  8. include/grpc++/impl/codegen/async_stream.h (125)
  9. include/grpc++/impl/codegen/async_unary_call.h (77)
  10. include/grpc++/impl/codegen/call.h (31)
  11. include/grpc++/impl/codegen/core_codegen.h (4)
  12. include/grpc++/impl/codegen/core_codegen_interface.h (4)
  13. include/grpc/grpc.h (16)
  14. package.xml (73)
  15. src/compiler/cpp_generator.cc (15)
  16. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (2)
  17. src/core/lib/surface/call.c (22)
  18. src/core/lib/surface/server.c (2)
  19. src/cpp/client/channel_cc.cc (2)
  20. src/cpp/client/client_context.cc (2)
  21. src/cpp/client/generic_stub.cc (2)
  22. src/cpp/common/core_codegen.cc (6)
  23. src/cpp/server/server_cc.cc (2)
  24. src/cpp/server/server_context.cc (7)
  25. src/csharp/ext/grpc_csharp_ext.c (2)
  26. src/node/ext/call.cc (4)
  27. src/node/performance/benchmark_client.js (16)
  28. src/node/performance/benchmark_client_express.js (10)
  29. src/node/performance/benchmark_server.js (15)
  30. src/node/performance/benchmark_server_express.js (8)
  31. src/objective-c/GRPCClient/private/GRPCWrappedCall.m (2)
  32. src/objective-c/tests/CronetUnitTests/CronetUnitTests.m (4)
  33. src/php/ext/grpc/call.c (2)
  34. src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi (2)
  35. src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi (2)
  36. src/ruby/end2end/forking_client_client.rb (69)
  37. src/ruby/end2end/forking_client_driver.rb (69)
  38. src/ruby/end2end/grpc_class_init_client.rb (77)
  39. src/ruby/end2end/grpc_class_init_driver.rb (67)
  40. src/ruby/ext/grpc/rb_call.c (2)
  41. src/ruby/ext/grpc/rb_call_credentials.c (4)
  42. src/ruby/ext/grpc/rb_channel.c (75)
  43. src/ruby/ext/grpc/rb_channel.h (2)
  44. src/ruby/ext/grpc/rb_channel_credentials.c (3)
  45. src/ruby/ext/grpc/rb_compression_options.c (7)
  46. src/ruby/ext/grpc/rb_grpc.c (21)
  47. src/ruby/ext/grpc/rb_grpc.h (2)
  48. src/ruby/ext/grpc/rb_grpc_imports.generated.c (8)
  49. src/ruby/ext/grpc/rb_grpc_imports.generated.h (12)
  50. src/ruby/ext/grpc/rb_server.c (6)
  51. templates/binding.gyp.template (4)
  52. templates/config.m4.template (7)
  53. test/core/bad_client/tests/head_of_line_blocking.c (2)
  54. test/core/bad_client/tests/large_metadata.c (4)
  55. test/core/bad_client/tests/server_registered_method.c (4)
  56. test/core/bad_client/tests/simple_request.c (2)
  57. test/core/bad_ssl/bad_ssl_test.c (2)
  58. test/core/client_channel/lb_policies_test.c (6)
  59. test/core/end2end/bad_server_response_test.c (2)
  60. test/core/end2end/connection_refused_test.c (2)
  61. test/core/end2end/dualstack_socket_test.c (4)
  62. test/core/end2end/fixtures/h2_ssl_cert.c (2)
  63. test/core/end2end/fixtures/proxy.c (4)
  64. test/core/end2end/fuzzers/api_fuzzer.c (2)
  65. test/core/end2end/fuzzers/client_fuzzer.c (2)
  66. test/core/end2end/fuzzers/server_fuzzer.c (2)
  67. test/core/end2end/gen_build_yaml.py (66)
  68. test/core/end2end/goaway_server_test.c (8)
  69. test/core/end2end/invalid_call_argument_test.c (4)
  70. test/core/end2end/no_server_test.c (2)
  71. test/core/end2end/tests/authority_not_supported.c (2)
  72. test/core/end2end/tests/bad_hostname.c (2)
  73. test/core/end2end/tests/bad_ping.c (4)
  74. test/core/end2end/tests/binary_metadata.c (4)
  75. test/core/end2end/tests/call_creds.c (6)
  76. test/core/end2end/tests/cancel_after_accept.c (4)
  77. test/core/end2end/tests/cancel_after_client_done.c (4)
  78. test/core/end2end/tests/cancel_after_invoke.c (2)
  79. test/core/end2end/tests/cancel_before_invoke.c (2)
  80. test/core/end2end/tests/cancel_in_a_vacuum.c (2)
  81. test/core/end2end/tests/cancel_with_status.c (2)
  82. test/core/end2end/tests/compressed_payload.c (8)
  83. test/core/end2end/tests/default_host.c (4)
  84. test/core/end2end/tests/disappearing_server.c (4)
  85. test/core/end2end/tests/empty_batch.c (2)
  86. test/core/end2end/tests/filter_call_init_fails.c (8)
  87. test/core/end2end/tests/filter_causes_close.c (2)
  88. test/core/end2end/tests/filter_latency.c (4)
  89. test/core/end2end/tests/graceful_server_shutdown.c (4)
  90. test/core/end2end/tests/high_initial_seqno.c (4)
  91. test/core/end2end/tests/hpack_size.c (4)
  92. test/core/end2end/tests/idempotent_request.c (4)
  93. test/core/end2end/tests/invoke_large_request.c (4)
  94. test/core/end2end/tests/keepalive_timeout.c (4)
  95. test/core/end2end/tests/large_metadata.c (4)
  96. test/core/end2end/tests/load_reporting_hook.c (4)
  97. test/core/end2end/tests/max_concurrent_streams.c (26)
  98. test/core/end2end/tests/max_connection_age.c (8)
  99. test/core/end2end/tests/max_connection_idle.c (4)
  100. test/core/end2end/tests/max_message_length.c (8)

Some files were not shown because too many files have changed in this diff.
      templates/binding.gyp.template
  52. 7
      templates/config.m4.template
  53. 2
      test/core/bad_client/tests/head_of_line_blocking.c
  54. 4
      test/core/bad_client/tests/large_metadata.c
  55. 4
      test/core/bad_client/tests/server_registered_method.c
  56. 2
      test/core/bad_client/tests/simple_request.c
  57. 2
      test/core/bad_ssl/bad_ssl_test.c
  58. 6
      test/core/client_channel/lb_policies_test.c
  59. 2
      test/core/end2end/bad_server_response_test.c
  60. 2
      test/core/end2end/connection_refused_test.c
  61. 4
      test/core/end2end/dualstack_socket_test.c
  62. 2
      test/core/end2end/fixtures/h2_ssl_cert.c
  63. 4
      test/core/end2end/fixtures/proxy.c
  64. 2
      test/core/end2end/fuzzers/api_fuzzer.c
  65. 2
      test/core/end2end/fuzzers/client_fuzzer.c
  66. 2
      test/core/end2end/fuzzers/server_fuzzer.c
  67. 66
      test/core/end2end/gen_build_yaml.py
  68. 8
      test/core/end2end/goaway_server_test.c
  69. 4
      test/core/end2end/invalid_call_argument_test.c
  70. 2
      test/core/end2end/no_server_test.c
  71. 2
      test/core/end2end/tests/authority_not_supported.c
  72. 2
      test/core/end2end/tests/bad_hostname.c
  73. 4
      test/core/end2end/tests/bad_ping.c
  74. 4
      test/core/end2end/tests/binary_metadata.c
  75. 6
      test/core/end2end/tests/call_creds.c
  76. 4
      test/core/end2end/tests/cancel_after_accept.c
  77. 4
      test/core/end2end/tests/cancel_after_client_done.c
  78. 2
      test/core/end2end/tests/cancel_after_invoke.c
  79. 2
      test/core/end2end/tests/cancel_before_invoke.c
  80. 2
      test/core/end2end/tests/cancel_in_a_vacuum.c
  81. 2
      test/core/end2end/tests/cancel_with_status.c
  82. 8
      test/core/end2end/tests/compressed_payload.c
  83. 4
      test/core/end2end/tests/default_host.c
  84. 4
      test/core/end2end/tests/disappearing_server.c
  85. 2
      test/core/end2end/tests/empty_batch.c
  86. 8
      test/core/end2end/tests/filter_call_init_fails.c
  87. 2
      test/core/end2end/tests/filter_causes_close.c
  88. 4
      test/core/end2end/tests/filter_latency.c
  89. 4
      test/core/end2end/tests/graceful_server_shutdown.c
  90. 4
      test/core/end2end/tests/high_initial_seqno.c
  91. 4
      test/core/end2end/tests/hpack_size.c
  92. 4
      test/core/end2end/tests/idempotent_request.c
  93. 4
      test/core/end2end/tests/invoke_large_request.c
  94. 4
      test/core/end2end/tests/keepalive_timeout.c
  95. 4
      test/core/end2end/tests/large_metadata.c
  96. 4
      test/core/end2end/tests/load_reporting_hook.c
  97. 26
      test/core/end2end/tests/max_concurrent_streams.c
  98. 8
      test/core/end2end/tests/max_connection_age.c
  99. 4
      test/core/end2end/tests/max_connection_idle.c
  100. 8
      test/core/end2end/tests/max_message_length.c
  101. Some files were not shown because too many files have changed in this diff Show More

@@ -9775,6 +9775,8 @@ target_include_directories(codegen_test_minimal
target_link_libraries(codegen_test_minimal
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc
gpr
${_gRPC_GFLAGS_LIBRARIES}
)

@@ -14193,28 +14193,28 @@ $(BINDIR)/$(CONFIG)/codegen_test_minimal: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/codegen_test_minimal: $(PROTOBUF_DEP) $(CODEGEN_TEST_MINIMAL_OBJS)
$(BINDIR)/$(CONFIG)/codegen_test_minimal: $(PROTOBUF_DEP) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_MINIMAL_OBJS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_minimal
$(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_minimal
endif
endif
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o:
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o:
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_codegen_test_minimal: $(CODEGEN_TEST_MINIMAL_OBJS:.o=.dep)

@@ -77,12 +77,6 @@ local_repository(
path = "third_party/gflags",
)
git_repository(
name = "mongoose_repo",
commit = "4120a97945b41195a6223a600dae8e3b19bed19e",
remote = "https://github.com/makdharma/mongoose.git"
)
new_local_repository(
name = "submodule_benchmark",
path = "third_party/benchmark",

@@ -507,7 +507,7 @@
},
]
}],
['OS == "win"', {
['OS == "win" and runtime!="electron"', {
'targets': [
{
# IMPORTANT WINDOWS BUILD INFORMATION
@@ -518,6 +518,8 @@
# when including the Node headers. The remedy for this is to remove
# the OpenSSL headers, from the downloaded Node development package,
# which is typically located in `.node-gyp` in your home directory.
#
# This is not true of Electron, which does not have OpenSSL headers.
'target_name': 'WINDOWS_BUILD_WARNING',
'rules': [
{

@@ -1516,6 +1516,7 @@ libs:
- global
targets:
- name: alarm_test
cpu_cost: 0.1
build: test
language: c
src:
@@ -1699,7 +1700,7 @@ targets:
dict: test/core/end2end/fuzzers/hpack.dictionary
maxlen: 2048
- name: combiner_test
cpu_cost: 30
cpu_cost: 10
build: test
language: c
src:
@@ -1720,6 +1721,7 @@ targets:
- gpr_test_util
- gpr
- name: concurrent_connectivity_test
cpu_cost: 2.0
build: test
language: c
src:
@@ -1806,6 +1808,7 @@ targets:
- gpr_test_util
- gpr
- name: ev_epoll_linux_test
cpu_cost: 3
build: test
language: c
src:
@@ -1975,6 +1978,7 @@ targets:
- gpr_test_util
- gpr
- name: gpr_cpu_test
cpu_cost: 30
build: test
language: c
src:
@@ -2024,7 +2028,7 @@ targets:
- gpr_test_util
- gpr
- name: gpr_spinlock_test
cpu_cost: 10
cpu_cost: 3
build: test
language: c
src:
@@ -3549,6 +3553,9 @@ targets:
- src/proto/grpc/testing/services.proto
- src/proto/grpc/testing/stats.proto
- test/cpp/codegen/codegen_test_minimal.cc
deps:
- grpc
- gpr
filegroups:
- grpc++_codegen_base
- grpc++_codegen_base_src
@@ -4488,6 +4495,7 @@ php_config_m4:
deps:
- grpc
- gpr
- ares
- boringssl
headers:
- src/php/ext/grpc/byte_buffer.h

@@ -8,6 +8,8 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/src/php/ext/grpc)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/boringssl/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/cares)
LIBS="-lpthread $LIBS"
@@ -18,8 +20,11 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_LIBRARY(dl)
case $host in
*darwin*) ;;
*darwin*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_darwin)
;;
*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_linux)
PHP_ADD_LIBRARY(rt,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt)
;;
@@ -622,6 +627,55 @@ if test "$PHP_GRPC" != "no"; then
third_party/boringssl/ssl/tls13_server.c \
third_party/boringssl/ssl/tls_method.c \
third_party/boringssl/ssl/tls_record.c \
third_party/cares/cares/ares__close_sockets.c \
third_party/cares/cares/ares__get_hostent.c \
third_party/cares/cares/ares__read_line.c \
third_party/cares/cares/ares__timeval.c \
third_party/cares/cares/ares_cancel.c \
third_party/cares/cares/ares_create_query.c \
third_party/cares/cares/ares_data.c \
third_party/cares/cares/ares_destroy.c \
third_party/cares/cares/ares_expand_name.c \
third_party/cares/cares/ares_expand_string.c \
third_party/cares/cares/ares_fds.c \
third_party/cares/cares/ares_free_hostent.c \
third_party/cares/cares/ares_free_string.c \
third_party/cares/cares/ares_getenv.c \
third_party/cares/cares/ares_gethostbyaddr.c \
third_party/cares/cares/ares_gethostbyname.c \
third_party/cares/cares/ares_getnameinfo.c \
third_party/cares/cares/ares_getopt.c \
third_party/cares/cares/ares_getsock.c \
third_party/cares/cares/ares_init.c \
third_party/cares/cares/ares_library_init.c \
third_party/cares/cares/ares_llist.c \
third_party/cares/cares/ares_mkquery.c \
third_party/cares/cares/ares_nowarn.c \
third_party/cares/cares/ares_options.c \
third_party/cares/cares/ares_parse_a_reply.c \
third_party/cares/cares/ares_parse_aaaa_reply.c \
third_party/cares/cares/ares_parse_mx_reply.c \
third_party/cares/cares/ares_parse_naptr_reply.c \
third_party/cares/cares/ares_parse_ns_reply.c \
third_party/cares/cares/ares_parse_ptr_reply.c \
third_party/cares/cares/ares_parse_soa_reply.c \
third_party/cares/cares/ares_parse_srv_reply.c \
third_party/cares/cares/ares_parse_txt_reply.c \
third_party/cares/cares/ares_platform.c \
third_party/cares/cares/ares_process.c \
third_party/cares/cares/ares_query.c \
third_party/cares/cares/ares_search.c \
third_party/cares/cares/ares_send.c \
third_party/cares/cares/ares_strcasecmp.c \
third_party/cares/cares/ares_strdup.c \
third_party/cares/cares/ares_strerror.c \
third_party/cares/cares/ares_timeout.c \
third_party/cares/cares/ares_version.c \
third_party/cares/cares/ares_writev.c \
third_party/cares/cares/bitncmp.c \
third_party/cares/cares/inet_net_pton.c \
third_party/cares/cares/inet_ntop.c \
third_party/cares/cares/windows_port.c \
, $ext_shared, , -Wall -Werror \
-Wno-parentheses-equality -Wno-unused-value -std=c11 \
-fvisibility=hidden -DOPENSSL_NO_ASM -D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN \
@@ -724,5 +778,6 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/third_party/boringssl/crypto/x509)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/boringssl/crypto/x509v3)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/boringssl/ssl)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/cares/cares)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/nanopb)
fi

@@ -70,6 +70,7 @@ EXPORTS
grpc_channel_ping
grpc_channel_register_call
grpc_channel_create_registered_call
grpc_call_arena_alloc
grpc_call_start_batch
grpc_call_get_peer
grpc_census_call_set_context
@@ -81,7 +82,8 @@ EXPORTS
grpc_channel_destroy
grpc_call_cancel
grpc_call_cancel_with_status
grpc_call_destroy
grpc_call_ref
grpc_call_unref
grpc_server_request_call
grpc_server_register_method
grpc_server_request_registered_call

@@ -145,17 +145,19 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
/// Create a stream and write the first request out.
template <class W>
ClientAsyncReader(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
const W& request, void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
init_ops_.ClientSendClose();
call_.PerformOps(&init_ops_);
static ClientAsyncReader* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, const W& request,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader)))
ClientAsyncReader(call, context, request, tag);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReader));
}
void ReadInitialMetadata(void* tag) override {
@@ -185,6 +187,19 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
}
private:
template <class W>
ClientAsyncReader(Call call, ClientContext* context, const W& request,
void* tag)
: context_(context), call_(call) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
init_ops_.ClientSendClose();
call_.PerformOps(&init_ops_);
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
@@ -210,23 +225,19 @@ template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
template <class R>
ClientAsyncWriter(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
R* response, void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
if (context_->initial_metadata_corked_) {
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
static ClientAsyncWriter* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, R* response,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncWriter)))
ClientAsyncWriter(call, context, response, tag);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncWriter));
}
void ReadInitialMetadata(void* tag) override {
@@ -271,6 +282,24 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
private:
template <class R>
ClientAsyncWriter(Call call, ClientContext* context, R* response, void* tag)
: context_(context), call_(call) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
if (context_->initial_metadata_corked_) {
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
@@ -298,21 +327,20 @@ template <class W, class R>
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
public:
ClientAsyncReaderWriter(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
if (context_->initial_metadata_corked_) {
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
static ClientAsyncReaderWriter* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context, void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReaderWriter)))
ClientAsyncReaderWriter(call, context, tag);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReaderWriter));
}
void ReadInitialMetadata(void* tag) override {
@@ -366,6 +394,21 @@ class ClientAsyncReaderWriter final
}
private:
ClientAsyncReaderWriter(Call call, ClientContext* context, void* tag)
: context_(context), call_(call) {
if (context_->initial_metadata_corked_) {
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
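The same shape repeats across ClientAsyncReader, ClientAsyncWriter, and ClientAsyncReaderWriter: the public constructor becomes private, a static Create() placement-news the object into memory obtained from grpc_call_arena_alloc, and a no-op sized operator delete keeps a stray delete from freeing arena-owned storage. A minimal standalone C++ sketch of that pattern, using hypothetical ArenaCall/AsyncReader stand-ins rather than real gRPC types:

#include <cassert>
#include <cstddef>
#include <new>
#include <vector>

// Hypothetical stand-in for a call that owns an arena; the real code gets
// its memory from grpc_call_arena_alloc(call, size).
struct ArenaCall {
  std::vector<char> buf = std::vector<char>(1024);
  std::size_t used = 0;
  void* Alloc(std::size_t n) {
    // bump-pointer allocation, kept max-aligned for simplicity
    used = (used + alignof(std::max_align_t) - 1) &
           ~(alignof(std::max_align_t) - 1);
    void* p = buf.data() + used;
    used += n;
    return p;
  }
};

class AsyncReader {
 public:
  static AsyncReader* Create(ArenaCall* call, void* tag) {
    // placement-new into call-owned storage: lifetime is tied to the call
    return new (call->Alloc(sizeof(AsyncReader))) AsyncReader(call, tag);
  }
  // no-op deallocation: the bytes belong to the call's arena, so a stray
  // delete must not free them
  static void operator delete(void* /*ptr*/, std::size_t size) {
    assert(size == sizeof(AsyncReader));
  }

 private:
  AsyncReader(ArenaCall* call, void* tag) : call_(call), tag_(tag) {}
  ArenaCall* call_;
  void* tag_;
};

int main() {
  ArenaCall call;
  AsyncReader* r = AsyncReader::Create(&call, /*tag=*/nullptr);
  (void)r;  // never deleted explicitly; the storage dies with `call`
  return 0;
}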

@@ -34,6 +34,7 @@
#ifndef GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
#define GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
#include <assert.h>
#include <grpc++/impl/codegen/call.h>
#include <grpc++/impl/codegen/channel_interface.h>
#include <grpc++/impl/codegen/client_context.h>
@@ -59,48 +60,60 @@ class ClientAsyncResponseReader final
: public ClientAsyncResponseReaderInterface<R> {
public:
template <class W>
ClientAsyncResponseReader(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
const W& request)
: context_(context),
call_(channel->CreateCall(method, context, cq)),
collection_(std::make_shared<CallOpSetCollection>()) {
collection_->init_buf_.SetCollection(collection_);
collection_->init_buf_.SendInitialMetadata(
context->send_initial_metadata_, context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(collection_->init_buf_.SendMessage(request).ok());
collection_->init_buf_.ClientSendClose();
call_.PerformOps(&collection_->init_buf_);
static ClientAsyncResponseReader* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context,
const W& request) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader)))
ClientAsyncResponseReader(call, context, request);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncResponseReader));
}
void ReadInitialMetadata(void* tag) {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
collection_->meta_buf_.SetCollection(collection_);
collection_->meta_buf_.set_output_tag(tag);
collection_->meta_buf_.RecvInitialMetadata(context_);
call_.PerformOps(&collection_->meta_buf_);
meta_buf_.set_output_tag(tag);
meta_buf_.RecvInitialMetadata(context_);
call_.PerformOps(&meta_buf_);
}
void Finish(R* msg, Status* status, void* tag) {
collection_->finish_buf_.SetCollection(collection_);
collection_->finish_buf_.set_output_tag(tag);
finish_buf_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
collection_->finish_buf_.RecvInitialMetadata(context_);
finish_buf_.RecvInitialMetadata(context_);
}
collection_->finish_buf_.RecvMessage(msg);
collection_->finish_buf_.AllowNoMessage();
collection_->finish_buf_.ClientRecvStatus(context_, status);
call_.PerformOps(&collection_->finish_buf_);
finish_buf_.RecvMessage(msg);
finish_buf_.AllowNoMessage();
finish_buf_.ClientRecvStatus(context_, status);
call_.PerformOps(&finish_buf_);
}
private:
ClientContext* context_;
ClientContext* const context_;
Call call_;
class CallOpSetCollection : public CallOpSetCollectionInterface {
public:
template <class W>
ClientAsyncResponseReader(Call call, ClientContext* context, const W& request)
: context_(context), call_(call) {
init_buf_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_buf_.SendMessage(request).ok());
init_buf_.ClientSendClose();
call_.PerformOps(&init_buf_);
}
// disable operator new
static void* operator new(std::size_t size);
static void* operator new(std::size_t size, void* p) { return p; };
SneakyCallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
init_buf_;
@@ -109,8 +122,6 @@ class ClientAsyncResponseReader final
CallOpClientRecvStatus>
finish_buf_;
};
std::shared_ptr<CallOpSetCollection> collection_;
};
template <class W>
class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
@@ -179,4 +190,12 @@ class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
} // namespace grpc
namespace std {
template <class R>
class default_delete<grpc::ClientAsyncResponseReader<R>> {
public:
void operator()(void* p) {}
};
}
#endif // GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
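The std::default_delete specialization at the end of the header is what lets generated code keep returning std::unique_ptr<grpc::ClientAsyncResponseReader<R>> even though the reader now lives in the call arena: unique_ptr's deleter becomes a no-op and the arena reclaims the storage when the call itself dies. A small C++ sketch of the mechanism, with demo::Widget as a hypothetical arena-resident type:

#include <memory>
#include <new>

namespace demo {
struct Widget {};  // imagine this was placement-new'd into a call arena
}

namespace std {
// Mirrors the specialization added above: unique_ptr invokes this at
// destruction, and it deliberately frees nothing.
template <>
class default_delete<demo::Widget> {
 public:
  void operator()(void* /*p*/) {}
};
}  // namespace std

int main() {
  alignas(demo::Widget) static char storage[sizeof(demo::Widget)];
  demo::Widget* w = new (storage) demo::Widget;
  std::unique_ptr<demo::Widget> owner(w);  // scope exit frees nothing
  return 0;
}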

@@ -34,6 +34,7 @@
#ifndef GRPCXX_IMPL_CODEGEN_CALL_H
#define GRPCXX_IMPL_CODEGEN_CALL_H
#include <assert.h>
#include <cstring>
#include <functional>
#include <map>
@@ -49,6 +50,7 @@
#include <grpc++/impl/codegen/status.h>
#include <grpc++/impl/codegen/string_ref.h>
#include <grpc/impl/codegen/atm.h>
#include <grpc/impl/codegen/compression_types.h>
#include <grpc/impl/codegen/grpc_types.h>
@@ -578,17 +580,6 @@ class CallOpClientRecvStatus {
grpc_slice error_message_;
};
/// An abstract collection of CallOpSet's, to be used whenever
/// CallOpSet objects must be thought of as a group. Each member
/// of the group should have a shared_ptr back to the collection,
/// as will the object that instantiates the collection, allowing
/// for automatic ref-counting. In practice, any actual use should
/// derive from this base class. This is specifically necessary if
/// some of the CallOpSet's in the collection are "Sneaky" and don't
/// report back to the C++ layer CQ operations
class CallOpSetCollectionInterface
: public std::enable_shared_from_this<CallOpSetCollectionInterface> {};
/// An abstract collection of call ops, used to generate the
/// grpc_call_op structure to pass down to the lower layers,
/// and as it is-a CompletionQueueTag, also massages the final
@@ -596,18 +587,9 @@ class CallOpSetCollectionInterface
/// API.
class CallOpSetInterface : public CompletionQueueTag {
public:
CallOpSetInterface() {}
/// Fills in grpc_op, starting from ops[*nops] and moving
/// upwards.
virtual void FillOps(grpc_op* ops, size_t* nops) = 0;
/// Mark this as belonging to a collection if needed
void SetCollection(std::shared_ptr<CallOpSetCollectionInterface> collection) {
collection_ = collection;
}
protected:
std::shared_ptr<CallOpSetCollectionInterface> collection_;
virtual void FillOps(grpc_call* call, grpc_op* ops, size_t* nops) = 0;
};
/// Primary implementation of CallOpSetInterface.
@@ -628,13 +610,15 @@ class CallOpSet : public CallOpSetInterface,
public Op6 {
public:
CallOpSet() : return_tag_(this) {}
void FillOps(grpc_op* ops, size_t* nops) override {
void FillOps(grpc_call* call, grpc_op* ops, size_t* nops) override {
this->Op1::AddOp(ops, nops);
this->Op2::AddOp(ops, nops);
this->Op3::AddOp(ops, nops);
this->Op4::AddOp(ops, nops);
this->Op5::AddOp(ops, nops);
this->Op6::AddOp(ops, nops);
g_core_codegen_interface->grpc_call_ref(call);
call_ = call;
}
bool FinalizeResult(void** tag, bool* status) override {
@@ -645,7 +629,7 @@ class CallOpSet : public CallOpSetInterface,
this->Op5::FinishOp(status);
this->Op6::FinishOp(status);
*tag = return_tag_;
collection_.reset(); // drop the ref at this point
g_core_codegen_interface->grpc_call_unref(call_);
return true;
}
@@ -653,6 +637,7 @@ class CallOpSet : public CallOpSetInterface,
private:
void* return_tag_;
grpc_call* call_;
};
/// A CallOpSet that does not post completions to the completion queue.
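This FillOps signature change is the heart of the fix: instead of keeping op sets alive through the shared_ptr-based CallOpSetCollectionInterface, each CallOpSet takes a grpc_call ref when its ops are filled and drops it in FinalizeResult, so the call cannot be destroyed while a batch is still in flight. A condensed C++ sketch of that ref/unref pairing, with RefCountedCall as a hypothetical stand-in for grpc_call_ref/grpc_call_unref:

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for grpc_call's external refcount; Ref/Unref play
// the role of grpc_call_ref/grpc_call_unref.
struct RefCountedCall {
  std::atomic<int> refs{1};
  void Ref() { refs.fetch_add(1, std::memory_order_relaxed); }
  void Unref() {
    if (refs.fetch_sub(1, std::memory_order_acq_rel) == 1)
      std::puts("call destroyed");
  }
};

class CallOpSet {
 public:
  void FillOps(RefCountedCall* call) {
    call->Ref();  // batch in flight: pin the call
    call_ = call;
  }
  bool FinalizeResult() {
    call_->Unref();  // batch done: release the pin
    return true;
  }

 private:
  RefCountedCall* call_ = nullptr;
};

int main() {
  RefCountedCall call;
  CallOpSet ops;
  ops.FillOps(&call);    // as in Channel/Server::PerformOpsOnCall
  call.Unref();          // application releases the call while ops pend
  ops.FinalizeResult();  // last ref dropped here; teardown is race-free
  return 0;
}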

@@ -68,6 +68,10 @@ class CoreCodegen : public CoreCodegenInterface {
void gpr_cv_signal(gpr_cv* cv) override;
void gpr_cv_broadcast(gpr_cv* cv) override;
void grpc_call_ref(grpc_call* call) override;
void grpc_call_unref(grpc_call* call) override;
virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) override;
void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override;
int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,

@@ -96,6 +96,10 @@ class CoreCodegenInterface {
virtual grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
size_t nslices) = 0;
virtual void grpc_call_ref(grpc_call* call) = 0;
virtual void grpc_call_unref(grpc_call* call) = 0;
virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) = 0;
virtual grpc_slice grpc_empty_slice() = 0;
virtual grpc_slice grpc_slice_malloc(size_t length) = 0;
virtual void grpc_slice_unref(grpc_slice slice) = 0;

@@ -265,6 +265,10 @@ GRPCAPI grpc_call *grpc_channel_create_registered_call(
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved);
/** Allocate memory in the grpc_call arena: this memory is automatically
discarded at call completion */
GRPCAPI void *grpc_call_arena_alloc(grpc_call *call, size_t size);
/** Start a batch of operations defined in the array ops; when complete, post a
completion of type 'tag' to the completion queue bound to the call.
The order of ops specified in the batch has no significance.
@@ -341,7 +345,7 @@ GRPCAPI void grpc_channel_destroy(grpc_channel *channel);
/** Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_destroy
are thread-safe, and can be called at any point before grpc_call_unref
is called.*/
GRPCAPI grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
@@ -356,9 +360,13 @@ GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
const char *description,
void *reserved);
/** Destroy a call.
THREAD SAFETY: grpc_call_destroy is thread-compatible */
GRPCAPI void grpc_call_destroy(grpc_call *call);
/** Ref a call.
THREAD SAFETY: grpc_call_unref is thread-compatible */
GRPCAPI void grpc_call_ref(grpc_call *call);
/** Unref a call.
THREAD SAFETY: grpc_call_unref is thread-compatible */
GRPCAPI void grpc_call_unref(grpc_call *call);
/** Request notification of a new call.
Once a call is received, a notification tagged with \a tag_new is added to
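Taken together, the grpc.h changes replace grpc_call_destroy with a grpc_call_ref/grpc_call_unref pair and expose per-call arena allocation. A hedged sketch of the resulting C-API lifecycle (channel and completion-queue setup elided; the method path is purely illustrative):

#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/time.h>

// Sketch only: assumes `channel` and `cq` were created elsewhere.
void call_lifecycle_sketch(grpc_channel* channel, grpc_completion_queue* cq) {
  grpc_slice method = grpc_slice_from_static_string("/demo.Echo/Ping");
  grpc_call* call = grpc_channel_create_call(
      channel, /*parent_call=*/NULL, GRPC_PROPAGATE_DEFAULTS, cq, method,
      /*host=*/NULL, gpr_inf_future(GPR_CLOCK_REALTIME), /*reserved=*/NULL);

  // per-call scratch memory, reclaimed automatically when the call dies
  void* scratch = grpc_call_arena_alloc(call, 64);
  (void)scratch;

  grpc_call_ref(call);    // e.g. a pending batch pins the call
  grpc_call_unref(call);  // that batch completed: drop its pin
  grpc_call_unref(call);  // drop the initial ref (was grpc_call_destroy)
}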

@@ -1028,6 +1028,79 @@
<file baseinstalldir="/" name="third_party/boringssl/ssl/tls13_server.c" role="src" />
<file baseinstalldir="/" name="third_party/boringssl/ssl/tls_method.c" role="src" />
<file baseinstalldir="/" name="third_party/boringssl/ssl/tls_record.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_data.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_dns.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getenv.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getopt.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_inet_net_pton.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_iphlpapi.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_ipv6.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_library_init.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_llist.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_nowarn.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_platform.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_private.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_rules.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_setup.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strcasecmp.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strdup.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_version.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/bitncmp.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/config-win32.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/setup_once.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/ares_build.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/config_linux/ares_config.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/config_darwin/ares_config.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__close_sockets.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__get_hostent.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__read_line.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__timeval.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_cancel.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_create_query.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_data.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_destroy.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_expand_name.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_expand_string.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_fds.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_free_hostent.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_free_string.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getenv.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_gethostbyaddr.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_gethostbyname.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getnameinfo.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getopt.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getsock.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_init.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_library_init.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_llist.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_mkquery.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_nowarn.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_options.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_a_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_aaaa_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_mx_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_naptr_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_ns_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_ptr_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_soa_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_srv_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_txt_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_platform.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_process.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_query.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_search.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_send.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strcasecmp.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strdup.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strerror.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_timeout.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_version.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_writev.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/bitncmp.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/inet_net_pton.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/inet_ntop.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/windows_port.c" role="src" />
</dir>
</contents>
<dependencies>

@@ -1106,8 +1106,8 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Print(*vars,
" return new "
"::grpc::ClientAsyncResponseReader< $Response$>("
" return "
"::grpc::ClientAsyncResponseReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request);\n"
@@ -1129,7 +1129,7 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return new ::grpc::ClientAsyncWriter< $Request$>("
" return ::grpc::ClientAsyncWriter< $Request$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, response, tag);\n"
@@ -1152,7 +1152,7 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return new ::grpc::ClientAsyncReader< $Response$>("
" return ::grpc::ClientAsyncReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, tag);\n"
@@ -1174,9 +1174,10 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return new "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>("
printer->Print(
*vars,
" return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, tag);\n"

@@ -1153,7 +1153,7 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_destroy(glb_policy->lb_call);
grpc_call_unref(glb_policy->lb_call);
glb_policy->lb_call = NULL;
grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);

@@ -160,6 +160,7 @@ typedef struct {
} child_call;
struct grpc_call {
gpr_refcount ext_ref;
gpr_arena *arena;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
@@ -170,7 +171,7 @@
/* client or server call */
bool is_client;
/** has grpc_call_destroy been called */
/** has grpc_call_unref been called */
bool destroy_called;
/** flag indicating that cancellation is inherited */
bool cancellation_is_inherited;
@@ -282,6 +283,10 @@ static void add_init_error(grpc_error **composite, grpc_error *new) {
*composite = grpc_error_add_child(*composite, new);
}
void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
return gpr_arena_alloc(call->arena, size);
}
static parent_call *get_or_create_parent_call(grpc_call *call) {
parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
@@ -312,6 +317,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
call = gpr_arena_alloc(arena,
sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
*out_call = call;
call->channel = args->channel;
@@ -408,7 +414,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
call->send_deadline = send_deadline;
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
/* initial refcount dropped by grpc_call_unref */
grpc_call_element_args call_args = {
.call_stack = CALL_STACK_FROM_CALL(call),
.server_transport_data = args->server_transport_data,
@@ -533,12 +539,16 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GPR_TIMER_END("destroy_call", 0);
}
void grpc_call_destroy(grpc_call *c) {
void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
child_call *cc = c->child_call;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_destroy", 0);
GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
GPR_TIMER_BEGIN("grpc_call_unref", 0);
GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c));
if (cc) {
parent_call *pc = get_parent_call(cc->parent);
@@ -565,7 +575,7 @@ void grpc_call_destroy(grpc_call *c) {
}
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_destroy", 0);
GPR_TIMER_END("grpc_call_unref", 0);
}
grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
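Note the two-level counting here: ext_ref (seeded to 1 at creation) tracks API-visible references taken via grpc_call_ref, while the call stack's internal refs track in-flight core work; only when the last external ref drops does grpc_call_unref release the internal "destroy" ref. A compact C++ sketch of that layering, with all names hypothetical:

#include <atomic>
#include <cstdio>

// Hypothetical two-level refcount mirroring ext_ref plus internal refs.
struct Call {
  std::atomic<int> ext_refs{1};       // grpc_call_ref / grpc_call_unref
  std::atomic<int> internal_refs{1};  // held by in-flight core work

  void InternalRef() { internal_refs.fetch_add(1); }
  void InternalUnref() {
    if (internal_refs.fetch_sub(1) == 1) std::puts("destroy_call()");
  }
  void ExtRef() { ext_refs.fetch_add(1); }
  void ExtUnref() {
    if (ext_refs.fetch_sub(1) != 1) return;  // still externally referenced
    InternalUnref();  // last external ref: drop the internal "destroy" ref
  }
};

int main() {
  Call c;
  c.InternalRef();    // e.g. a batch pending inside the core
  c.ExtUnref();       // the application is done with the call
  c.InternalUnref();  // the batch completes; destroy_call() runs only now
  return 0;
}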

@@ -345,7 +345,7 @@ static void request_matcher_destroy(request_matcher *rm) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
grpc_error *error) {
grpc_call_destroy(grpc_call_from_top_element(elem));
grpc_call_unref(grpc_call_from_top_element(elem));
}
static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,

@@ -131,7 +131,7 @@ void Channel::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
static const size_t MAX_OPS = 8;
size_t nops = 0;
grpc_op cops[MAX_OPS];
ops->FillOps(cops, &nops);
ops->FillOps(call->call(), cops, &nops);
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(call->call(), cops, nops, ops, nullptr));
}

@@ -74,7 +74,7 @@ ClientContext::ClientContext()
ClientContext::~ClientContext() {
if (call_) {
grpc_call_destroy(call_);
grpc_call_unref(call_);
}
g_client_callbacks->Destructor(this);
}

@@ -42,7 +42,7 @@ std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::Call(
ClientContext* context, const grpc::string& method, CompletionQueue* cq,
void* tag) {
return std::unique_ptr<GenericClientAsyncReaderWriter>(
new GenericClientAsyncReaderWriter(
GenericClientAsyncReaderWriter::Create(
channel_.get(), cq,
RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING), context, tag));
}

@@ -96,6 +96,12 @@ void CoreCodegen::grpc_byte_buffer_destroy(grpc_byte_buffer* bb) {
::grpc_byte_buffer_destroy(bb);
}
void CoreCodegen::grpc_call_ref(grpc_call* call) { ::grpc_call_ref(call); }
void CoreCodegen::grpc_call_unref(grpc_call* call) { ::grpc_call_unref(call); }
void* CoreCodegen::grpc_call_arena_alloc(grpc_call* call, size_t length) {
return ::grpc_call_arena_alloc(call, length);
}
int CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
grpc_byte_buffer* buffer) {
return ::grpc_byte_buffer_reader_init(reader, buffer);

@@ -607,7 +607,7 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
static const size_t MAX_OPS = 8;
size_t nops = 0;
grpc_op cops[MAX_OPS];
ops->FillOps(cops, &nops);
ops->FillOps(call->call(), cops, &nops);
auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr);
GPR_ASSERT(GRPC_CALL_OK == result);
}

@@ -62,7 +62,7 @@ class ServerContext::CompletionOp final : public CallOpSetInterface {
finalized_(false),
cancelled_(0) {}
void FillOps(grpc_op* ops, size_t* nops) override;
void FillOps(grpc_call* call, grpc_op* ops, size_t* nops) override;
bool FinalizeResult(void** tag, bool* status) override;
bool CheckCancelled(CompletionQueue* cq) {
@@ -100,7 +100,8 @@ void ServerContext::CompletionOp::Unref() {
}
}
void ServerContext::CompletionOp::FillOps(grpc_op* ops, size_t* nops) {
void ServerContext::CompletionOp::FillOps(grpc_call* call, grpc_op* ops,
size_t* nops) {
ops->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
ops->data.recv_close_on_server.cancelled = &cancelled_;
ops->flags = 0;
@@ -151,7 +152,7 @@ ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
ServerContext::~ServerContext() {
if (call_) {
grpc_call_destroy(call_);
grpc_call_unref(call_);
}
if (completion_op_) {
completion_op_->Unref();

@@ -526,7 +526,7 @@ GPR_EXPORT char *GPR_CALLTYPE grpcsharp_call_get_peer(grpc_call *call) {
GPR_EXPORT void GPR_CALLTYPE gprsharp_free(void *p) { gpr_free(p); }
GPR_EXPORT void GPR_CALLTYPE grpcsharp_call_destroy(grpc_call *call) {
grpc_call_destroy(call);
grpc_call_unref(call);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_unary(

@@ -248,6 +248,7 @@ class SendMessageOp : public Op {
out->data.send_message.send_message = send_message;
return true;
}
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
@@ -264,6 +265,7 @@ class SendClientCloseOp : public Op {
EscapableHandleScope scope;
return scope.Escape(Nan::True());
}
bool ParseOp(Local<Value> value, grpc_op *out) { return true; }
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
@@ -501,7 +503,7 @@ void DestroyTag(void *tag) {
void Call::DestroyCall() {
if (this->wrapped_call != NULL) {
grpc_call_destroy(this->wrapped_call);
grpc_call_unref(this->wrapped_call);
this->wrapped_call = NULL;
}
}

@@ -88,7 +88,10 @@ function timeDiffToNanos(time_diff) {
*/
function BenchmarkClient(server_targets, channels, histogram_params,
security_params) {
var options = {};
var options = {
"grpc.max_receive_message_length": -1,
"grpc.max_send_message_length": -1
};
var creds;
if (security_params) {
var ca_path;
@@ -180,6 +183,8 @@ BenchmarkClient.prototype.startClosedLoop = function(
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var makeCall;
var argument;
@@ -270,6 +275,8 @@ BenchmarkClient.prototype.startPoisson = function(
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var makeCall;
var argument;
@@ -354,9 +361,11 @@
*/
BenchmarkClient.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
var histogram = this.histogram;
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
this.histogram = new Histogram(histogram.resolution,
histogram.max_possible);
}
@@ -371,9 +380,8 @@ BenchmarkClient.prototype.mark = function(reset) {
count: histogram.getCount()
},
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};

@@ -95,7 +95,6 @@ function BenchmarkClient(server_targets, channels, histogram_params,
var host_port;
host_port = server_targets[i % server_targets.length].split(':');
var new_options = _.assign({hostname: host_port[0], port: +host_port[1]}, options);
new_options.agent = new protocol.Agent(new_options);
this.client_options[i] = new_options;
}
@@ -137,6 +136,7 @@ BenchmarkClient.prototype.startClosedLoop = function(
}
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var argument = {
response_size: resp_size,
@@ -207,6 +207,7 @@ BenchmarkClient.prototype.startPoisson = function(
}
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var argument = {
response_size: resp_size,
@@ -264,9 +265,11 @@
*/
BenchmarkClient.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
var histogram = this.histogram;
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
this.histogram = new Histogram(histogram.resolution,
histogram.max_possible);
}
@@ -281,9 +284,8 @@ BenchmarkClient.prototype.mark = function(reset) {
count: histogram.getCount()
},
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};

@@ -132,7 +132,12 @@ function BenchmarkServer(host, port, tls, generic, response_size) {
server_creds = grpc.ServerCredentials.createInsecure();
}
var server = new grpc.Server();
var options = {
"grpc.max_receive_message_length": -1,
"grpc.max_send_message_length": -1
};
var server = new grpc.Server(options);
this.port = server.bind(host + ':' + port, server_creds);
if (generic) {
server.addService(genericService, {
@@ -156,6 +161,7 @@ util.inherits(BenchmarkServer, EventEmitter);
BenchmarkServer.prototype.start = function() {
this.server.start();
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
this.emit('started');
};
@@ -175,14 +181,15 @@
*/
BenchmarkServer.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
}
return {
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};

@@ -81,6 +81,7 @@ BenchmarkServer.prototype.start = function() {
var self = this;
this.server.listen(this.input_port, this.input_hostname, function() {
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
self.emit('started');
});
};
@@ -91,14 +92,15 @@ BenchmarkServer.prototype.getPort = function() {
BenchmarkServer.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
}
return {
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};

@@ -315,7 +315,7 @@
}
- (void)dealloc {
grpc_call_destroy(_call);
grpc_call_unref(_call);
}
@end

@@ -258,7 +258,7 @@ unsigned int parse_h2_length(const char *field) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
@@ -437,7 +437,7 @@ unsigned int parse_h2_length(const char *field) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@@ -65,7 +65,7 @@ static zend_object_handlers call_ce_handlers;
/* Frees and destroys an instance of wrapped_grpc_call */
PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_call)
if (p->owned && p->wrapped != NULL) {
grpc_call_destroy(p->wrapped);
grpc_call_unref(p->wrapped);
}
PHP_GRPC_FREE_WRAPPED_FUNC_END()

@@ -106,7 +106,7 @@ cdef class Call:
def __dealloc__(self):
if self.c_call != NULL:
grpc_call_destroy(self.c_call)
grpc_call_unref(self.c_call)
grpc_shutdown()
# The object *should* always be valid from Python. Used for debugging.

@@ -329,7 +329,7 @@ cdef extern from "grpc/grpc.h":
const char *description,
void *reserved) nogil
char *grpc_call_get_peer(grpc_call *call) nogil
void grpc_call_destroy(grpc_call *call) nogil
void grpc_call_unref(grpc_call *call) nogil
grpc_channel *grpc_insecure_channel_create(const char *target,
const grpc_channel_args *args,

@@ -0,0 +1,69 @@
#!/usr/bin/env ruby
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Prompted by and minimal repro of https://github.com/grpc/grpc/issues/10658
require_relative './end2end_common'
def main
server_port = ''
OptionParser.new do |opts|
opts.on('--client_control_port=P', String) do
STDERR.puts 'client control port not used'
end
opts.on('--server_port=P', String) do |p|
server_port = p
end
end.parse!
p = fork do
stub = Echo::EchoServer::Stub.new("localhost:#{server_port}",
:this_channel_is_insecure)
stub.echo(Echo::EchoRequest.new(request: 'hello'))
end
begin
Timeout.timeout(10) do
Process.wait(p)
end
rescue Timeout::Error
STDERR.puts "timeout waiting for forked process #{p}"
Process.kill('SIGKILL', p)
Process.wait(p)
raise 'Timed out waiting for client process. ' \
'It likely hangs when using gRPC after loading it and then forking'
end
client_exit_code = $CHILD_STATUS
fail "forked process failed #{client_exit_code}" if client_exit_code != 0
end
main

@@ -0,0 +1,69 @@
#!/usr/bin/env ruby
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require_relative './end2end_common'
def main
STDERR.puts 'start server'
server_runner = ServerRunner.new(EchoServerImpl)
server_port = server_runner.run
# TODO(apolcyn) Can we get rid of this sleep?
# Without it, an immediate call to the just started EchoServer
# fails with UNAVAILABLE
sleep 1
STDERR.puts 'start client'
_, client_pid = start_client('forking_client_client.rb',
server_port)
begin
Timeout.timeout(10) do
Process.wait(client_pid)
end
rescue Timeout::Error
STDERR.puts "timeout wait for client pid #{client_pid}"
Process.kill('SIGKILL', client_pid)
Process.wait(client_pid)
STDERR.puts 'killed client child'
raise 'Timed out waiting for client process. ' \
'It likely hangs when requiring grpc, then forking, then using grpc '
end
client_exit_code = $CHILD_STATUS
if client_exit_code != 0
fail "forking client client failed, exit code #{client_exit_code}"
end
server_runner.stop
end
main

@@ -0,0 +1,77 @@
#!/usr/bin/env ruby
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# For GRPC::Core classes, which use the grpc c-core, object init
# is interesting because it's related to overall library init.
require_relative './end2end_common'
def main
grpc_class = ''
OptionParser.new do |opts|
opts.on('--grpc_class=P', String) do |p|
grpc_class = p
end
end.parse!
test_proc = nil
case grpc_class
when 'channel'
test_proc = proc do
GRPC::Core::Channel.new('dummy_host', nil, :this_channel_is_insecure)
end
when 'server'
test_proc = proc do
GRPC::Core::Server.new({})
end
when 'channel_credentials'
test_proc = proc do
GRPC::Core::ChannelCredentials.new
end
when 'call_credentials'
test_proc = proc do
GRPC::Core::CallCredentials.new(proc { |noop| noop })
end
when 'compression_options'
test_proc = proc do
GRPC::Core::CompressionOptions.new
end
else
fail "bad --grpc_class=#{grpc_class} param"
end
th = Thread.new { test_proc.call }
test_proc.call
th.join
end
main

@ -0,0 +1,67 @@
#!/usr/bin/env ruby
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require_relative './end2end_common'
def main
native_grpc_classes = %w( channel
server
channel_credentials
call_credentials
compression_options )
native_grpc_classes.each do |grpc_class|
STDERR.puts 'start client'
this_dir = File.expand_path(File.dirname(__FILE__))
client_path = File.join(this_dir, 'grpc_class_init_client.rb')
client_pid = Process.spawn(RbConfig.ruby,
client_path,
"--grpc_class=#{grpc_class}")
begin
Timeout.timeout(10) do
Process.wait(client_pid)
end
rescue Timeout::Error
STDERR.puts "timeout waiting for client pid #{client_pid}"
Process.kill('SIGKILL', client_pid)
Process.wait(client_pid)
STDERR.puts 'killed client child'
raise 'Timed out waiting for client process. ' \
'It likely hangs when the first constructed gRPC object has ' \
"type: #{grpc_class}"
end
client_exit_code = $CHILD_STATUS
fail "client failed, exit code #{client_exit_code}" if client_exit_code != 0
end
end
main

@ -101,7 +101,7 @@ typedef struct grpc_rb_call {
static void destroy_call(grpc_rb_call *call) {
/* Ensure that we only try to destroy the call once */
if (call->wrapped != NULL) {
grpc_call_destroy(call->wrapped);
grpc_call_unref(call->wrapped);
call->wrapped = NULL;
grpc_rb_completion_queue_destroy(call->queue);
call->queue = NULL;

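The hunk above is one instance of a repo-wide API change in this commit: grpc_call_destroy is replaced by a grpc_call_ref / grpc_call_unref pair, making call handles reference-counted. A minimal sketch of the new lifetime contract, assuming a call obtained elsewhere (only grpc_call_ref and grpc_call_unref are the APIs introduced here; the surrounding function is illustrative):

#include <grpc/grpc.h>

/* Sketch: a call handle shared by two logical owners. Each owner holds one
   reference; the last grpc_call_unref releases the call, which is what a
   single grpc_call_destroy used to do. */
static void share_call(grpc_call *call) {
  grpc_call_ref(call);   /* second owner takes its own reference */
  /* ... both owners may keep using the call ... */
  grpc_call_unref(call); /* second owner drops its reference */
  grpc_call_unref(call); /* original owner drops the last reference */
}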
@ -223,6 +223,8 @@ static VALUE grpc_rb_call_credentials_init(VALUE self, VALUE proc) {
grpc_call_credentials *creds = NULL;
grpc_metadata_credentials_plugin plugin;
grpc_ruby_once_init();
TypedData_Get_Struct(self, grpc_rb_call_credentials,
&grpc_rb_call_credentials_data_type, wrapper);
@ -283,8 +285,6 @@ void Init_grpc_call_credentials() {
grpc_rb_call_credentials_compose, -1);
id_callback = rb_intern("__callback");
grpc_rb_event_queue_thread_start();
}
/* Gets the wrapped grpc_call_credentials from the ruby wrapper */

@ -89,10 +89,14 @@ typedef struct grpc_rb_channel {
static void grpc_rb_channel_try_register_connection_polling(
grpc_rb_channel *wrapper);
static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper);
static void *wait_until_channel_polling_thread_started_no_gil(void *);
static void wait_until_channel_polling_thread_started_unblocking_func(void *);
static grpc_completion_queue *channel_polling_cq;
static gpr_mu global_connection_polling_mu;
static gpr_cv global_connection_polling_cv;
static int abort_channel_polling = 0;
static int channel_polling_thread_started = 0;
/* Destroys Channel instances. */
static void grpc_rb_channel_free(void *p) {
@ -166,6 +170,11 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
grpc_channel_args args;
MEMZERO(&args, grpc_channel_args, 1);
grpc_ruby_once_init();
rb_thread_call_without_gvl(
wait_until_channel_polling_thread_started_no_gil, NULL,
wait_until_channel_polling_thread_started_unblocking_func, NULL);
/* "3" == 3 mandatory args */
rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);
@ -440,6 +449,7 @@ static void grpc_rb_channel_try_register_connection_polling(
}
gpr_mu_lock(&global_connection_polling_mu);
GPR_ASSERT(channel_polling_thread_started || abort_channel_polling);
conn_state = grpc_channel_check_connectivity_state(wrapper->wrapped, 0);
if (conn_state != wrapper->current_connectivity_state) {
wrapper->current_connectivity_state = conn_state;
@ -473,7 +483,7 @@ static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper) {
}
// Note this loop breaks out with a single call of
// "grpc_rb_event_unblocking_func".
// "run_poll_channels_loop_no_gil".
// This assumes that a ruby call to the unblocking func
// indicates process shutdown.
// In the worst case, this stops polling channel connectivity
@ -481,6 +491,14 @@ static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper) {
static void *run_poll_channels_loop_no_gil(void *arg) {
grpc_event event;
(void)arg;
gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");
gpr_mu_lock(&global_connection_polling_mu);
GPR_ASSERT(!channel_polling_thread_started);
channel_polling_thread_started = 1;
gpr_cv_broadcast(&global_connection_polling_cv);
gpr_mu_unlock(&global_connection_polling_mu);
for (;;) {
event = grpc_completion_queue_next(
channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
@ -500,7 +518,7 @@ static void *run_poll_channels_loop_no_gil(void *arg) {
}
// Notify the channel polling loop to cleanup and shutdown.
static void grpc_rb_event_unblocking_func(void *arg) {
static void run_poll_channels_loop_unblocking_func(void *arg) {
(void)arg;
gpr_mu_lock(&global_connection_polling_mu);
gpr_log(GPR_DEBUG,
@ -518,10 +536,37 @@ static VALUE run_poll_channels_loop(VALUE arg) {
GPR_DEBUG,
"GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
grpc_rb_event_unblocking_func, NULL);
run_poll_channels_loop_unblocking_func, NULL);
return Qnil;
}
static void *wait_until_channel_polling_thread_started_no_gil(void *arg) {
(void)arg;
gpr_log(GPR_DEBUG, "GRPC_RUBY: wait for channel polling thread to start");
gpr_mu_lock(&global_connection_polling_mu);
while (!channel_polling_thread_started && !abort_channel_polling) {
gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
gpr_inf_future(GPR_CLOCK_REALTIME));
}
gpr_mu_unlock(&global_connection_polling_mu);
return NULL;
}
static void wait_until_channel_polling_thread_started_unblocking_func(
void *arg) {
(void)arg;
gpr_mu_lock(&global_connection_polling_mu);
gpr_log(GPR_DEBUG,
"GRPC_RUBY: "
"wait_until_channel_polling_thread_started_unblocking_func - begin "
"aborting connection polling");
abort_channel_polling = 1;
gpr_cv_broadcast(&global_connection_polling_cv);
gpr_mu_unlock(&global_connection_polling_mu);
}
/* Temporary fix for
* https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
* Transports in idle channels can get destroyed. Normally c-core re-connects,
@ -532,11 +577,26 @@ static VALUE run_poll_channels_loop(VALUE arg) {
* calls - so that c-core can reconnect if needed, when there aren't any RPC's.
* TODO(apolcyn) remove this when core handles new RPCs on dead connections.
*/
static void start_poll_channels_loop() {
channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
void grpc_rb_channel_polling_thread_start() {
VALUE background_thread = Qnil;
GPR_ASSERT(!abort_channel_polling);
GPR_ASSERT(!channel_polling_thread_started);
GPR_ASSERT(channel_polling_cq == NULL);
gpr_mu_init(&global_connection_polling_mu);
abort_channel_polling = 0;
rb_thread_create(run_poll_channels_loop, NULL);
gpr_cv_init(&global_connection_polling_cv);
channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
background_thread = rb_thread_create(run_poll_channels_loop, NULL);
if (!RTEST(background_thread)) {
gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
gpr_mu_lock(&global_connection_polling_mu);
abort_channel_polling = 1;
gpr_cv_broadcast(&global_connection_polling_cv);
gpr_mu_unlock(&global_connection_polling_mu);
}
}
static void Init_grpc_propagate_masks() {
@ -608,7 +668,6 @@ void Init_grpc_channel() {
id_insecure_channel = rb_intern("this_channel_is_insecure");
Init_grpc_propagate_masks();
Init_grpc_connectivity_states();
start_poll_channels_loop();
}
/* Gets the wrapped channel from the ruby wrapper */

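Taken together, the rb_channel.c hunks above switch the connectivity-polling thread from eager startup at extension load (start_poll_channels_loop called from Init_grpc_channel) to lazy startup via grpc_rb_channel_polling_thread_start, with a condition-variable handshake so constructors can wait until the thread is actually running. A hedged sketch of how a wrapper constructor uses this, as it would appear inside rb_channel.c (my_wrapper_init is hypothetical; the two wait functions are the ones added above):

#include <ruby/ruby.h>
#include <ruby/thread.h>

static VALUE my_wrapper_init(VALUE self) {
  /* First object construction triggers one-time library init, which now
     also spawns the event and channel-polling threads. */
  grpc_ruby_once_init();
  /* Release the GIL and block until run_poll_channels_loop_no_gil has set
     channel_polling_thread_started (or abort_channel_polling on failure). */
  rb_thread_call_without_gvl(wait_until_channel_polling_thread_started_no_gil,
                             NULL,
                             wait_until_channel_polling_thread_started_unblocking_func,
                             NULL);
  /* ... create and wrap the underlying core object as before ... */
  return self;
}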
@ -41,6 +41,8 @@
/* Initializes the Channel class. */
void Init_grpc_channel();
void grpc_rb_channel_polling_thread_start();
/* Gets the wrapped channel from the ruby wrapper */
grpc_channel* grpc_rb_get_wrapped_channel(VALUE v);

@ -161,6 +161,9 @@ static VALUE grpc_rb_channel_credentials_init(int argc, VALUE *argv,
grpc_ssl_pem_key_cert_pair key_cert_pair;
const char *pem_root_certs_cstr = NULL;
MEMZERO(&key_cert_pair, grpc_ssl_pem_key_cert_pair, 1);
grpc_ruby_once_init();
/* "03" == no mandatory arg, 3 optional */
rb_scan_args(argc, argv, "03", &pem_root_certs, &pem_private_key,
&pem_cert_chain);

@ -100,8 +100,11 @@ static rb_data_type_t grpc_rb_compression_options_data_type = {
Allocate the wrapped grpc compression options and
initialize it here too. */
static VALUE grpc_rb_compression_options_alloc(VALUE cls) {
grpc_rb_compression_options *wrapper =
gpr_malloc(sizeof(grpc_rb_compression_options));
grpc_rb_compression_options *wrapper = NULL;
grpc_ruby_once_init();
wrapper = gpr_malloc(sizeof(grpc_rb_compression_options));
wrapper->wrapped = NULL;
wrapper->wrapped = gpr_malloc(sizeof(grpc_compression_options));
grpc_compression_options_init(wrapper->wrapped);

@ -47,6 +47,7 @@
#include "rb_channel.h"
#include "rb_channel_credentials.h"
#include "rb_compression_options.h"
#include "rb_event_thread.h"
#include "rb_loader.h"
#include "rb_server.h"
#include "rb_server_credentials.h"
@ -289,17 +290,14 @@ VALUE sym_metadata = Qundef;
static gpr_once g_once_init = GPR_ONCE_INIT;
static void grpc_ruby_once_init() {
static void grpc_ruby_once_init_internal() {
grpc_init();
grpc_rb_event_queue_thread_start();
grpc_rb_channel_polling_thread_start();
atexit(grpc_rb_shutdown);
}
void Init_grpc_c() {
if (!grpc_rb_load_core()) {
rb_raise(rb_eLoadError, "Couldn't find or load gRPC's dynamic C core");
return;
}
void grpc_ruby_once_init() {
/* ruby_vm_at_exit doesn't seem to be working. It would crash once every
* blue moon, and some users are getting it repeatedly. See the discussions
* - https://github.com/grpc/grpc/pull/5337
@ -310,7 +308,14 @@ void Init_grpc_c() {
* then loaded again by another VM within the same process, we need to
* schedule our initialization and destruction only once.
*/
gpr_once_init(&g_once_init, grpc_ruby_once_init);
gpr_once_init(&g_once_init, grpc_ruby_once_init_internal);
}
void Init_grpc_c() {
if (!grpc_rb_load_core()) {
rb_raise(rb_eLoadError, "Couldn't find or load gRPC's dynamic C core");
return;
}
grpc_rb_mGRPC = rb_define_module("GRPC");
grpc_rb_mGrpcCore = rb_define_module_under(grpc_rb_mGRPC, "Core");

@ -82,4 +82,6 @@ VALUE grpc_rb_cannot_init_copy(VALUE copy, VALUE self);
/* grpc_rb_time_timeval creates a gpr_timespec from a ruby time object. */
gpr_timespec grpc_rb_time_timeval(VALUE time, int interval);
void grpc_ruby_once_init();
#endif /* GRPC_RB_H_ */

@ -108,6 +108,7 @@ grpc_channel_create_call_type grpc_channel_create_call_import;
grpc_channel_ping_type grpc_channel_ping_import;
grpc_channel_register_call_type grpc_channel_register_call_import;
grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import;
grpc_call_arena_alloc_type grpc_call_arena_alloc_import;
grpc_call_start_batch_type grpc_call_start_batch_import;
grpc_call_get_peer_type grpc_call_get_peer_import;
grpc_census_call_set_context_type grpc_census_call_set_context_import;
@ -119,7 +120,8 @@ grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import;
grpc_channel_destroy_type grpc_channel_destroy_import;
grpc_call_cancel_type grpc_call_cancel_import;
grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import;
grpc_call_destroy_type grpc_call_destroy_import;
grpc_call_ref_type grpc_call_ref_import;
grpc_call_unref_type grpc_call_unref_import;
grpc_server_request_call_type grpc_server_request_call_import;
grpc_server_register_method_type grpc_server_register_method_import;
grpc_server_request_registered_call_type grpc_server_request_registered_call_import;
@ -404,6 +406,7 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping");
grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call");
grpc_channel_create_registered_call_import = (grpc_channel_create_registered_call_type) GetProcAddress(library, "grpc_channel_create_registered_call");
grpc_call_arena_alloc_import = (grpc_call_arena_alloc_type) GetProcAddress(library, "grpc_call_arena_alloc");
grpc_call_start_batch_import = (grpc_call_start_batch_type) GetProcAddress(library, "grpc_call_start_batch");
grpc_call_get_peer_import = (grpc_call_get_peer_type) GetProcAddress(library, "grpc_call_get_peer");
grpc_census_call_set_context_import = (grpc_census_call_set_context_type) GetProcAddress(library, "grpc_census_call_set_context");
@ -415,7 +418,8 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_channel_destroy_import = (grpc_channel_destroy_type) GetProcAddress(library, "grpc_channel_destroy");
grpc_call_cancel_import = (grpc_call_cancel_type) GetProcAddress(library, "grpc_call_cancel");
grpc_call_cancel_with_status_import = (grpc_call_cancel_with_status_type) GetProcAddress(library, "grpc_call_cancel_with_status");
grpc_call_destroy_import = (grpc_call_destroy_type) GetProcAddress(library, "grpc_call_destroy");
grpc_call_ref_import = (grpc_call_ref_type) GetProcAddress(library, "grpc_call_ref");
grpc_call_unref_import = (grpc_call_unref_type) GetProcAddress(library, "grpc_call_unref");
grpc_server_request_call_import = (grpc_server_request_call_type) GetProcAddress(library, "grpc_server_request_call");
grpc_server_register_method_import = (grpc_server_register_method_type) GetProcAddress(library, "grpc_server_register_method");
grpc_server_request_registered_call_import = (grpc_server_request_registered_call_type) GetProcAddress(library, "grpc_server_request_registered_call");

@ -275,6 +275,9 @@ extern grpc_channel_register_call_type grpc_channel_register_call_import;
typedef grpc_call *(*grpc_channel_create_registered_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, void *registered_call_handle, gpr_timespec deadline, void *reserved);
extern grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import;
#define grpc_channel_create_registered_call grpc_channel_create_registered_call_import
typedef void *(*grpc_call_arena_alloc_type)(grpc_call *call, size_t size);
extern grpc_call_arena_alloc_type grpc_call_arena_alloc_import;
#define grpc_call_arena_alloc grpc_call_arena_alloc_import
typedef grpc_call_error(*grpc_call_start_batch_type)(grpc_call *call, const grpc_op *ops, size_t nops, void *tag, void *reserved);
extern grpc_call_start_batch_type grpc_call_start_batch_import;
#define grpc_call_start_batch grpc_call_start_batch_import
@ -308,9 +311,12 @@ extern grpc_call_cancel_type grpc_call_cancel_import;
typedef grpc_call_error(*grpc_call_cancel_with_status_type)(grpc_call *call, grpc_status_code status, const char *description, void *reserved);
extern grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import;
#define grpc_call_cancel_with_status grpc_call_cancel_with_status_import
typedef void(*grpc_call_destroy_type)(grpc_call *call);
extern grpc_call_destroy_type grpc_call_destroy_import;
#define grpc_call_destroy grpc_call_destroy_import
typedef void(*grpc_call_ref_type)(grpc_call *call);
extern grpc_call_ref_type grpc_call_ref_import;
#define grpc_call_ref grpc_call_ref_import
typedef void(*grpc_call_unref_type)(grpc_call *call);
extern grpc_call_unref_type grpc_call_unref_import;
#define grpc_call_unref grpc_call_unref_import
typedef grpc_call_error(*grpc_server_request_call_type)(grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *request_metadata, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new);
extern grpc_server_request_call_type grpc_server_request_call_import;
#define grpc_server_request_call grpc_server_request_call_import

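The rb_grpc_imports hunks above extend the generated Windows import shim, which follows one fixed pattern per core symbol: a function-pointer typedef, an extern pointer variable, a #define that redirects the public name to the pointer, and a GetProcAddress assignment at load time. A sketch of that pattern for a hypothetical symbol my_symbol (real entries such as grpc_call_unref are code-generated, not hand-written):

#include <windows.h>

/* Header side: redirect my_symbol to a pointer resolved at runtime. */
typedef void (*my_symbol_type)(int arg);
extern my_symbol_type my_symbol_import;
#define my_symbol my_symbol_import

/* Source side, inside grpc_rb_load_imports(HMODULE library): */
my_symbol_import = (my_symbol_type)GetProcAddress(library, "my_symbol");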
@ -132,11 +132,15 @@ static VALUE grpc_rb_server_alloc(VALUE cls) {
Initializes server instances. */
static VALUE grpc_rb_server_init(VALUE self, VALUE channel_args) {
grpc_completion_queue *cq = grpc_completion_queue_create_for_pluck(NULL);
grpc_completion_queue *cq = NULL;
grpc_rb_server *wrapper = NULL;
grpc_server *srv = NULL;
grpc_channel_args args;
MEMZERO(&args, grpc_channel_args, 1);
grpc_ruby_once_init();
cq = grpc_completion_queue_create_for_pluck(NULL);
TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type,
wrapper);
grpc_rb_hash_convert_to_channel_args(channel_args, &args);

@ -205,7 +205,7 @@
% endfor
]
}],
['OS == "win"', {
['OS == "win" and runtime!="electron"', {
'targets': [
{
# IMPORTANT WINDOWS BUILD INFORMATION
@ -216,6 +216,8 @@
# when including the Node headers. The remedy for this is to remove
# the OpenSSL headers, from the downloaded Node development package,
# which is typically located in `.node-gyp` in your home directory.
#
# This is not true of Electron, which does not have OpenSSL headers.
'target_name': 'WINDOWS_BUILD_WARNING',
'rules': [
{

@ -10,6 +10,8 @@
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/src/php/ext/grpc)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/boringssl/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/cares)
LIBS="-lpthread $LIBS"
@ -20,8 +22,11 @@
PHP_ADD_LIBRARY(dl)
case $host in
*darwin*) ;;
*darwin*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_darwin)
;;
*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_linux)
PHP_ADD_LIBRARY(rt,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt)
;;

@ -103,7 +103,7 @@ static void verifier(grpc_server *server, grpc_completion_queue *cq,
GPR_ASSERT(payload != NULL);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_destroy(s);
grpc_call_unref(s);
grpc_byte_buffer_destroy(payload);
cq_verifier_destroy(cqv);
}

@ -131,7 +131,7 @@ static void server_verifier(grpc_server *server, grpc_completion_queue *cq,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}
@ -177,7 +177,7 @@ static void server_verifier_sends_too_much_metadata(grpc_server *server,
grpc_slice_unref(meta.value);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -76,7 +76,7 @@ static void verifier_succeeds(grpc_server *server, grpc_completion_queue *cq,
GPR_ASSERT(payload != NULL);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_destroy(s);
grpc_call_unref(s);
grpc_byte_buffer_destroy(payload);
cq_verifier_destroy(cqv);
}
@ -102,7 +102,7 @@ static void verifier_fails(grpc_server *server, grpc_completion_queue *cq,
GPR_ASSERT(payload == NULL);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -122,7 +122,7 @@ static void verifier(grpc_server *server, grpc_completion_queue *cq,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -115,7 +115,7 @@ static void run_test(const char *target, size_t nops) {
GPR_ASSERT(status != GRPC_STATUS_OK);
grpc_call_destroy(c);
grpc_call_unref(c);
grpc_slice_unref(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);

@ -394,7 +394,7 @@ static request_sequences perform_request(servers_fixture *f,
"foo.test.google.fr"));
GPR_ASSERT(was_cancelled == 1);
grpc_call_destroy(f->server_calls[s_idx]);
grpc_call_unref(f->server_calls[s_idx]);
/* ask for the next request on this server */
GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
@ -420,7 +420,7 @@ static request_sequences perform_request(servers_fixture *f,
cq_verifier_destroy(cqv);
grpc_call_destroy(c);
grpc_call_unref(c);
for (i = 0; i < f->num_servers; i++) {
grpc_call_details_destroy(&rdata->call_details[i]);
@ -616,7 +616,7 @@ static void test_pending_calls(size_t concurrent_calls) {
/* destroy the calls after the channel so that they are still around for the
* LB's shutdown func to process */
for (i = 0; i < concurrent_calls; i++) {
grpc_call_destroy(calls[i]);
grpc_call_unref(calls[i]);
}
gpr_free(calls);
teardown_servers(f);

@ -236,7 +236,7 @@ static void cleanup_rpc(grpc_exec_ctx *exec_ctx) {
grpc_event ev;
grpc_slice_buffer_destroy_internal(exec_ctx, &state.temp_incoming_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &state.outgoing_buffer);
grpc_call_destroy(state.call);
grpc_call_unref(state.call);
grpc_completion_queue_shutdown(state.cq);
do {
ev = grpc_completion_queue_next(state.cq, n_sec_deadline(1), NULL);

@ -138,7 +138,7 @@ static void run_test(bool wait_for_ready, bool use_service_config) {
.type != GRPC_QUEUE_SHUTDOWN)
;
grpc_completion_queue_destroy(cq);
grpc_call_destroy(call);
grpc_call_unref(call);
grpc_channel_destroy(chan);
cq_verifier_destroy(cqv);

@ -243,7 +243,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
grpc_slice_str_cmp(call_details.host, "foo.test.google.fr"));
GPR_ASSERT(was_cancelled == 1);
grpc_call_destroy(s);
grpc_call_unref(s);
} else {
/* Check for a failed connection. */
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
@ -252,7 +252,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
}
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -343,7 +343,7 @@ static void simple_request_body(grpc_end2end_test_fixture f,
CQ_EXPECT_COMPLETION(cqv, tag(1), expected_result == SUCCESS);
cq_verify(cqv);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
}

@ -148,8 +148,8 @@ void grpc_end2end_proxy_destroy(grpc_end2end_proxy *proxy) {
static void unrefpc(proxy_call *pc, const char *reason) {
if (gpr_unref(&pc->refs)) {
grpc_call_destroy(pc->c2p);
grpc_call_destroy(pc->p2s);
grpc_call_unref(pc->c2p);
grpc_call_unref(pc->p2s);
grpc_metadata_array_destroy(&pc->c2p_initial_metadata);
grpc_metadata_array_destroy(&pc->p2s_initial_metadata);
grpc_metadata_array_destroy(&pc->p2s_trailing_metadata);

@ -661,7 +661,7 @@ static void read_metadata(input_stream *inp, size_t *count,
}
static call_state *destroy_call(call_state *call) {
grpc_call_destroy(call->call);
grpc_call_unref(call->call);
call->call = NULL;
return maybe_delete_call_state(call);
}

@ -151,7 +151,7 @@ done:
ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL);
GPR_ASSERT(ev.type == GRPC_QUEUE_SHUTDOWN);
}
grpc_call_destroy(call);
grpc_call_unref(call);
grpc_completion_queue_destroy(cq);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);

@ -109,7 +109,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
}
done:
if (call1 != NULL) grpc_call_destroy(call1);
if (call1 != NULL) grpc_call_unref(call1);
grpc_call_details_destroy(&call_details1);
grpc_metadata_array_destroy(&request_metadata1);
grpc_server_shutdown_and_notify(server, cq, tag(0xdead));

@ -39,9 +39,9 @@ import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes')
'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression')
default_unsecure_fixture_options = FixtureOptions(
True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True)
True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
@ -51,8 +51,7 @@ fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
# maps fixture name to whether it requires the security library
END2END_FIXTURES = {
'h2_compress': default_unsecure_fixture_options,
'h2_compress': default_unsecure_fixture_options._replace(enables_compression=True),
'h2_census': default_unsecure_fixture_options,
'h2_load_reporting': default_unsecure_fixture_options,
'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
@ -83,8 +82,8 @@ END2END_FIXTURES = {
TestOptions = collections.namedtuple(
'TestOptions',
'needs_fullstack needs_dns proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky')
default_test_options = TestOptions(False, False, True, False, True, 1.0, [], False, False)
'needs_fullstack needs_dns proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allow_compression')
default_test_options = TestOptions(False, False, True, False, True, 1.0, [], False, False, True)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
LOWCPU = 0.1
@ -94,12 +93,13 @@ END2END_TESTS = {
'authority_not_supported': default_test_options,
'bad_hostname': default_test_options,
'bad_ping': connectivity_test_options._replace(proxyable=False),
'binary_metadata': default_test_options,
'binary_metadata': default_test_options._replace(cpu_cost=LOWCPU),
'resource_quota_server': default_test_options._replace(large_writes=True,
proxyable=False),
proxyable=False,
allow_compression=False),
'call_creds': default_test_options._replace(secure=True),
'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done': default_test_options,
'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
@ -110,46 +110,49 @@ END2END_TESTS = {
'default_host': default_test_options._replace(needs_fullstack=True,
needs_dns=True),
'disappearing_server': connectivity_test_options._replace(flaky=True),
'empty_batch': default_test_options,
'filter_causes_close': default_test_options,
'empty_batch': default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False),
'high_initial_seqno': default_test_options,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno': default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request': default_test_options,
'invoke_large_request': default_test_options,
'keepalive_timeout': default_test_options._replace(proxyable=False),
'keepalive_timeout': default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU),
'large_metadata': default_test_options,
'max_concurrent_streams': default_test_options._replace(proxyable=False),
'max_connection_age': default_test_options,
'max_concurrent_streams': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU),
'max_connection_age': default_test_options._replace(cpu_cost=LOWCPU),
'max_connection_idle': connectivity_test_options._replace(
proxyable=False, exclude_iomgrs=['uv']),
'max_message_length': default_test_options,
proxyable=False, exclude_iomgrs=['uv'], cpu_cost=LOWCPU),
'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline': default_test_options,
'network_status_change': default_test_options,
'network_status_change': default_test_options._replace(cpu_cost=LOWCPU),
'no_logging': default_test_options._replace(traceable=False),
'no_op': default_test_options,
'payload': default_test_options,
'load_reporting_hook': default_test_options,
'ping_pong_streaming': default_test_options,
'ping': connectivity_test_options._replace(proxyable=False),
'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'registered_call': default_test_options,
'request_with_flags': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU),
'request_with_payload': default_test_options,
'server_finishes_request': default_test_options,
'shutdown_finishes_calls': default_test_options,
'shutdown_finishes_tags': default_test_options,
'simple_cacheable_request': default_test_options,
'request_with_payload': default_test_options._replace(cpu_cost=LOWCPU),
'server_finishes_request': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_calls': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_tags': default_test_options._replace(cpu_cost=LOWCPU),
'simple_cacheable_request': default_test_options._replace(cpu_cost=LOWCPU),
'simple_delayed_request': connectivity_test_options,
'simple_metadata': default_test_options,
'simple_request': default_test_options,
'streaming_error_response': default_test_options,
'streaming_error_response': default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata': default_test_options,
'write_buffering': default_test_options,
'write_buffering_at_end': default_test_options,
'write_buffering': default_test_options._replace(cpu_cost=LOWCPU),
'write_buffering_at_end': default_test_options._replace(cpu_cost=LOWCPU),
}
@ -169,6 +172,9 @@ def compatible(f, t):
if END2END_TESTS[t].large_writes:
if not END2END_FIXTURES[f].large_writes:
return False
if not END2END_TESTS[t].allow_compression:
if END2END_FIXTURES[f].enables_compression:
return False
return True

@ -302,10 +302,10 @@ int main(int argc, char **argv) {
CQ_EXPECT_COMPLETION(cqv, tag(0xdead2), 1);
cq_verify(cqv);
grpc_call_destroy(call1);
grpc_call_destroy(call2);
grpc_call_destroy(server_call1);
grpc_call_destroy(server_call2);
grpc_call_unref(call1);
grpc_call_unref(call2);
grpc_call_unref(server_call1);
grpc_call_unref(server_call2);
grpc_server_destroy(server1);
grpc_server_destroy(server2);
grpc_channel_destroy(chan);

@ -126,7 +126,7 @@ static void prepare_test(int is_client) {
static void cleanup_test() {
grpc_completion_queue *shutdown_cq;
grpc_call_destroy(g_state.call);
grpc_call_unref(g_state.call);
cq_verifier_destroy(g_state.cqv);
grpc_channel_destroy(g_state.chan);
grpc_slice_unref(g_state.details);
@ -135,7 +135,7 @@ static void cleanup_test() {
if (!g_state.is_client) {
shutdown_cq = grpc_completion_queue_create_for_pluck(NULL);
grpc_call_destroy(g_state.server_call);
grpc_call_unref(g_state.server_call);
grpc_server_shutdown_and_notify(g_state.server, shutdown_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(shutdown_cq, tag(1000),
grpc_timeout_seconds_to_deadline(5),

@ -97,7 +97,7 @@ int main(int argc, char **argv) {
.type != GRPC_QUEUE_SHUTDOWN)
;
grpc_completion_queue_destroy(cq);
grpc_call_destroy(call);
grpc_call_unref(call);
grpc_channel_destroy(chan);
cq_verifier_destroy(cqv);

@ -182,7 +182,7 @@ static void test_with_authority_header(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -163,7 +163,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
}

@ -208,7 +208,7 @@ static void test_bad_ping(grpc_end2end_test_config config) {
CQ_EXPECT_COMPLETION(cqv, tag(0xdead), 1);
cq_verify(cqv);
grpc_call_destroy(s);
grpc_call_unref(s);
// The connection should be closed immediately after the misbehaved pings,
// the in-progress RPC should fail.
@ -224,7 +224,7 @@ static void test_bad_ping(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
end_test(&f);
config.tear_down_data(&f);

@ -314,8 +314,8 @@ static void test_request_response_with_metadata_and_payload(
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);

@ -347,8 +347,8 @@ static void request_response_with_payload_and_call_creds(
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
@ -473,7 +473,7 @@ static void test_request_with_server_rejecting_client_creds(
grpc_byte_buffer_destroy(response_payload_recv);
grpc_slice_unref(details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
end_test(&f);

@ -252,8 +252,8 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
grpc_byte_buffer_destroy(response_payload_recv);
grpc_slice_unref(details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
if (args != NULL) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

@ -229,8 +229,8 @@ static void test_cancel_after_accept_and_writes_closed(
grpc_byte_buffer_destroy(response_payload_recv);
grpc_slice_unref(details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
end_test(&f);

@ -189,7 +189,7 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config,
grpc_byte_buffer_destroy(response_payload_recv);
grpc_slice_unref(details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
end_test(&f);

@ -186,7 +186,7 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config,
grpc_byte_buffer_destroy(response_payload_recv);
grpc_slice_unref(details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
end_test(&f);

@ -118,7 +118,7 @@ static void test_cancel_in_a_vacuum(grpc_end2end_test_config config,
GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c, NULL));
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(v_client);
end_test(&f);

@ -165,7 +165,7 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
}

@ -261,8 +261,8 @@ static void request_for_disabled_algorithm(
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
@ -519,8 +519,8 @@ static void request_with_payload_template(
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);

@ -214,8 +214,8 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -191,8 +191,8 @@ static void do_request_and_shutdown_server(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
}
static void disappearing_server_test(grpc_end2end_test_config config) {

@ -121,7 +121,7 @@ static void empty_batch_body(grpc_end2end_test_config config,
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
}

@ -194,7 +194,7 @@ static void test_server_channel_filter(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
@ -284,7 +284,7 @@ static void test_client_channel_filter(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
@ -372,7 +372,7 @@ static void test_client_subchannel_filter(grpc_end2end_test_config config) {
// Reset and create a new call. (The first call uses a different code
// path in client_channel.c than subsequent calls on the same channel,
// and we need to test both.)
grpc_call_destroy(c);
grpc_call_unref(c);
status = GRPC_STATUS_OK;
grpc_slice_unref(details);
details = grpc_empty_slice();
@ -399,7 +399,7 @@ static void test_client_subchannel_filter(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -189,7 +189,7 @@ static void test_request(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -228,8 +228,8 @@ static void test_request(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_destroy(c);
grpc_call_unref(s);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -192,7 +192,7 @@ static void test_early_server_shutdown_finishes_inflight_calls(
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
grpc_call_destroy(s);
grpc_call_unref(s);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo"));
@ -206,7 +206,7 @@ static void test_early_server_shutdown_finishes_inflight_calls(
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -205,8 +205,8 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
/* TODO(ctiller): this rate limits the test, and it should be removed when
retry has been implemented; until then cross-thread chatter

@ -358,8 +358,8 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -219,8 +219,8 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -258,8 +258,8 @@ static void test_invoke_large_request(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);

@ -223,8 +223,8 @@ static void test_keepalive_timeout(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);

@ -246,8 +246,8 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);

@ -266,8 +266,8 @@ static void request_response_with_payload(
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);

@ -201,8 +201,8 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}
@ -433,10 +433,10 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
cq_verifier_destroy(cqv);
grpc_call_destroy(c1);
grpc_call_destroy(s1);
grpc_call_destroy(c2);
grpc_call_destroy(s2);
grpc_call_unref(c1);
grpc_call_unref(s1);
grpc_call_unref(c2);
grpc_call_unref(s2);
grpc_slice_unref(details1);
grpc_slice_unref(details2);
@ -628,10 +628,10 @@ static void test_max_concurrent_streams_with_timeout_on_first(
cq_verifier_destroy(cqv);
grpc_call_destroy(c1);
grpc_call_destroy(s1);
grpc_call_destroy(c2);
grpc_call_destroy(s2);
grpc_call_unref(c1);
grpc_call_unref(s1);
grpc_call_unref(c2);
grpc_call_unref(s2);
grpc_slice_unref(details1);
grpc_slice_unref(details2);
@ -789,7 +789,7 @@ static void test_max_concurrent_streams_with_timeout_on_second(
/* second request is finished because of time out, so destroy the second call
*/
grpc_call_destroy(c2);
grpc_call_unref(c2);
/* now reply the first call */
memset(ops, 0, sizeof(ops));
@ -821,8 +821,8 @@ static void test_max_concurrent_streams_with_timeout_on_second(
cq_verifier_destroy(cqv);
grpc_call_destroy(c1);
grpc_call_destroy(s1);
grpc_call_unref(c1);
grpc_call_unref(s1);
grpc_slice_unref(details1);
grpc_slice_unref(details2);

@ -214,7 +214,7 @@ static void test_max_age_forcibly_close(grpc_end2end_test_config config) {
CQ_EXPECT_COMPLETION(cqv, tag(0xdead), true);
cq_verify(cqv);
grpc_call_destroy(s);
grpc_call_unref(s);
/* The connection should be closed immediately after the max age grace period,
the in-progress RPC should fail. */
@ -230,7 +230,7 @@ static void test_max_age_forcibly_close(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
end_test(&f);
config.tear_down_data(&f);
@ -352,7 +352,7 @@ static void test_max_age_gracefully_close(grpc_end2end_test_config config) {
CQ_EXPECT_COMPLETION(cqv, tag(0xdead), true);
cq_verify(cqv);
grpc_call_destroy(s);
grpc_call_unref(s);
/* The connection is closed gracefully with goaway, the rpc should still be
completed. */
@ -368,7 +368,7 @@ static void test_max_age_gracefully_close(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
end_test(&f);
config.tear_down_data(&f);

@ -175,8 +175,8 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -289,8 +289,8 @@ done:
grpc_byte_buffer_destroy(request_payload);
grpc_byte_buffer_destroy(recv_payload);
grpc_call_destroy(c);
if (s != NULL) grpc_call_destroy(s);
grpc_call_unref(c);
if (s != NULL) grpc_call_unref(s);
cq_verifier_destroy(cqv);
@ -483,8 +483,8 @@ static void test_max_message_length_on_response(grpc_end2end_test_config config,
grpc_byte_buffer_destroy(response_payload);
grpc_byte_buffer_destroy(recv_payload);
grpc_call_destroy(c);
if (s != NULL) grpc_call_destroy(s);
grpc_call_unref(c);
if (s != NULL) grpc_call_unref(s);
cq_verifier_destroy(cqv);
