Merge branch 'master' of https://github.com/grpc/grpc into serialization-refactor

pull/10743/head
ncteisen 8 years ago
commit cc7e17b2d6
  1. CMakeLists.txt (8)
  2. Makefile (27)
  3. WORKSPACE (6)
  4. binding.gyp (4)
  5. build.yaml (16)
  6. config.m4 (57)
  7. doc/binary-logging.md (2)
  8. grpc.def (4)
  9. include/grpc++/impl/codegen/async_stream.h (129)
  10. include/grpc++/impl/codegen/async_unary_call.h (91)
  11. include/grpc++/impl/codegen/call.h (37)
  12. include/grpc++/impl/codegen/core_codegen.h (4)
  13. include/grpc++/impl/codegen/core_codegen_interface.h (7)
  14. include/grpc/grpc.h (16)
  15. package.xml (73)
  16. src/compiler/cpp_generator.cc (23)
  17. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (4)
  18. src/core/ext/transport/chttp2/transport/chttp2_transport.c (2)
  19. src/core/lib/iomgr/error.c (15)
  20. src/core/lib/iomgr/error.h (26)
  21. src/core/lib/support/stack_lockfree.c (33)
  22. src/core/lib/surface/call.c (31)
  23. src/core/lib/surface/server.c (2)
  24. src/cpp/client/channel_cc.cc (2)
  25. src/cpp/client/client_context.cc (2)
  26. src/cpp/client/generic_stub.cc (2)
  27. src/cpp/common/core_codegen.cc (6)
  28. src/cpp/server/server_cc.cc (2)
  29. src/cpp/server/server_context.cc (7)
  30. src/csharp/ext/grpc_csharp_ext.c (279)
  31. src/node/ext/byte_buffer.cc (4)
  32. src/node/ext/byte_buffer.h (2)
  33. src/node/ext/call.cc (259)
  34. src/node/ext/call.h (3)
  35. src/node/ext/call_credentials.cc (77)
  36. src/node/ext/call_credentials.h (4)
  37. src/node/ext/channel.cc (58)
  38. src/node/ext/channel.h (2)
  39. src/node/ext/channel_credentials.cc (29)
  40. src/node/ext/channel_credentials.h (2)
  41. src/node/ext/completion_queue.h (2)
  42. src/node/ext/node_grpc.cc (85)
  43. src/node/ext/server.cc (30)
  44. src/node/ext/server.h (2)
  45. src/node/ext/server_credentials.cc (22)
  46. src/node/ext/server_credentials.h (2)
  47. src/node/ext/server_uv.cc (33)
  48. src/node/ext/slice.cc (36)
  49. src/node/ext/slice.h (7)
  50. src/node/ext/timeval.cc (2)
  51. src/node/performance/benchmark_client.js (16)
  52. src/node/performance/benchmark_client_express.js (10)
  53. src/node/performance/benchmark_server.js (15)
  54. src/node/performance/benchmark_server_express.js (8)
  55. src/objective-c/GRPCClient/private/GRPCWrappedCall.m (2)
  56. src/objective-c/tests/CronetUnitTests/CronetUnitTests.m (4)
  57. src/php/ext/grpc/call.c (2)
  58. src/php/lib/Grpc/BaseStub.php (2)
  59. src/python/grpcio/.gitignore (1)
  60. src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi (2)
  61. src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi (2)
  62. src/python/grpcio/grpc/_grpcio_metadata.py (32)
  63. src/ruby/end2end/forking_client_client.rb (69)
  64. src/ruby/end2end/forking_client_driver.rb (69)
  65. src/ruby/end2end/grpc_class_init_client.rb (77)
  66. src/ruby/end2end/grpc_class_init_driver.rb (67)
  67. src/ruby/ext/grpc/rb_byte_buffer.c (14)
  68. src/ruby/ext/grpc/rb_call.c (104)
  69. src/ruby/ext/grpc/rb_call.h (7)
  70. src/ruby/ext/grpc/rb_call_credentials.c (49)
  71. src/ruby/ext/grpc/rb_channel.c (128)
  72. src/ruby/ext/grpc/rb_channel.h (2)
  73. src/ruby/ext/grpc/rb_channel_args.c (20)
  74. src/ruby/ext/grpc/rb_channel_credentials.c (31)
  75. src/ruby/ext/grpc/rb_completion_queue.c (16)
  76. src/ruby/ext/grpc/rb_compression_options.c (20)
  77. src/ruby/ext/grpc/rb_event_thread.c (22)
  78. src/ruby/ext/grpc/rb_event_thread.h (3)
  79. src/ruby/ext/grpc/rb_grpc.c (44)
  80. src/ruby/ext/grpc/rb_grpc.h (6)
  81. src/ruby/ext/grpc/rb_grpc_imports.generated.c (8)
  82. src/ruby/ext/grpc/rb_grpc_imports.generated.h (12)
  83. src/ruby/ext/grpc/rb_server.c (6)
  84. templates/CMakeLists.txt.template (6)
  85. templates/binding.gyp.template (4)
  86. templates/config.m4.template (7)
  87. templates/src/python/grpcio/grpc/_grpcio_metadata.py.template (34)
  88. test/core/bad_client/tests/head_of_line_blocking.c (2)
  89. test/core/bad_client/tests/large_metadata.c (4)
  90. test/core/bad_client/tests/server_registered_method.c (4)
  91. test/core/bad_client/tests/simple_request.c (2)
  92. test/core/bad_ssl/bad_ssl_test.c (2)
  93. test/core/client_channel/lb_policies_test.c (6)
  94. test/core/end2end/bad_server_response_test.c (2)
  95. test/core/end2end/connection_refused_test.c (2)
  96. test/core/end2end/dualstack_socket_test.c (4)
  97. test/core/end2end/fixtures/h2_ssl_cert.c (2)
  98. test/core/end2end/fixtures/proxy.c (4)
  99. test/core/end2end/fuzzers/api_fuzzer.c (2)
  100. test/core/end2end/fuzzers/client_fuzzer.c (2)
Some files were not shown because too many files have changed in this diff.

@ -269,8 +269,10 @@ if(NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif()
if(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES dl rt m pthread)
if(_gRPC_PLATFORM_MAC)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} m pthread)
elseif(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} rt m pthread)
endif()
if(WIN32 AND MSVC)
@ -9775,6 +9777,8 @@ target_include_directories(codegen_test_minimal
target_link_libraries(codegen_test_minimal
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc
gpr
${_gRPC_GFLAGS_LIBRARIES}
)

@ -157,6 +157,15 @@ LDXX_asan-noleaks = clang++
CPPFLAGS_asan-noleaks = -O0 -fsanitize-coverage=edge -fsanitize=address -fno-omit-frame-pointer -Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
LDFLAGS_asan-noleaks = -fsanitize=address
VALID_CONFIG_c++-compat = 1
CC_c++-compat = $(DEFAULT_CC)
CXX_c++-compat = $(DEFAULT_CXX)
LD_c++-compat = $(DEFAULT_CC)
LDXX_c++-compat = $(DEFAULT_CXX)
CFLAGS_c++-compat = -Wc++-compat
CPPFLAGS_c++-compat = -O0
DEFINES_c++-compat = _DEBUG DEBUG
VALID_CONFIG_ubsan = 1
REQUIRE_CUSTOM_LIBRARIES_ubsan = 1
CC_ubsan = clang
@ -14193,28 +14202,28 @@ $(BINDIR)/$(CONFIG)/codegen_test_minimal: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/codegen_test_minimal: $(PROTOBUF_DEP) $(CODEGEN_TEST_MINIMAL_OBJS)
$(BINDIR)/$(CONFIG)/codegen_test_minimal: $(PROTOBUF_DEP) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_MINIMAL_OBJS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_minimal
$(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_minimal
endif
endif
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o:
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o:
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o:
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_codegen_test_minimal: $(CODEGEN_TEST_MINIMAL_OBJS:.o=.dep)

@ -77,12 +77,6 @@ local_repository(
path = "third_party/gflags",
)
git_repository(
name = "mongoose_repo",
commit = "4120a97945b41195a6223a600dae8e3b19bed19e",
remote = "https://github.com/makdharma/mongoose.git"
)
new_local_repository(
name = "submodule_benchmark",
path = "third_party/benchmark",

@ -507,7 +507,7 @@
},
]
}],
['OS == "win"', {
['OS == "win" and runtime!="electron"', {
'targets': [
{
# IMPORTANT WINDOWS BUILD INFORMATION
@ -518,6 +518,8 @@
# when including the Node headers. The remedy for this is to remove
# the OpenSSL headers, from the downloaded Node development package,
# which is typically located in `.node-gyp` in your home directory.
#
# This is not true of Electron, which does not have OpenSSL headers.
'target_name': 'WINDOWS_BUILD_WARNING',
'rules': [
{

@ -1516,6 +1516,7 @@ libs:
- global
targets:
- name: alarm_test
cpu_cost: 0.1
build: test
language: c
src:
@ -1699,7 +1700,7 @@ targets:
dict: test/core/end2end/fuzzers/hpack.dictionary
maxlen: 2048
- name: combiner_test
cpu_cost: 30
cpu_cost: 10
build: test
language: c
src:
@ -1720,6 +1721,7 @@ targets:
- gpr_test_util
- gpr
- name: concurrent_connectivity_test
cpu_cost: 2.0
build: test
language: c
src:
@ -1806,6 +1808,7 @@ targets:
- gpr_test_util
- gpr
- name: ev_epoll_linux_test
cpu_cost: 3
build: test
language: c
src:
@ -1975,6 +1978,7 @@ targets:
- gpr_test_util
- gpr
- name: gpr_cpu_test
cpu_cost: 30
build: test
language: c
src:
@ -2024,7 +2028,7 @@ targets:
- gpr_test_util
- gpr
- name: gpr_spinlock_test
cpu_cost: 10
cpu_cost: 3
build: test
language: c
src:
@ -3555,6 +3559,9 @@ targets:
- src/proto/grpc/testing/services.proto
- src/proto/grpc/testing/stats.proto
- test/cpp/codegen/codegen_test_minimal.cc
deps:
- grpc
- gpr
filegroups:
- grpc++_codegen_base
- grpc++_codegen_base_src
@ -4377,6 +4384,10 @@ configs:
basicprof:
CPPFLAGS: -O2 -DGRPC_BASIC_PROFILER -DGRPC_TIMERS_RDTSC
DEFINES: NDEBUG
c++-compat:
CFLAGS: -Wc++-compat
CPPFLAGS: -O0
DEFINES: _DEBUG DEBUG
counters:
CPPFLAGS: -O2 -DGPR_LOW_LEVEL_COUNTERS
DEFINES: NDEBUG
@ -4518,6 +4529,7 @@ php_config_m4:
deps:
- grpc
- gpr
- ares
- boringssl
headers:
- src/php/ext/grpc/byte_buffer.h

@ -8,6 +8,8 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/src/php/ext/grpc)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/boringssl/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/cares)
LIBS="-lpthread $LIBS"
@ -18,8 +20,11 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_LIBRARY(dl)
case $host in
*darwin*) ;;
*darwin*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_darwin)
;;
*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_linux)
PHP_ADD_LIBRARY(rt,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt)
;;
@ -622,6 +627,55 @@ if test "$PHP_GRPC" != "no"; then
third_party/boringssl/ssl/tls13_server.c \
third_party/boringssl/ssl/tls_method.c \
third_party/boringssl/ssl/tls_record.c \
third_party/cares/cares/ares__close_sockets.c \
third_party/cares/cares/ares__get_hostent.c \
third_party/cares/cares/ares__read_line.c \
third_party/cares/cares/ares__timeval.c \
third_party/cares/cares/ares_cancel.c \
third_party/cares/cares/ares_create_query.c \
third_party/cares/cares/ares_data.c \
third_party/cares/cares/ares_destroy.c \
third_party/cares/cares/ares_expand_name.c \
third_party/cares/cares/ares_expand_string.c \
third_party/cares/cares/ares_fds.c \
third_party/cares/cares/ares_free_hostent.c \
third_party/cares/cares/ares_free_string.c \
third_party/cares/cares/ares_getenv.c \
third_party/cares/cares/ares_gethostbyaddr.c \
third_party/cares/cares/ares_gethostbyname.c \
third_party/cares/cares/ares_getnameinfo.c \
third_party/cares/cares/ares_getopt.c \
third_party/cares/cares/ares_getsock.c \
third_party/cares/cares/ares_init.c \
third_party/cares/cares/ares_library_init.c \
third_party/cares/cares/ares_llist.c \
third_party/cares/cares/ares_mkquery.c \
third_party/cares/cares/ares_nowarn.c \
third_party/cares/cares/ares_options.c \
third_party/cares/cares/ares_parse_a_reply.c \
third_party/cares/cares/ares_parse_aaaa_reply.c \
third_party/cares/cares/ares_parse_mx_reply.c \
third_party/cares/cares/ares_parse_naptr_reply.c \
third_party/cares/cares/ares_parse_ns_reply.c \
third_party/cares/cares/ares_parse_ptr_reply.c \
third_party/cares/cares/ares_parse_soa_reply.c \
third_party/cares/cares/ares_parse_srv_reply.c \
third_party/cares/cares/ares_parse_txt_reply.c \
third_party/cares/cares/ares_platform.c \
third_party/cares/cares/ares_process.c \
third_party/cares/cares/ares_query.c \
third_party/cares/cares/ares_search.c \
third_party/cares/cares/ares_send.c \
third_party/cares/cares/ares_strcasecmp.c \
third_party/cares/cares/ares_strdup.c \
third_party/cares/cares/ares_strerror.c \
third_party/cares/cares/ares_timeout.c \
third_party/cares/cares/ares_version.c \
third_party/cares/cares/ares_writev.c \
third_party/cares/cares/bitncmp.c \
third_party/cares/cares/inet_net_pton.c \
third_party/cares/cares/inet_ntop.c \
third_party/cares/cares/windows_port.c \
, $ext_shared, , -Wall -Werror \
-Wno-parentheses-equality -Wno-unused-value -std=c11 \
-fvisibility=hidden -DOPENSSL_NO_ASM -D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN \
@ -724,5 +778,6 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/third_party/boringssl/crypto/x509)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/boringssl/crypto/x509v3)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/boringssl/ssl)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/cares/cares)
PHP_ADD_BUILD_DIR($ext_builddir/third_party/nanopb)
fi

@ -2,7 +2,7 @@
## Format
The log format is described in [this proto file](src/proto/grpc/binary_log/v1alpha/log.proto). It is intended that multiple parts of the call will be logged in separate files, and then correlated by analysis tools using the rpc\_id.
The log format is described in [this proto file](/src/proto/grpc/binary_log/v1alpha/log.proto). It is intended that multiple parts of the call will be logged in separate files, and then correlated by analysis tools using the rpc\_id.
## API

@ -70,6 +70,7 @@ EXPORTS
grpc_channel_ping
grpc_channel_register_call
grpc_channel_create_registered_call
grpc_call_arena_alloc
grpc_call_start_batch
grpc_call_get_peer
grpc_census_call_set_context
@ -81,7 +82,8 @@ EXPORTS
grpc_channel_destroy
grpc_call_cancel
grpc_call_cancel_with_status
grpc_call_destroy
grpc_call_ref
grpc_call_unref
grpc_server_request_call
grpc_server_register_method
grpc_server_request_registered_call

@ -145,17 +145,19 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
/// Create a stream and write the first request out.
template <class W>
ClientAsyncReader(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
const W& request, void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
init_ops_.ClientSendClose();
call_.PerformOps(&init_ops_);
static ClientAsyncReader* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, const W& request,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader)))
ClientAsyncReader(call, context, request, tag);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReader));
}
void ReadInitialMetadata(void* tag) override {
@ -185,6 +187,19 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
}
private:
template <class W>
ClientAsyncReader(Call call, ClientContext* context, const W& request,
void* tag)
: context_(context), call_(call) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
init_ops_.ClientSendClose();
call_.PerformOps(&init_ops_);
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
@ -210,23 +225,19 @@ template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
template <class R>
ClientAsyncWriter(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
R* response, void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
if (context_->initial_metadata_corked_) {
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
static ClientAsyncWriter* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, R* response,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncWriter)))
ClientAsyncWriter(call, context, response, tag);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncWriter));
}
void ReadInitialMetadata(void* tag) override {
@ -271,6 +282,24 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
private:
template <class R>
ClientAsyncWriter(Call call, ClientContext* context, R* response, void* tag)
: context_(context), call_(call) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
if (context_->initial_metadata_corked_) {
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
@ -298,21 +327,20 @@ template <class W, class R>
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
public:
ClientAsyncReaderWriter(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
if (context_->initial_metadata_corked_) {
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
static ClientAsyncReaderWriter* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context, void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReaderWriter)))
ClientAsyncReaderWriter(call, context, tag);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReaderWriter));
}
void ReadInitialMetadata(void* tag) override {
@ -366,6 +394,21 @@ class ClientAsyncReaderWriter final
}
private:
ClientAsyncReaderWriter(Call call, ClientContext* context, void* tag)
: context_(context), call_(call) {
if (context_->initial_metadata_corked_) {
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
} else {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
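The ClientAsyncReader, ClientAsyncWriter, and ClientAsyncReaderWriter changes above all follow the same shape: the public constructor becomes private, a static Create() placement-news the object into memory obtained from grpc_call_arena_alloc, and the sized operator delete only asserts, because the call arena reclaims the storage when the call completes. The following is a minimal, self-contained sketch of that allocation pattern using a toy bump arena in place of the real call arena; it is illustrative only, not gRPC code.

// Sketch: placement-new into arena-owned memory with a no-op sized delete.
#include <cassert>
#include <cstddef>
#include <new>
#include <vector>

class Arena {  // toy stand-in for the per-call arena
 public:
  void* Alloc(std::size_t n) {
    blocks_.emplace_back(n);
    return blocks_.back().data();
  }
 private:
  std::vector<std::vector<unsigned char>> blocks_;
};

class Reader {
 public:
  static Reader* Create(Arena* arena, void* tag) {
    // mirrors grpc_call_arena_alloc(call.call(), sizeof(ClientAsyncReader))
    return new (arena->Alloc(sizeof(Reader))) Reader(tag);
  }
  // always arena-allocated: deleting runs the destructor and a size check only
  static void operator delete(void* /*ptr*/, std::size_t size) {
    assert(size == sizeof(Reader));
  }
 private:
  explicit Reader(void* tag) : tag_(tag) {}
  void* tag_;
};

int main() {
  Arena arena;
  Reader* r = Reader::Create(&arena, nullptr);
  delete r;  // destructor runs; the memory stays with the arena
}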

@ -34,6 +34,7 @@
#ifndef GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
#define GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
#include <assert.h>
#include <grpc++/impl/codegen/call.h>
#include <grpc++/impl/codegen/channel_interface.h>
#include <grpc++/impl/codegen/client_context.h>
@ -59,57 +60,67 @@ class ClientAsyncResponseReader final
: public ClientAsyncResponseReaderInterface<R> {
public:
template <class W>
ClientAsyncResponseReader(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
const W& request)
: context_(context),
call_(channel->CreateCall(method, context, cq)),
collection_(std::make_shared<CallOpSetCollection>()) {
collection_->init_buf_.SetCollection(collection_);
collection_->init_buf_.SendInitialMetadata(
context->send_initial_metadata_, context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(collection_->init_buf_.SendMessage(request).ok());
collection_->init_buf_.ClientSendClose();
call_.PerformOps(&collection_->init_buf_);
static ClientAsyncResponseReader* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context,
const W& request) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader)))
ClientAsyncResponseReader(call, context, request);
}
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncResponseReader));
}
void ReadInitialMetadata(void* tag) {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
collection_->meta_buf_.SetCollection(collection_);
collection_->meta_buf_.set_output_tag(tag);
collection_->meta_buf_.RecvInitialMetadata(context_);
call_.PerformOps(&collection_->meta_buf_);
meta_buf_.set_output_tag(tag);
meta_buf_.RecvInitialMetadata(context_);
call_.PerformOps(&meta_buf_);
}
void Finish(R* msg, Status* status, void* tag) {
collection_->finish_buf_.SetCollection(collection_);
collection_->finish_buf_.set_output_tag(tag);
finish_buf_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
collection_->finish_buf_.RecvInitialMetadata(context_);
finish_buf_.RecvInitialMetadata(context_);
}
collection_->finish_buf_.RecvMessage(msg);
collection_->finish_buf_.AllowNoMessage();
collection_->finish_buf_.ClientRecvStatus(context_, status);
call_.PerformOps(&collection_->finish_buf_);
finish_buf_.RecvMessage(msg);
finish_buf_.AllowNoMessage();
finish_buf_.ClientRecvStatus(context_, status);
call_.PerformOps(&finish_buf_);
}
private:
ClientContext* context_;
ClientContext* const context_;
Call call_;
class CallOpSetCollection : public CallOpSetCollectionInterface {
public:
SneakyCallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
init_buf_;
CallOpSet<CallOpRecvInitialMetadata> meta_buf_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>,
CallOpClientRecvStatus>
finish_buf_;
};
std::shared_ptr<CallOpSetCollection> collection_;
template <class W>
ClientAsyncResponseReader(Call call, ClientContext* context, const W& request)
: context_(context), call_(call) {
init_buf_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_buf_.SendMessage(request).ok());
init_buf_.ClientSendClose();
call_.PerformOps(&init_buf_);
}
// disable operator new
static void* operator new(std::size_t size);
static void* operator new(std::size_t size, void* p) { return p; };
SneakyCallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
init_buf_;
CallOpSet<CallOpRecvInitialMetadata> meta_buf_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>,
CallOpClientRecvStatus>
finish_buf_;
};
template <class W>
@ -179,4 +190,12 @@ class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
} // namespace grpc
namespace std {
template <class R>
class default_delete<grpc::ClientAsyncResponseReader<R>> {
public:
void operator()(void* p) {}
};
}
#endif // GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
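The std::default_delete specialization added at the end of this header is what lets generated stubs keep returning the response reader through a std::unique_ptr even though the object now lives in the call arena: the deleter is a no-op, so the smart pointer neither frees the memory nor runs a destructor. A hedged sketch of the same trick with a stand-in type (demo::ArenaObject is hypothetical):

#include <memory>
#include <new>

namespace demo {
struct ArenaObject {};  // stand-in for an arena-allocated reader
}  // namespace demo

namespace std {
template <>
class default_delete<demo::ArenaObject> {
 public:
  void operator()(void*) {}  // no-op: the arena, not unique_ptr, owns the memory
};
}  // namespace std

int main() {
  alignas(demo::ArenaObject) unsigned char storage[sizeof(demo::ArenaObject)];
  std::unique_ptr<demo::ArenaObject> p(new (storage) demo::ArenaObject);
  // When p goes out of scope, neither free nor the destructor is invoked.
}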

@ -34,6 +34,7 @@
#ifndef GRPCXX_IMPL_CODEGEN_CALL_H
#define GRPCXX_IMPL_CODEGEN_CALL_H
#include <assert.h>
#include <cstring>
#include <functional>
#include <map>
@ -49,6 +50,7 @@
#include <grpc++/impl/codegen/status.h>
#include <grpc++/impl/codegen/string_ref.h>
#include <grpc/impl/codegen/atm.h>
#include <grpc/impl/codegen/compression_types.h>
#include <grpc/impl/codegen/grpc_types.h>
@ -245,8 +247,10 @@ class CallOpSendInitialMetadata {
op->data.send_initial_metadata.metadata = initial_metadata_;
op->data.send_initial_metadata.maybe_compression_level.is_set =
maybe_compression_level_.is_set;
op->data.send_initial_metadata.maybe_compression_level.level =
maybe_compression_level_.level;
if (maybe_compression_level_.is_set) {
op->data.send_initial_metadata.maybe_compression_level.level =
maybe_compression_level_.level;
}
}
void FinishOp(bool* status) {
if (!send_) return;
@ -578,17 +582,6 @@ class CallOpClientRecvStatus {
grpc_slice error_message_;
};
/// An abstract collection of CallOpSet's, to be used whenever
/// CallOpSet objects must be thought of as a group. Each member
/// of the group should have a shared_ptr back to the collection,
/// as will the object that instantiates the collection, allowing
/// for automatic ref-counting. In practice, any actual use should
/// derive from this base class. This is specifically necessary if
/// some of the CallOpSet's in the collection are "Sneaky" and don't
/// report back to the C++ layer CQ operations
class CallOpSetCollectionInterface
: public std::enable_shared_from_this<CallOpSetCollectionInterface> {};
/// An abstract collection of call ops, used to generate the
/// grpc_call_op structure to pass down to the lower layers,
/// and as it is-a CompletionQueueTag, also massages the final
@ -596,18 +589,9 @@ class CallOpSetCollectionInterface
/// API.
class CallOpSetInterface : public CompletionQueueTag {
public:
CallOpSetInterface() {}
/// Fills in grpc_op, starting from ops[*nops] and moving
/// upwards.
virtual void FillOps(grpc_op* ops, size_t* nops) = 0;
/// Mark this as belonging to a collection if needed
void SetCollection(std::shared_ptr<CallOpSetCollectionInterface> collection) {
collection_ = collection;
}
protected:
std::shared_ptr<CallOpSetCollectionInterface> collection_;
virtual void FillOps(grpc_call* call, grpc_op* ops, size_t* nops) = 0;
};
/// Primary implementation of CallOpSetInterface.
@ -628,13 +612,15 @@ class CallOpSet : public CallOpSetInterface,
public Op6 {
public:
CallOpSet() : return_tag_(this) {}
void FillOps(grpc_op* ops, size_t* nops) override {
void FillOps(grpc_call* call, grpc_op* ops, size_t* nops) override {
this->Op1::AddOp(ops, nops);
this->Op2::AddOp(ops, nops);
this->Op3::AddOp(ops, nops);
this->Op4::AddOp(ops, nops);
this->Op5::AddOp(ops, nops);
this->Op6::AddOp(ops, nops);
g_core_codegen_interface->grpc_call_ref(call);
call_ = call;
}
bool FinalizeResult(void** tag, bool* status) override {
@ -645,7 +631,7 @@ class CallOpSet : public CallOpSetInterface,
this->Op5::FinishOp(status);
this->Op6::FinishOp(status);
*tag = return_tag_;
collection_.reset(); // drop the ref at this point
g_core_codegen_interface->grpc_call_unref(call_);
return true;
}
@ -653,6 +639,7 @@ class CallOpSet : public CallOpSetInterface,
private:
void* return_tag_;
grpc_call* call_;
};
/// A CallOpSet that does not post completions to the completion queue.
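With CallOpSetCollectionInterface removed, the lifetime guarantee moves into CallOpSet itself: FillOps now receives the grpc_call*, takes a ref before the batch is submitted, and FinalizeResult drops that ref once the completion is surfaced, which keeps the call (and the arena the op set lives in) alive while a batch is in flight. A toy model of that handoff, with a plain counter standing in for grpc_call_ref/grpc_call_unref:

#include <cassert>

struct FakeCall { int refs = 1; };            // stand-in for grpc_call
inline void call_ref(FakeCall* c) { ++c->refs; }
inline void call_unref(FakeCall* c) { --c->refs; }

class OpSet {
 public:
  void FillOps(FakeCall* call) {
    call_ref(call);     // pin the call for the duration of this batch
    call_ = call;
  }
  void FinalizeResult() {
    call_unref(call_);  // batch completed: drop the ref taken in FillOps
  }
 private:
  FakeCall* call_ = nullptr;
};

int main() {
  FakeCall call;
  OpSet ops;
  ops.FillOps(&call);    // refs == 2 while the batch is outstanding
  ops.FinalizeResult();  // back to the caller's single ref
  assert(call.refs == 1);
}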

@ -68,6 +68,10 @@ class CoreCodegen : public CoreCodegenInterface {
void gpr_cv_signal(gpr_cv* cv) override;
void gpr_cv_broadcast(gpr_cv* cv) override;
void grpc_call_ref(grpc_call* call) override;
void grpc_call_unref(grpc_call* call) override;
virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) override;
void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override;
int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,

@ -96,8 +96,11 @@ class CoreCodegenInterface {
virtual grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
size_t nslices) = 0;
virtual grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
void (*destroy)(void*),
void* user_data) = 0;
void (*destroy)(void*),
void* user_data) = 0;
virtual void grpc_call_ref(grpc_call* call) = 0;
virtual void grpc_call_unref(grpc_call* call) = 0;
virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) = 0;
virtual grpc_slice grpc_empty_slice() = 0;
virtual grpc_slice grpc_slice_malloc(size_t length) = 0;
virtual void grpc_slice_unref(grpc_slice slice) = 0;

@ -265,6 +265,10 @@ GRPCAPI grpc_call *grpc_channel_create_registered_call(
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved);
/** Allocate memory in the grpc_call arena: this memory is automatically
discarded at call completion */
GRPCAPI void *grpc_call_arena_alloc(grpc_call *call, size_t size);
/** Start a batch of operations defined in the array ops; when complete, post a
completion of type 'tag' to the completion queue bound to the call.
The order of ops specified in the batch has no significance.
@ -341,7 +345,7 @@ GRPCAPI void grpc_channel_destroy(grpc_channel *channel);
/** Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_destroy
are thread-safe, and can be called at any point before grpc_call_unref
is called.*/
GRPCAPI grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
@ -356,9 +360,13 @@ GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
const char *description,
void *reserved);
/** Destroy a call.
THREAD SAFETY: grpc_call_destroy is thread-compatible */
GRPCAPI void grpc_call_destroy(grpc_call *call);
/** Ref a call.
THREAD SAFETY: grpc_call_unref is thread-compatible */
GRPCAPI void grpc_call_ref(grpc_call *call);
/** Unref a call.
THREAD SAFETY: grpc_call_unref is thread-compatible */
GRPCAPI void grpc_call_unref(grpc_call *call);
/** Request notification of a new call.
Once a call is received, a notification tagged with \a tag_new is added to
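At the public C surface, grpc_call_destroy is split into the grpc_call_ref/grpc_call_unref pair declared above, with grpc_call_arena_alloc added for per-call scratch memory that is discarded when the call completes. A hedged sketch of how a wrapper might track that ownership; CallHandle is hypothetical, assumes a grpc_call* obtained elsewhere (for example from grpc_channel_create_call), and uses only the functions shown in this diff:

#include <cstddef>
#include <grpc/grpc.h>

// Sketch only, not a gRPC-provided class.
class CallHandle {
 public:
  explicit CallHandle(grpc_call* call) : call_(call) {}  // adopts the initial ref
  CallHandle(const CallHandle& other) : call_(other.call_) {
    if (call_ != nullptr) grpc_call_ref(call_);           // share ownership
  }
  CallHandle& operator=(const CallHandle&) = delete;
  ~CallHandle() {
    if (call_ != nullptr) grpc_call_unref(call_);         // was grpc_call_destroy(call_)
  }
  grpc_call* get() const { return call_; }
  void* ArenaAlloc(std::size_t n) const {
    return grpc_call_arena_alloc(call_, n);  // reclaimed with the call, not by us
  }
 private:
  grpc_call* call_;
};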

@ -1028,6 +1028,79 @@
<file baseinstalldir="/" name="third_party/boringssl/ssl/tls13_server.c" role="src" />
<file baseinstalldir="/" name="third_party/boringssl/ssl/tls_method.c" role="src" />
<file baseinstalldir="/" name="third_party/boringssl/ssl/tls_record.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_data.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_dns.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getenv.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getopt.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_inet_net_pton.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_iphlpapi.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_ipv6.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_library_init.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_llist.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_nowarn.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_platform.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_private.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_rules.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_setup.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strcasecmp.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strdup.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_version.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/bitncmp.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/config-win32.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/setup_once.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/ares_build.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/config_linux/ares_config.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/config_darwin/ares_config.h" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__close_sockets.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__get_hostent.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__read_line.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares__timeval.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_cancel.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_create_query.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_data.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_destroy.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_expand_name.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_expand_string.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_fds.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_free_hostent.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_free_string.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getenv.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_gethostbyaddr.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_gethostbyname.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getnameinfo.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getopt.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_getsock.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_init.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_library_init.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_llist.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_mkquery.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_nowarn.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_options.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_a_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_aaaa_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_mx_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_naptr_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_ns_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_ptr_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_soa_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_srv_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_parse_txt_reply.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_platform.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_process.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_query.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_search.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_send.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strcasecmp.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strdup.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_strerror.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_timeout.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_version.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/ares_writev.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/bitncmp.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/inet_net_pton.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/inet_ntop.c" role="src" />
<file baseinstalldir="/" name="third_party/cares/cares/windows_port.c" role="src" />
</dir>
</contents>
<dependencies>

@ -1106,8 +1106,8 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Print(*vars,
" return new "
"::grpc::ClientAsyncResponseReader< $Response$>("
" return "
"::grpc::ClientAsyncResponseReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request);\n"
@ -1129,7 +1129,7 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return new ::grpc::ClientAsyncWriter< $Request$>("
" return ::grpc::ClientAsyncWriter< $Request$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, response, tag);\n"
@ -1152,7 +1152,7 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return new ::grpc::ClientAsyncReader< $Response$>("
" return ::grpc::ClientAsyncReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, tag);\n"
@ -1174,13 +1174,14 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return new "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, tag);\n"
"}\n\n");
printer->Print(
*vars,
" return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, tag);\n"
"}\n\n");
}
}

@ -1122,6 +1122,7 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
glb_policy->base.interested_parties,
GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
&host, glb_policy->deadline, NULL);
grpc_slice_unref_internal(exec_ctx, host);
grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
@ -1152,7 +1153,7 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_destroy(glb_policy->lb_call);
grpc_call_unref(glb_policy->lb_call);
glb_policy->lb_call = NULL;
grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
@ -1293,6 +1294,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
"Received empty server list. Picks will stay pending until a "
"response with > 0 servers is received");
}
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
} else { /* serverlist == NULL */
gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",

@ -2150,6 +2150,7 @@ static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
(int)bdp);
}
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, bdp);
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, bdp);
}
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@ -2602,6 +2603,7 @@ static bool incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
(grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_chttp2_stream *s = bs->stream;
if (s->unprocessed_incoming_frames_buffer.length > 0) {
GPR_TIMER_END("incoming_byte_stream_next", 0);
return true;
} else {
gpr_ref(&bs->refs);

@ -217,8 +217,14 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
return UINT8_MAX;
}
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
grpc_error *orig = *err;
#endif
*err = gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
if (*err != orig) gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
#endif
}
uint8_t placement = (*err)->arena_size;
(*err)->arena_size = (uint8_t)((*err)->arena_size + slots);
@ -313,7 +319,7 @@ static void internal_add_error(grpc_error **err, grpc_error *new) {
// It is very common to include an extra int and string in an error
#define SURPLUS_CAPACITY (2 * SLOTS_PER_INT + SLOTS_PER_TIME)
grpc_error *grpc_error_create(grpc_slice file, int line, grpc_slice desc,
grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
grpc_error **referencing,
size_t num_referencing) {
GPR_TIMER_BEGIN("grpc_error_create", 0);
@ -339,7 +345,8 @@ grpc_error *grpc_error_create(grpc_slice file, int line, grpc_slice desc,
memset(err->times, UINT8_MAX, GRPC_ERROR_TIME_MAX);
internal_set_int(&err, GRPC_ERROR_INT_FILE_LINE, line);
internal_set_str(&err, GRPC_ERROR_STR_FILE, file);
internal_set_str(&err, GRPC_ERROR_STR_FILE,
grpc_slice_from_static_string(file));
internal_set_str(&err, GRPC_ERROR_STR_DESCRIPTION, desc);
for (size_t i = 0; i < num_referencing; ++i) {
@ -756,7 +763,7 @@ grpc_error *grpc_os_error(const char *file, int line, int err,
return grpc_error_set_str(
grpc_error_set_str(
grpc_error_set_int(
grpc_error_create(grpc_slice_from_static_string(file), line,
grpc_error_create(file, line,
grpc_slice_from_static_string("OS Error"), NULL,
0),
GRPC_ERROR_INT_ERRNO, err),
@ -772,7 +779,7 @@ grpc_error *grpc_wsa_error(const char *file, int line, int err,
grpc_error *error = grpc_error_set_str(
grpc_error_set_str(
grpc_error_set_int(
grpc_error_create(grpc_slice_from_static_string(file), line,
grpc_error_create(file, line,
grpc_slice_from_static_string("OS Error"), NULL,
0),
GRPC_ERROR_INT_WSA_ERROR, err),

@ -138,7 +138,7 @@ typedef enum {
const char *grpc_error_string(grpc_error *error);
/// Create an error - but use GRPC_ERROR_CREATE instead
grpc_error *grpc_error_create(grpc_slice file, int line, grpc_slice desc,
grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
grpc_error **referencing, size_t num_referencing);
/// Create an error (this is the preferred way of generating an error that is
/// not due to a system call - for system calls, use GRPC_OS_ERROR or
@ -148,21 +148,21 @@ grpc_error *grpc_error_create(grpc_slice file, int line, grpc_slice desc,
/// err = grpc_error_create(x, y, z, r, nr) is equivalent to:
/// err = grpc_error_create(x, y, z, NULL, 0);
/// for (i=0; i<nr; i++) err = grpc_error_add_child(err, r[i]);
#define GRPC_ERROR_CREATE_FROM_STATIC_STRING(desc) \
grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
grpc_slice_from_static_string(desc), NULL, 0)
#define GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc) \
grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
grpc_slice_from_copied_string(desc), NULL, 0)
#define GRPC_ERROR_CREATE_FROM_STATIC_STRING(desc) \
grpc_error_create(__FILE__, __LINE__, grpc_slice_from_static_string(desc), \
NULL, 0)
#define GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc) \
grpc_error_create(__FILE__, __LINE__, grpc_slice_from_copied_string(desc), \
NULL, 0)
// Create an error that references some other errors. This function adds a
// reference to each error in errs - it does not consume an existing reference
#define GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(desc, errs, count) \
grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
grpc_slice_from_static_string(desc), errs, count)
#define GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(desc, errs, count) \
grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
grpc_slice_from_copied_string(desc), errs, count)
#define GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(desc, errs, count) \
grpc_error_create(__FILE__, __LINE__, grpc_slice_from_static_string(desc), \
errs, count)
#define GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(desc, errs, count) \
grpc_error_create(__FILE__, __LINE__, grpc_slice_from_copied_string(desc), \
errs, count)
//#define GRPC_ERROR_REFCOUNT_DEBUG
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
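Because grpc_error_create now takes the file name as a plain const char*, the GRPC_ERROR_CREATE_* macros above no longer wrap __FILE__ in a slice themselves, and call sites keep using the macros unchanged. A hedged usage sketch, assuming the internal error.h header and <grpc/status.h> are available:

#include <grpc/status.h>

#include "src/core/lib/iomgr/error.h"

// Build an error with a static description, then attach a status code,
// in the style used at call sites elsewhere in the tree.
static grpc_error *make_deadline_error(void) {
  grpc_error *err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline exceeded");
  return grpc_error_set_int(err, GRPC_ERROR_INT_GRPC_STATUS,
                            GRPC_STATUS_DEADLINE_EXCEEDED);
}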

@ -72,11 +72,6 @@ typedef union lockfree_node {
struct gpr_stack_lockfree {
lockfree_node *entries;
lockfree_node head; /* An atomic entry describing curr head */
#ifndef NDEBUG
/* Bitmap of pushed entries to check for double-push or pop */
gpr_atm pushed[(INVALID_ENTRY_INDEX + 1) / (8 * sizeof(gpr_atm))];
#endif
};
gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) {
@ -91,9 +86,6 @@ gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) {
/* Clear out all entries */
memset(stack->entries, 0, entries * sizeof(stack->entries[0]));
memset(&stack->head, 0, sizeof(stack->head));
#ifndef NDEBUG
memset(&stack->pushed, 0, sizeof(stack->pushed));
#endif
GPR_ASSERT(sizeof(stack->entries->atm) == sizeof(stack->entries->contents));
@ -130,19 +122,6 @@ int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
newhead.contents.aba_ctr = ++curent.contents.aba_ctr;
gpr_atm_no_barrier_store(&stack->entries[entry].atm, curent.atm);
#ifndef NDEBUG
/* Check for double push */
{
int pushed_index = entry / (int)(8 * sizeof(gpr_atm));
int pushed_bit = entry % (int)(8 * sizeof(gpr_atm));
gpr_atm old_val;
old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
((gpr_atm)1 << pushed_bit));
GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) == 0);
}
#endif
do {
/* Atomically get the existing head value for use */
head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
@ -168,18 +147,6 @@ int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm));
} while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm));
#ifndef NDEBUG
/* Check for valid pop */
{
int pushed_index = head.contents.index / (8 * sizeof(gpr_atm));
int pushed_bit = head.contents.index % (8 * sizeof(gpr_atm));
gpr_atm old_val;
old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
-((gpr_atm)1 << pushed_bit));
GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) != 0);
}
#endif
return head.contents.index;
}

@ -160,6 +160,7 @@ typedef struct {
} child_call;
struct grpc_call {
gpr_refcount ext_ref;
gpr_arena *arena;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
@ -170,7 +171,7 @@ struct grpc_call {
/* client or server call */
bool is_client;
/** has grpc_call_destroy been called */
/** has grpc_call_unref been called */
bool destroy_called;
/** flag indicating that cancellation is inherited */
bool cancellation_is_inherited;
@ -282,6 +283,10 @@ static void add_init_error(grpc_error **composite, grpc_error *new) {
*composite = grpc_error_add_child(*composite, new);
}
void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
return gpr_arena_alloc(call->arena, size);
}
static parent_call *get_or_create_parent_call(grpc_call *call) {
parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
@ -312,6 +317,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
call = gpr_arena_alloc(arena,
sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
*out_call = call;
call->channel = args->channel;
@ -346,6 +352,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
gpr_timespec send_deadline =
gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
bool immediately_cancel = false;
if (args->parent_call != NULL) {
child_call *cc = call->child_call =
gpr_arena_alloc(arena, sizeof(child_call));
@ -386,8 +394,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) {
cancel_with_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
immediately_cancel = true;
}
}
@ -407,7 +414,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
call->send_deadline = send_deadline;
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
/* initial refcount dropped by grpc_call_unref */
grpc_call_element_args call_args = {
.call_stack = CALL_STACK_FROM_CALL(call),
.server_transport_data = args->server_transport_data,
@ -422,6 +429,10 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE,
GRPC_ERROR_REF(error));
}
if (immediately_cancel) {
cancel_with_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
}
if (args->cq != NULL) {
GPR_ASSERT(
args->pollset_set_alternative == NULL &&
@ -528,12 +539,16 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GPR_TIMER_END("destroy_call", 0);
}
void grpc_call_destroy(grpc_call *c) {
void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
child_call *cc = c->child_call;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_destroy", 0);
GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
GPR_TIMER_BEGIN("grpc_call_unref", 0);
GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c));
if (cc) {
parent_call *pc = get_parent_call(cc->parent);
@ -560,7 +575,7 @@ void grpc_call_destroy(grpc_call *c) {
}
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_destroy", 0);
GPR_TIMER_END("grpc_call_unref", 0);
}
grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {

@ -345,7 +345,7 @@ static void request_matcher_destroy(request_matcher *rm) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
grpc_error *error) {
grpc_call_destroy(grpc_call_from_top_element(elem));
grpc_call_unref(grpc_call_from_top_element(elem));
}
static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,

@ -131,7 +131,7 @@ void Channel::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
static const size_t MAX_OPS = 8;
size_t nops = 0;
grpc_op cops[MAX_OPS];
ops->FillOps(cops, &nops);
ops->FillOps(call->call(), cops, &nops);
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(call->call(), cops, nops, ops, nullptr));
}

@ -74,7 +74,7 @@ ClientContext::ClientContext()
ClientContext::~ClientContext() {
if (call_) {
grpc_call_destroy(call_);
grpc_call_unref(call_);
}
g_client_callbacks->Destructor(this);
}

@ -42,7 +42,7 @@ std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::Call(
ClientContext* context, const grpc::string& method, CompletionQueue* cq,
void* tag) {
return std::unique_ptr<GenericClientAsyncReaderWriter>(
new GenericClientAsyncReaderWriter(
GenericClientAsyncReaderWriter::Create(
channel_.get(), cq,
RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING), context, tag));
}

@ -96,6 +96,12 @@ void CoreCodegen::grpc_byte_buffer_destroy(grpc_byte_buffer* bb) {
::grpc_byte_buffer_destroy(bb);
}
void CoreCodegen::grpc_call_ref(grpc_call* call) { ::grpc_call_ref(call); }
void CoreCodegen::grpc_call_unref(grpc_call* call) { ::grpc_call_unref(call); }
void* CoreCodegen::grpc_call_arena_alloc(grpc_call* call, size_t length) {
return ::grpc_call_arena_alloc(call, length);
}
int CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
grpc_byte_buffer* buffer) {
return ::grpc_byte_buffer_reader_init(reader, buffer);

@ -607,7 +607,7 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
static const size_t MAX_OPS = 8;
size_t nops = 0;
grpc_op cops[MAX_OPS];
ops->FillOps(cops, &nops);
ops->FillOps(call->call(), cops, &nops);
auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr);
GPR_ASSERT(GRPC_CALL_OK == result);
}

@ -62,7 +62,7 @@ class ServerContext::CompletionOp final : public CallOpSetInterface {
finalized_(false),
cancelled_(0) {}
void FillOps(grpc_op* ops, size_t* nops) override;
void FillOps(grpc_call* call, grpc_op* ops, size_t* nops) override;
bool FinalizeResult(void** tag, bool* status) override;
bool CheckCancelled(CompletionQueue* cq) {
@ -100,7 +100,8 @@ void ServerContext::CompletionOp::Unref() {
}
}
void ServerContext::CompletionOp::FillOps(grpc_op* ops, size_t* nops) {
void ServerContext::CompletionOp::FillOps(grpc_call* call, grpc_op* ops,
size_t* nops) {
ops->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
ops->data.recv_close_on_server.cancelled = &cancelled_;
ops->flags = 0;
@ -151,7 +152,7 @@ ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
ServerContext::~ServerContext() {
if (call_) {
grpc_call_destroy(call_);
grpc_call_unref(call_);
}
if (completion_op_) {
completion_op_->Unref();

@ -34,14 +34,14 @@
#include "src/core/lib/support/string.h"
#include <grpc/byte_buffer_reader.h>
#include <grpc/support/port_platform.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/slice.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <string.h>
@ -84,7 +84,8 @@ typedef struct grpcsharp_batch_context {
int recv_close_on_server_cancelled;
} grpcsharp_batch_context;
GPR_EXPORT grpcsharp_batch_context *GPR_CALLTYPE grpcsharp_batch_context_create() {
GPR_EXPORT grpcsharp_batch_context *GPR_CALLTYPE
grpcsharp_batch_context_create() {
grpcsharp_batch_context *ctx = gpr_malloc(sizeof(grpcsharp_batch_context));
memset(ctx, 0, sizeof(grpcsharp_batch_context));
return ctx;
@ -96,8 +97,10 @@ typedef struct {
grpc_metadata_array request_metadata;
} grpcsharp_request_call_context;
GPR_EXPORT grpcsharp_request_call_context *GPR_CALLTYPE grpcsharp_request_call_context_create() {
grpcsharp_request_call_context *ctx = gpr_malloc(sizeof(grpcsharp_request_call_context));
GPR_EXPORT grpcsharp_request_call_context *GPR_CALLTYPE
grpcsharp_request_call_context_create() {
grpcsharp_request_call_context *ctx =
gpr_malloc(sizeof(grpcsharp_request_call_context));
memset(ctx, 0, sizeof(grpcsharp_request_call_context));
return ctx;
}
@ -175,15 +178,15 @@ grpcsharp_metadata_array_count(grpc_metadata_array *array) {
return (intptr_t)array->count;
}
GPR_EXPORT const char *GPR_CALLTYPE
grpcsharp_metadata_array_get_key(grpc_metadata_array *array, size_t index, size_t *key_length) {
GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_metadata_array_get_key(
grpc_metadata_array *array, size_t index, size_t *key_length) {
GPR_ASSERT(index < array->count);
*key_length = GRPC_SLICE_LENGTH(array->metadata[index].key);
return (char *)GRPC_SLICE_START_PTR(array->metadata[index].key);
}
GPR_EXPORT const char *GPR_CALLTYPE
grpcsharp_metadata_array_get_value(grpc_metadata_array *array, size_t index, size_t *value_length) {
GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_metadata_array_get_value(
grpc_metadata_array *array, size_t index, size_t *value_length) {
GPR_ASSERT(index < array->count);
*value_length = GRPC_SLICE_LENGTH(array->metadata[index].value);
return (char *)GRPC_SLICE_START_PTR(array->metadata[index].value);
@ -208,7 +211,8 @@ void grpcsharp_metadata_array_move(grpc_metadata_array *dest,
src->metadata = NULL;
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_destroy(grpcsharp_batch_context *ctx) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_batch_context_destroy(grpcsharp_batch_context *ctx) {
if (!ctx) {
return;
}
@ -231,7 +235,8 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_destroy(grpcsharp_batch_con
gpr_free(ctx);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_request_call_context_destroy(grpcsharp_request_call_context *ctx) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_request_call_context_destroy(grpcsharp_request_call_context *ctx) {
if (!ctx) {
return;
}
@ -240,8 +245,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_request_call_context_destroy(grpcsharp_re
to take its ownership. */
grpc_call_details_destroy(&(ctx->call_details));
grpcsharp_metadata_array_destroy_metadata_only(
&(ctx->request_metadata));
grpcsharp_metadata_array_destroy_metadata_only(&(ctx->request_metadata));
gpr_free(ctx);
}
@ -299,8 +303,10 @@ grpcsharp_batch_context_recv_status_on_client_status(
GPR_EXPORT const char *GPR_CALLTYPE
grpcsharp_batch_context_recv_status_on_client_details(
const grpcsharp_batch_context *ctx, size_t *details_length) {
*details_length = GRPC_SLICE_LENGTH(ctx->recv_status_on_client.status_details);
return (char *)GRPC_SLICE_START_PTR(ctx->recv_status_on_client.status_details);
*details_length =
GRPC_SLICE_LENGTH(ctx->recv_status_on_client.status_details);
return (char *)GRPC_SLICE_START_PTR(
ctx->recv_status_on_client.status_details);
}
GPR_EXPORT const grpc_metadata_array *GPR_CALLTYPE
@ -309,13 +315,12 @@ grpcsharp_batch_context_recv_status_on_client_trailing_metadata(
return &(ctx->recv_status_on_client.trailing_metadata);
}
GPR_EXPORT grpc_call *GPR_CALLTYPE grpcsharp_request_call_context_call(
const grpcsharp_request_call_context *ctx) {
GPR_EXPORT grpc_call *GPR_CALLTYPE
grpcsharp_request_call_context_call(const grpcsharp_request_call_context *ctx) {
return ctx->call;
}
GPR_EXPORT const char *GPR_CALLTYPE
grpcsharp_request_call_context_method(
GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_request_call_context_method(
const grpcsharp_request_call_context *ctx, size_t *method_length) {
*method_length = GRPC_SLICE_LENGTH(ctx->call_details.method);
return (char *)GRPC_SLICE_START_PTR(ctx->call_details.method);
@ -327,8 +332,7 @@ GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_request_call_context_host(
return (char *)GRPC_SLICE_START_PTR(ctx->call_details.host);
}
GPR_EXPORT gpr_timespec GPR_CALLTYPE
grpcsharp_request_call_context_deadline(
GPR_EXPORT gpr_timespec GPR_CALLTYPE grpcsharp_request_call_context_deadline(
const grpcsharp_request_call_context *ctx) {
return ctx->call_details.deadline;
}
@ -342,7 +346,7 @@ grpcsharp_request_call_context_request_metadata(
GPR_EXPORT int32_t GPR_CALLTYPE
grpcsharp_batch_context_recv_close_on_server_cancelled(
const grpcsharp_batch_context *ctx) {
return (int32_t) ctx->recv_close_on_server_cancelled;
return (int32_t)ctx->recv_close_on_server_cancelled;
}
/* Init & shutdown */
@ -389,7 +393,8 @@ grpcsharp_completion_queue_pluck(grpc_completion_queue *cq, void *tag) {
GPR_EXPORT grpc_channel *GPR_CALLTYPE
grpcsharp_insecure_channel_create(const char *target, const grpc_channel_args *args) {
grpcsharp_insecure_channel_create(const char *target,
const grpc_channel_args *args) {
return grpc_insecure_channel_create(target, args, NULL);
}
@ -397,12 +402,10 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_channel_destroy(grpc_channel *channel) {
grpc_channel_destroy(channel);
}
GPR_EXPORT grpc_call *GPR_CALLTYPE
grpcsharp_channel_create_call(grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask,
grpc_completion_queue *cq,
const char *method, const char *host,
gpr_timespec deadline) {
GPR_EXPORT grpc_call *GPR_CALLTYPE grpcsharp_channel_create_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *cq, const char *method, const char *host,
gpr_timespec deadline) {
grpc_slice method_slice = grpc_slice_from_copied_string(method);
grpc_slice *host_slice_ptr = NULL;
grpc_slice host_slice;
@ -415,18 +418,21 @@ grpcsharp_channel_create_call(grpc_channel *channel, grpc_call *parent_call,
}
GPR_EXPORT grpc_connectivity_state GPR_CALLTYPE
grpcsharp_channel_check_connectivity_state(grpc_channel *channel, int32_t try_to_connect) {
grpcsharp_channel_check_connectivity_state(grpc_channel *channel,
int32_t try_to_connect) {
return grpc_channel_check_connectivity_state(channel, try_to_connect);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, grpcsharp_batch_context *ctx) {
grpc_channel_watch_connectivity_state(channel, last_observed_state,
deadline, cq, ctx);
gpr_timespec deadline, grpc_completion_queue *cq,
grpcsharp_batch_context *ctx) {
grpc_channel_watch_connectivity_state(channel, last_observed_state, deadline,
cq, ctx);
}
GPR_EXPORT char *GPR_CALLTYPE grpcsharp_channel_get_target(grpc_channel *channel) {
GPR_EXPORT char *GPR_CALLTYPE
grpcsharp_channel_get_target(grpc_channel *channel) {
return grpc_channel_get_target(channel);
}
@ -444,9 +450,8 @@ grpcsharp_channel_args_create(size_t num_args) {
return args;
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_channel_args_set_string(grpc_channel_args *args, size_t index,
const char *key, const char *value) {
GPR_EXPORT void GPR_CALLTYPE grpcsharp_channel_args_set_string(
grpc_channel_args *args, size_t index, const char *key, const char *value) {
GPR_ASSERT(args);
GPR_ASSERT(index < args->num_args);
args->args[index].type = GRPC_ARG_STRING;
@ -454,9 +459,8 @@ grpcsharp_channel_args_set_string(grpc_channel_args *args, size_t index,
args->args[index].value.string = gpr_strdup(value);
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_channel_args_set_integer(grpc_channel_args *args, size_t index,
const char *key, int value) {
GPR_EXPORT void GPR_CALLTYPE grpcsharp_channel_args_set_integer(
grpc_channel_args *args, size_t index, const char *key, int value) {
GPR_ASSERT(args);
GPR_ASSERT(index < args->num_args);
args->args[index].type = GRPC_ARG_INTEGER;
@ -485,15 +489,18 @@ GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_now(gpr_clock_type clock_type) {
return gpr_now(clock_type);
}
GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_inf_future(gpr_clock_type clock_type) {
GPR_EXPORT gpr_timespec GPR_CALLTYPE
gprsharp_inf_future(gpr_clock_type clock_type) {
return gpr_inf_future(clock_type);
}
GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_inf_past(gpr_clock_type clock_type) {
GPR_EXPORT gpr_timespec GPR_CALLTYPE
gprsharp_inf_past(gpr_clock_type clock_type) {
return gpr_inf_past(clock_type);
}
GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock) {
GPR_EXPORT gpr_timespec GPR_CALLTYPE
gprsharp_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock) {
return gpr_convert_clock_type(t, target_clock);
}
@ -507,9 +514,8 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_cancel(grpc_call *call) {
return grpc_call_cancel(call, NULL);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_cancel_with_status(grpc_call *call, grpc_status_code status,
const char *description) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_cancel_with_status(
grpc_call *call, grpc_status_code status, const char *description) {
return grpc_call_cancel_with_status(call, status, description, NULL);
}
@ -517,18 +523,16 @@ GPR_EXPORT char *GPR_CALLTYPE grpcsharp_call_get_peer(grpc_call *call) {
return grpc_call_get_peer(call);
}
GPR_EXPORT void GPR_CALLTYPE gprsharp_free(void *p) {
gpr_free(p);
}
GPR_EXPORT void GPR_CALLTYPE gprsharp_free(void *p) { gpr_free(p); }
GPR_EXPORT void GPR_CALLTYPE grpcsharp_call_destroy(grpc_call *call) {
grpc_call_destroy(call);
grpc_call_unref(call);
}
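
Throughout the patch, the core API for releasing a call changes from grpc_call_destroy to grpc_call_unref: the wrapper drops its reference, and the core frees the call once no references remain. The exported grpcsharp_call_destroy above keeps its old name and simply forwards to the new function. A small illustrative RAII holder, not part of the patch, showing the caller-side pattern:

  #include <grpc/grpc.h>

  // Hypothetical helper: owns one wrapper reference to a grpc_call and
  // drops it in the destructor, as the wrappers above now do.
  class CallHandle {
   public:
    explicit CallHandle(grpc_call* call) : call_(call) {}
    ~CallHandle() {
      if (call_ != nullptr) grpc_call_unref(call_);  // was grpc_call_destroy
    }
    grpc_call* get() const { return call_; }

   private:
    grpc_call* call_;
  };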
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
const char *send_buffer, size_t send_buffer_len, uint32_t write_flags,
grpc_metadata_array *initial_metadata, uint32_t initial_metadata_flags) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_unary(
grpc_call *call, grpcsharp_batch_context *ctx, const char *send_buffer,
size_t send_buffer_len, uint32_t write_flags,
grpc_metadata_array *initial_metadata, uint32_t initial_metadata_flags) {
/* TODO: don't use magic number */
grpc_op ops[6];
memset(ops, 0, sizeof(ops));
@ -576,11 +580,9 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
NULL);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_start_client_streaming(grpc_call *call,
grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata,
uint32_t initial_metadata_flags) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_client_streaming(
grpc_call *call, grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata, uint32_t initial_metadata_flags) {
/* TODO: don't use magic number */
grpc_op ops[4];
memset(ops, 0, sizeof(ops));
@ -658,11 +660,9 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
NULL);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_start_duplex_streaming(grpc_call *call,
grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata,
uint32_t initial_metadata_flags) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_duplex_streaming(
grpc_call *call, grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata, uint32_t initial_metadata_flags) {
/* TODO: don't use magic number */
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
@ -690,7 +690,7 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_recv_initial_metadata(
grpc_call *call, grpcsharp_batch_context *ctx) {
grpc_call *call, grpcsharp_batch_context *ctx) {
/* TODO: don't use magic number */
grpc_op ops[1];
ops[0].op = GRPC_OP_RECV_INITIAL_METADATA;
@ -700,14 +700,13 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_recv_initial_metadata(
ops[0].reserved = NULL;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
NULL);
NULL);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
const char *send_buffer, size_t send_buffer_len,
uint32_t write_flags,
int32_t send_empty_initial_metadata) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_message(
grpc_call *call, grpcsharp_batch_context *ctx, const char *send_buffer,
size_t send_buffer_len, uint32_t write_flags,
int32_t send_empty_initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
@ -724,9 +723,8 @@ grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
return grpc_call_start_batch(call, ops, nops, ctx, NULL);
}
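
Every grpcsharp_call_* wrapper in this file follows the same shape: zero a small grpc_op array, fill in the ops it needs, and hand the array to grpc_call_start_batch with the batch context as the completion tag. A stripped-down sketch of that pattern for a single send-message op; the function name is illustrative and buffer ownership is elided:

  #include <string.h>
  #include <grpc/grpc.h>

  // One-op batch: the tag comes back from the completion queue when the
  // batch finishes, just like the grpcsharp_batch_context above.
  grpc_call_error start_send_message(grpc_call* call,
                                     grpc_byte_buffer* payload, void* tag) {
    grpc_op op;
    memset(&op, 0, sizeof(op));
    op.op = GRPC_OP_SEND_MESSAGE;
    op.data.send_message.send_message = payload;
    op.flags = 0;
    op.reserved = nullptr;
    return grpc_call_start_batch(call, &op, 1, tag, nullptr);
  }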
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_send_close_from_client(grpc_call *call,
grpcsharp_batch_context *ctx) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_close_from_client(
grpc_call *call, grpcsharp_batch_context *ctx) {
/* TODO: don't use magic number */
grpc_op ops[1];
ops[0].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
@ -740,14 +738,15 @@ grpcsharp_call_send_close_from_client(grpc_call *call,
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
grpc_call *call, grpcsharp_batch_context *ctx, grpc_status_code status_code,
const char *status_details, size_t status_details_len,
grpc_metadata_array *trailing_metadata,
int32_t send_empty_initial_metadata, const char* optional_send_buffer,
size_t optional_send_buffer_len, uint32_t write_flags) {
grpc_metadata_array *trailing_metadata, int32_t send_empty_initial_metadata,
const char *optional_send_buffer, size_t optional_send_buffer_len,
uint32_t write_flags) {
/* TODO: don't use magic number */
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
size_t nops = 1;
grpc_slice status_details_slice = grpc_slice_from_copied_buffer(status_details, status_details_len);
grpc_slice status_details_slice =
grpc_slice_from_copied_buffer(status_details, status_details_len);
ops[0].op = GRPC_OP_SEND_STATUS_FROM_SERVER;
ops[0].data.send_status_from_server.status = status_code;
ops[0].data.send_status_from_server.status_details = &status_details_slice;
@ -761,8 +760,8 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
ops[0].reserved = NULL;
if (optional_send_buffer) {
ops[nops].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(optional_send_buffer,
optional_send_buffer_len);
ctx->send_message =
string_to_byte_buffer(optional_send_buffer, optional_send_buffer_len);
ops[nops].data.send_message.send_message = ctx->send_message;
ops[nops].flags = write_flags;
ops[nops].reserved = NULL;
@ -803,10 +802,9 @@ grpcsharp_call_start_serverside(grpc_call *call, grpcsharp_batch_context *ctx) {
NULL);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_send_initial_metadata(grpc_call *call,
grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_initial_metadata(
grpc_call *call, grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[1];
memset(ops, 0, sizeof(ops));
@ -823,9 +821,8 @@ grpcsharp_call_send_initial_metadata(grpc_call *call,
NULL);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_set_credentials(
grpc_call *call,
grpc_call_credentials *creds) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_set_credentials(grpc_call *call, grpc_call_credentials *creds) {
return grpc_call_set_credentials(call, creds);
}
@ -836,14 +833,13 @@ grpcsharp_server_create(const grpc_channel_args *args) {
return grpc_server_create(args, NULL);
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq) {
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_register_completion_queue(
grpc_server *server, grpc_completion_queue *cq) {
grpc_server_register_completion_queue(server, cq, NULL);
}
GPR_EXPORT int32_t GPR_CALLTYPE
grpcsharp_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
GPR_EXPORT int32_t GPR_CALLTYPE grpcsharp_server_add_insecure_http2_port(
grpc_server *server, const char *addr) {
return grpc_server_add_insecure_http2_port(server, addr);
}
@ -851,14 +847,14 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_start(grpc_server *server) {
grpc_server_start(server);
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_server_shutdown_and_notify_callback(grpc_server *server,
grpc_completion_queue *cq,
grpcsharp_batch_context *ctx) {
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_shutdown_and_notify_callback(
grpc_server *server, grpc_completion_queue *cq,
grpcsharp_batch_context *ctx) {
grpc_server_shutdown_and_notify(server, cq, ctx);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_cancel_all_calls(grpc_server *server) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_server_cancel_all_calls(grpc_server *server) {
grpc_server_cancel_all_calls(server);
}
@ -869,9 +865,8 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_destroy(grpc_server *server) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_server_request_call(grpc_server *server, grpc_completion_queue *cq,
grpcsharp_request_call_context *ctx) {
return grpc_server_request_call(
server, &(ctx->call), &(ctx->call_details),
&(ctx->request_metadata), cq, cq, ctx);
return grpc_server_request_call(server, &(ctx->call), &(ctx->call_details),
&(ctx->request_metadata), cq, cq, ctx);
}
/* Security */
@ -888,8 +883,8 @@ static grpc_ssl_roots_override_result override_ssl_roots_handler(
return GRPC_SSL_ROOTS_OVERRIDE_OK;
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_override_default_ssl_roots(
const char *pem_root_certs) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_override_default_ssl_roots(const char *pem_root_certs) {
/*
* This currently wastes ~300kB of memory by keeping a copy of roots
* in a static variable, but for desktop/server use, the overhead
@ -916,20 +911,19 @@ grpcsharp_ssl_credentials_create(const char *pem_root_certs,
}
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_channel_credentials_release(
grpc_channel_credentials *creds) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_channel_credentials_release(grpc_channel_credentials *creds) {
grpc_channel_credentials_release(creds);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_call_credentials_release(
grpc_call_credentials *creds) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_call_credentials_release(grpc_call_credentials *creds) {
grpc_call_credentials_release(creds);
}
GPR_EXPORT grpc_channel *GPR_CALLTYPE
grpcsharp_secure_channel_create(grpc_channel_credentials *creds,
const char *target,
const grpc_channel_args *args) {
GPR_EXPORT grpc_channel *GPR_CALLTYPE grpcsharp_secure_channel_create(
grpc_channel_credentials *creds, const char *target,
const grpc_channel_args *args) {
return grpc_secure_channel_create(creds, target, args, NULL);
}
@ -962,36 +956,36 @@ grpcsharp_ssl_server_credentials_create(
return creds;
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_credentials_release(
grpc_server_credentials *creds) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_server_credentials_release(grpc_server_credentials *creds) {
grpc_server_credentials_release(creds);
}
GPR_EXPORT int32_t GPR_CALLTYPE
grpcsharp_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_server_credentials *creds) {
GPR_EXPORT int32_t GPR_CALLTYPE grpcsharp_server_add_secure_http2_port(
grpc_server *server, const char *addr, grpc_server_credentials *creds) {
return grpc_server_add_secure_http2_port(server, addr, creds);
}
GPR_EXPORT grpc_channel_credentials *GPR_CALLTYPE grpcsharp_composite_channel_credentials_create(
grpc_channel_credentials *channel_creds,
grpc_call_credentials *call_creds) {
return grpc_composite_channel_credentials_create(channel_creds, call_creds, NULL);
GPR_EXPORT grpc_channel_credentials *GPR_CALLTYPE
grpcsharp_composite_channel_credentials_create(
grpc_channel_credentials *channel_creds,
grpc_call_credentials *call_creds) {
return grpc_composite_channel_credentials_create(channel_creds, call_creds,
NULL);
}
GPR_EXPORT grpc_call_credentials *GPR_CALLTYPE grpcsharp_composite_call_credentials_create(
grpc_call_credentials *creds1,
grpc_call_credentials *creds2) {
GPR_EXPORT grpc_call_credentials *GPR_CALLTYPE
grpcsharp_composite_call_credentials_create(grpc_call_credentials *creds1,
grpc_call_credentials *creds2) {
return grpc_composite_call_credentials_create(creds1, creds2, NULL);
}
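
The two composite-credentials wrappers above are thin passthroughs to the C core, which layers per-call credentials (for example, a metadata plugin) on top of channel credentials. A hedged sketch of how the pieces compose, assuming the usual ownership convention that the created channel holds its own reference to the credentials it was given:

  #include <grpc/grpc.h>
  #include <grpc/grpc_security.h>

  // Illustrative helper: combine channel and call credentials, then build
  // a secure channel with the composite.
  grpc_channel* make_secure_channel(grpc_channel_credentials* channel_creds,
                                    grpc_call_credentials* call_creds,
                                    const char* target) {
    grpc_channel_credentials* composite =
        grpc_composite_channel_credentials_create(channel_creds, call_creds,
                                                  nullptr);
    grpc_channel* channel =
        grpc_secure_channel_create(composite, target, /*args=*/nullptr,
                                   nullptr);
    grpc_channel_credentials_release(composite);
    return channel;
  }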
/* Metadata credentials plugin */
GPR_EXPORT void GPR_CALLTYPE grpcsharp_metadata_credentials_notify_from_plugin(
grpc_credentials_plugin_metadata_cb cb,
void *user_data, grpc_metadata_array *metadata,
grpc_status_code status, const char *error_details) {
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata_array *metadata, grpc_status_code status,
const char *error_details) {
if (metadata) {
cb(user_data, metadata->metadata, metadata->count, status, error_details);
} else {
@ -1000,16 +994,17 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_metadata_credentials_notify_from_plugin(
}
typedef void(GPR_CALLTYPE *grpcsharp_metadata_interceptor_func)(
void *state, const char *service_url, const char *method_name,
grpc_credentials_plugin_metadata_cb cb,
void *user_data, int32_t is_destroy);
void *state, const char *service_url, const char *method_name,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
int32_t is_destroy);
static void grpcsharp_get_metadata_handler(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) {
grpcsharp_metadata_interceptor_func interceptor =
(grpcsharp_metadata_interceptor_func)(intptr_t)state;
interceptor(state, context.service_url, context.method_name, cb, user_data, 0);
interceptor(state, context.service_url, context.method_name, cb, user_data,
0);
}
static void grpcsharp_metadata_credentials_destroy_handler(void *state) {
@ -1018,23 +1013,26 @@ static void grpcsharp_metadata_credentials_destroy_handler(void *state) {
interceptor(state, NULL, NULL, NULL, NULL, 1);
}
GPR_EXPORT grpc_call_credentials *GPR_CALLTYPE grpcsharp_metadata_credentials_create_from_plugin(
grpcsharp_metadata_interceptor_func metadata_interceptor) {
GPR_EXPORT grpc_call_credentials *GPR_CALLTYPE
grpcsharp_metadata_credentials_create_from_plugin(
grpcsharp_metadata_interceptor_func metadata_interceptor) {
grpc_metadata_credentials_plugin plugin;
plugin.get_metadata = grpcsharp_get_metadata_handler;
plugin.destroy = grpcsharp_metadata_credentials_destroy_handler;
plugin.state = (void*)(intptr_t)metadata_interceptor;
plugin.state = (void *)(intptr_t)metadata_interceptor;
plugin.type = "";
return grpc_metadata_credentials_create_from_plugin(plugin, NULL);
}
/* Auth context */
GPR_EXPORT grpc_auth_context *GPR_CALLTYPE grpcsharp_call_auth_context(grpc_call *call) {
GPR_EXPORT grpc_auth_context *GPR_CALLTYPE
grpcsharp_call_auth_context(grpc_call *call) {
return grpc_call_auth_context(call);
}
GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_auth_context_peer_identity_property_name(
GPR_EXPORT const char *GPR_CALLTYPE
grpcsharp_auth_context_peer_identity_property_name(
const grpc_auth_context *ctx) {
return grpc_auth_context_peer_identity_property_name(ctx);
}
@ -1044,12 +1042,13 @@ grpcsharp_auth_context_property_iterator(const grpc_auth_context *ctx) {
return grpc_auth_context_property_iterator(ctx);
}
GPR_EXPORT const grpc_auth_property *GPR_CALLTYPE grpcsharp_auth_property_iterator_next(
grpc_auth_property_iterator *it) {
GPR_EXPORT const grpc_auth_property *GPR_CALLTYPE
grpcsharp_auth_property_iterator_next(grpc_auth_property_iterator *it) {
return grpc_auth_property_iterator_next(it);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_auth_context_release(grpc_auth_context *ctx) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_auth_context_release(grpc_auth_context *ctx) {
grpc_auth_context_release(ctx);
}

@ -33,10 +33,10 @@
#include <string.h>
#include <node.h>
#include <nan.h>
#include "grpc/grpc.h"
#include <node.h>
#include "grpc/byte_buffer_reader.h"
#include "grpc/grpc.h"
#include "grpc/slice.h"
#include "byte_buffer.h"

@ -36,8 +36,8 @@
#include <string.h>
#include <node.h>
#include <nan.h>
#include <node.h>
#include "grpc/grpc.h"
namespace grpc {

@ -31,23 +31,23 @@
*
*/
#include <map>
#include <memory>
#include <vector>
#include <map>
#include <node.h>
#include "grpc/support/log.h"
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
#include "grpc/support/alloc.h"
#include "grpc/support/time.h"
#include "byte_buffer.h"
#include "call.h"
#include "call_credentials.h"
#include "channel.h"
#include "completion_queue.h"
#include "completion_queue_async_worker.h"
#include "call_credentials.h"
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
#include "grpc/support/alloc.h"
#include "grpc/support/log.h"
#include "grpc/support/time.h"
#include "slice.h"
#include "timeval.h"
@ -101,20 +101,20 @@ bool CreateMetadataArray(Local<Object> metadata, grpc_metadata_array *array) {
HandleScope scope;
Local<Array> keys = Nan::GetOwnPropertyNames(metadata).ToLocalChecked();
for (unsigned int i = 0; i < keys->Length(); i++) {
Local<String> current_key = Nan::To<String>(
Nan::Get(keys, i).ToLocalChecked()).ToLocalChecked();
Local<String> current_key =
Nan::To<String>(Nan::Get(keys, i).ToLocalChecked()).ToLocalChecked();
Local<Value> value_array = Nan::Get(metadata, current_key).ToLocalChecked();
if (!value_array->IsArray()) {
return false;
}
array->capacity += Local<Array>::Cast(value_array)->Length();
}
array->metadata = reinterpret_cast<grpc_metadata*>(
array->metadata = reinterpret_cast<grpc_metadata *>(
gpr_zalloc(array->capacity * sizeof(grpc_metadata)));
for (unsigned int i = 0; i < keys->Length(); i++) {
Local<String> current_key(Nan::To<String>(keys->Get(i)).ToLocalChecked());
Local<Array> values = Local<Array>::Cast(
Nan::Get(metadata, current_key).ToLocalChecked());
Local<Array> values =
Local<Array>::Cast(Nan::Get(metadata, current_key).ToLocalChecked());
grpc_slice key_slice = CreateSliceFromString(current_key);
grpc_slice key_intern_slice = grpc_slice_intern(key_slice);
grpc_slice_unref(key_slice);
@ -157,7 +157,7 @@ Local<Value> ParseMetadata(const grpc_metadata_array *metadata_array) {
size_t length = metadata_array->count;
Local<Object> metadata_object = Nan::New<Object>();
for (unsigned int i = 0; i < length; i++) {
grpc_metadata* elem = &metadata_elements[i];
grpc_metadata *elem = &metadata_elements[i];
// TODO(murgatroid99): Use zero-copy string construction instead
Local<String> key_string = CopyStringFromSlice(elem->key);
Local<Array> array;
@ -183,17 +183,12 @@ Local<Value> Op::GetOpType() const {
return scope.Escape(Nan::New(GetTypeString()).ToLocalChecked());
}
Op::~Op() {
}
Op::~Op() {}
class SendMetadataOp : public Op {
public:
SendMetadataOp() {
grpc_metadata_array_init(&send_metadata);
}
~SendMetadataOp() {
DestroyMetadataArray(&send_metadata);
}
SendMetadataOp() { grpc_metadata_array_init(&send_metadata); }
~SendMetadataOp() { DestroyMetadataArray(&send_metadata); }
Local<Value> GetNodeValue() const {
EscapableHandleScope scope;
return scope.Escape(Nan::True());
@ -206,32 +201,26 @@ class SendMetadataOp : public Op {
if (maybe_metadata.IsEmpty()) {
return false;
}
if (!CreateMetadataArray(maybe_metadata.ToLocalChecked(),
&send_metadata)) {
if (!CreateMetadataArray(maybe_metadata.ToLocalChecked(), &send_metadata)) {
return false;
}
out->data.send_initial_metadata.count = send_metadata.count;
out->data.send_initial_metadata.metadata = send_metadata.metadata;
return true;
}
bool IsFinalOp() {
return false;
}
void OnComplete(bool success) {
}
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "send_metadata";
}
std::string GetTypeString() const { return "send_metadata"; }
private:
grpc_metadata_array send_metadata;
};
class SendMessageOp : public Op {
public:
SendMessageOp() {
send_message = NULL;
}
SendMessageOp() { send_message = NULL; }
~SendMessageOp() {
if (send_message != NULL) {
grpc_byte_buffer_destroy(send_message);
@ -246,8 +235,8 @@ class SendMessageOp : public Op {
return false;
}
Local<Object> object_value = Nan::To<Object>(value).ToLocalChecked();
MaybeLocal<Value> maybe_flag_value = Nan::Get(
object_value, Nan::New("grpcWriteFlags").ToLocalChecked());
MaybeLocal<Value> maybe_flag_value =
Nan::Get(object_value, Nan::New("grpcWriteFlags").ToLocalChecked());
if (!maybe_flag_value.IsEmpty()) {
Local<Value> flag_value = maybe_flag_value.ToLocalChecked();
if (flag_value->IsUint32()) {
@ -259,15 +248,13 @@ class SendMessageOp : public Op {
out->data.send_message.send_message = send_message;
return true;
}
bool IsFinalOp() {
return false;
}
void OnComplete(bool success) {
}
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "send_message";
}
std::string GetTypeString() const { return "send_message"; }
private:
grpc_byte_buffer *send_message;
};
@ -278,25 +265,18 @@ class SendClientCloseOp : public Op {
EscapableHandleScope scope;
return scope.Escape(Nan::True());
}
bool ParseOp(Local<Value> value, grpc_op *out) {
return true;
}
bool IsFinalOp() {
return false;
}
void OnComplete(bool success) {
}
bool ParseOp(Local<Value> value, grpc_op *out) { return true; }
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "client_close";
}
std::string GetTypeString() const { return "client_close"; }
};
class SendServerStatusOp : public Op {
public:
SendServerStatusOp() {
grpc_metadata_array_init(&status_metadata);
}
SendServerStatusOp() { grpc_metadata_array_init(&status_metadata); }
~SendServerStatusOp() {
grpc_slice_unref(details);
DestroyMetadataArray(&status_metadata);
@ -310,18 +290,18 @@ class SendServerStatusOp : public Op {
return false;
}
Local<Object> server_status = Nan::To<Object>(value).ToLocalChecked();
MaybeLocal<Value> maybe_metadata = Nan::Get(
server_status, Nan::New("metadata").ToLocalChecked());
MaybeLocal<Value> maybe_metadata =
Nan::Get(server_status, Nan::New("metadata").ToLocalChecked());
if (maybe_metadata.IsEmpty()) {
return false;
}
if (!maybe_metadata.ToLocalChecked()->IsObject()) {
return false;
}
Local<Object> metadata = Nan::To<Object>(
maybe_metadata.ToLocalChecked()).ToLocalChecked();
MaybeLocal<Value> maybe_code = Nan::Get(server_status,
Nan::New("code").ToLocalChecked());
Local<Object> metadata =
Nan::To<Object>(maybe_metadata.ToLocalChecked()).ToLocalChecked();
MaybeLocal<Value> maybe_code =
Nan::Get(server_status, Nan::New("code").ToLocalChecked());
if (maybe_code.IsEmpty()) {
return false;
}
@ -329,16 +309,16 @@ class SendServerStatusOp : public Op {
return false;
}
uint32_t code = Nan::To<uint32_t>(maybe_code.ToLocalChecked()).FromJust();
MaybeLocal<Value> maybe_details = Nan::Get(
server_status, Nan::New("details").ToLocalChecked());
MaybeLocal<Value> maybe_details =
Nan::Get(server_status, Nan::New("details").ToLocalChecked());
if (maybe_details.IsEmpty()) {
return false;
}
if (!maybe_details.ToLocalChecked()->IsString()) {
return false;
}
Local<String> details = Nan::To<String>(
maybe_details.ToLocalChecked()).ToLocalChecked();
Local<String> details =
Nan::To<String>(maybe_details.ToLocalChecked()).ToLocalChecked();
if (!CreateMetadataArray(metadata, &status_metadata)) {
return false;
}
@ -352,15 +332,11 @@ class SendServerStatusOp : public Op {
out->data.send_status_from_server.status_details = &this->details;
return true;
}
bool IsFinalOp() {
return true;
}
void OnComplete(bool success) {
}
bool IsFinalOp() { return true; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "send_status";
}
std::string GetTypeString() const { return "send_status"; }
private:
grpc_slice details;
@ -369,13 +345,9 @@ class SendServerStatusOp : public Op {
class GetMetadataOp : public Op {
public:
GetMetadataOp() {
grpc_metadata_array_init(&recv_metadata);
}
GetMetadataOp() { grpc_metadata_array_init(&recv_metadata); }
~GetMetadataOp() {
grpc_metadata_array_destroy(&recv_metadata);
}
~GetMetadataOp() { grpc_metadata_array_destroy(&recv_metadata); }
Local<Value> GetNodeValue() const {
EscapableHandleScope scope;
@ -386,16 +358,11 @@ class GetMetadataOp : public Op {
out->data.recv_initial_metadata.recv_initial_metadata = &recv_metadata;
return true;
}
bool IsFinalOp() {
return false;
}
void OnComplete(bool success) {
}
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "metadata";
}
std::string GetTypeString() const { return "metadata"; }
private:
grpc_metadata_array recv_metadata;
@ -403,9 +370,7 @@ class GetMetadataOp : public Op {
class ReadMessageOp : public Op {
public:
ReadMessageOp() {
recv_message = NULL;
}
ReadMessageOp() { recv_message = NULL; }
~ReadMessageOp() {
if (recv_message != NULL) {
grpc_byte_buffer_destroy(recv_message);
@ -420,16 +385,11 @@ class ReadMessageOp : public Op {
out->data.recv_message.recv_message = &recv_message;
return true;
}
bool IsFinalOp() {
return false;
}
void OnComplete(bool success) {
}
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "read";
}
std::string GetTypeString() const { return "read"; }
private:
grpc_byte_buffer *recv_message;
@ -437,13 +397,9 @@ class ReadMessageOp : public Op {
class ClientStatusOp : public Op {
public:
ClientStatusOp() {
grpc_metadata_array_init(&metadata_array);
}
ClientStatusOp() { grpc_metadata_array_init(&metadata_array); }
~ClientStatusOp() {
grpc_metadata_array_destroy(&metadata_array);
}
~ClientStatusOp() { grpc_metadata_array_destroy(&metadata_array); }
bool ParseOp(Local<Value> value, grpc_op *out) {
out->data.recv_status_on_client.trailing_metadata = &metadata_array;
@ -456,22 +412,19 @@ class ClientStatusOp : public Op {
EscapableHandleScope scope;
Local<Object> status_obj = Nan::New<Object>();
Nan::Set(status_obj, Nan::New("code").ToLocalChecked(),
Nan::New<Number>(status));
Nan::New<Number>(status));
Nan::Set(status_obj, Nan::New("details").ToLocalChecked(),
CopyStringFromSlice(status_details));
CopyStringFromSlice(status_details));
Nan::Set(status_obj, Nan::New("metadata").ToLocalChecked(),
ParseMetadata(&metadata_array));
return scope.Escape(status_obj);
}
bool IsFinalOp() {
return true;
}
void OnComplete(bool success) {
}
bool IsFinalOp() { return true; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "status";
}
std::string GetTypeString() const { return "status"; }
private:
grpc_metadata_array metadata_array;
grpc_status_code status;
@ -489,23 +442,18 @@ class ServerCloseResponseOp : public Op {
out->data.recv_close_on_server.cancelled = &cancelled;
return true;
}
bool IsFinalOp() {
return false;
}
void OnComplete(bool success) {
}
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
protected:
std::string GetTypeString() const {
return "cancelled";
}
std::string GetTypeString() const { return "cancelled"; }
private:
int cancelled;
};
tag::tag(Callback *callback, OpVec *ops, Call *call, Local<Value> call_value) :
callback(callback), ops(ops), call(call){
tag::tag(Callback *callback, OpVec *ops, Call *call, Local<Value> call_value)
: callback(callback), ops(ops), call(call) {
HandleScope scope;
call_persist.Reset(call_value);
}
@ -555,19 +503,15 @@ void DestroyTag(void *tag) {
void Call::DestroyCall() {
if (this->wrapped_call != NULL) {
grpc_call_destroy(this->wrapped_call);
grpc_call_unref(this->wrapped_call);
this->wrapped_call = NULL;
}
}
Call::Call(grpc_call *call) : wrapped_call(call),
pending_batches(0),
has_final_op_completed(false) {
}
Call::Call(grpc_call *call)
: wrapped_call(call), pending_batches(0), has_final_op_completed(false) {}
Call::~Call() {
DestroyCall();
}
Call::~Call() { DestroyCall(); }
void Call::Init(Local<Object> exports) {
HandleScope scope;
@ -596,10 +540,10 @@ Local<Value> Call::WrapStruct(grpc_call *call) {
return scope.Escape(Nan::Null());
}
const int argc = 1;
Local<Value> argv[argc] = {Nan::New<External>(
reinterpret_cast<void *>(call))};
MaybeLocal<Object> maybe_instance = Nan::NewInstance(
constructor->GetFunction(), argc, argv);
Local<Value> argv[argc] = {
Nan::New<External>(reinterpret_cast<void *>(call))};
MaybeLocal<Object> maybe_instance =
Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
return scope.Escape(Nan::Null());
} else {
@ -631,8 +575,7 @@ NAN_METHOD(Call::New) {
if (info[0]->IsExternal()) {
Local<External> ext = info[0].As<External>();
// This option is used for wrapping an existing call
grpc_call *call_value =
reinterpret_cast<grpc_call *>(ext->Value());
grpc_call *call_value = reinterpret_cast<grpc_call *>(ext->Value());
call = new Call(call_value);
} else {
if (!Channel::HasInstance(info[0])) {
@ -648,8 +591,8 @@ NAN_METHOD(Call::New) {
// These arguments are at the end because they are optional
grpc_call *parent_call = NULL;
if (Call::HasInstance(info[4])) {
Call *parent_obj = ObjectWrap::Unwrap<Call>(
Nan::To<Object>(info[4]).ToLocalChecked());
Call *parent_obj =
ObjectWrap::Unwrap<Call>(Nan::To<Object>(info[4]).ToLocalChecked());
parent_call = parent_obj->wrapped_call;
} else if (!(info[4]->IsUndefined() || info[4]->IsNull())) {
return Nan::ThrowTypeError(
@ -670,22 +613,20 @@ NAN_METHOD(Call::New) {
double deadline = Nan::To<double>(info[2]).FromJust();
grpc_channel *wrapped_channel = channel->GetWrappedChannel();
grpc_call *wrapped_call;
grpc_slice method = CreateSliceFromString(
Nan::To<String>(info[1]).ToLocalChecked());
grpc_slice method =
CreateSliceFromString(Nan::To<String>(info[1]).ToLocalChecked());
if (info[3]->IsString()) {
grpc_slice *host = new grpc_slice;
*host = CreateSliceFromString(
Nan::To<String>(info[3]).ToLocalChecked());
*host =
CreateSliceFromString(Nan::To<String>(info[3]).ToLocalChecked());
wrapped_call = grpc_channel_create_call(
wrapped_channel, parent_call, propagate_flags,
GetCompletionQueue(), method,
host, MillisecondsToTimespec(deadline), NULL);
wrapped_channel, parent_call, propagate_flags, GetCompletionQueue(),
method, host, MillisecondsToTimespec(deadline), NULL);
delete host;
} else if (info[3]->IsUndefined() || info[3]->IsNull()) {
wrapped_call = grpc_channel_create_call(
wrapped_channel, parent_call, propagate_flags,
GetCompletionQueue(), method,
NULL, MillisecondsToTimespec(deadline), NULL);
wrapped_channel, parent_call, propagate_flags, GetCompletionQueue(),
method, NULL, MillisecondsToTimespec(deadline), NULL);
} else {
return Nan::ThrowTypeError("Call's fourth argument must be a string");
}
@ -699,8 +640,8 @@ NAN_METHOD(Call::New) {
} else {
const int argc = 4;
Local<Value> argv[argc] = {info[0], info[1], info[2], info[3]};
MaybeLocal<Object> maybe_instance = Nan::NewInstance(
constructor->GetFunction(), argc, argv);
MaybeLocal<Object> maybe_instance =
Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
// There's probably a pending exception
return;
@ -773,8 +714,8 @@ NAN_METHOD(Call::StartBatch) {
}
Callback *callback = new Callback(callback_func);
grpc_call_error error = grpc_call_start_batch(
call->wrapped_call, &ops[0], nops, new struct tag(
callback, op_vector.release(), call, info.This()), NULL);
call->wrapped_call, &ops[0], nops,
new struct tag(callback, op_vector.release(), call, info.This()), NULL);
if (error != GRPC_CALL_OK) {
return Nan::ThrowError(nanErrorWithCode("startBatch failed", error));
}
@ -807,8 +748,8 @@ NAN_METHOD(Call::CancelWithStatus) {
"cancelWithStatus's second argument must be a string");
}
Call *call = ObjectWrap::Unwrap<Call>(info.This());
grpc_status_code code = static_cast<grpc_status_code>(
Nan::To<uint32_t>(info[0]).FromJust());
grpc_status_code code =
static_cast<grpc_status_code>(Nan::To<uint32_t>(info[0]).FromJust());
if (code == GRPC_STATUS_OK) {
return Nan::ThrowRangeError(
"cancelWithStatus cannot be called with OK status");

@ -37,14 +37,13 @@
#include <memory>
#include <vector>
#include <node.h>
#include <nan.h>
#include <node.h>
#include "grpc/grpc.h"
#include "grpc/support/log.h"
#include "channel.h"
namespace grpc {
namespace node {

@ -31,17 +31,17 @@
*
*/
#include <node.h>
#include <nan.h>
#include <node.h>
#include <uv.h>
#include <queue>
#include "call.h"
#include "call_credentials.h"
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
#include "grpc/support/log.h"
#include "call_credentials.h"
#include "call.h"
namespace grpc {
namespace node {
@ -86,15 +86,15 @@ void CallCredentials::Init(Local<Object> exports) {
fun_tpl.Reset(tpl);
Local<Function> ctr = Nan::GetFunction(tpl).ToLocalChecked();
Nan::Set(ctr, Nan::New("createFromPlugin").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(CreateFromPlugin)).ToLocalChecked());
Nan::GetFunction(Nan::New<FunctionTemplate>(CreateFromPlugin))
.ToLocalChecked());
Nan::Set(exports, Nan::New("CallCredentials").ToLocalChecked(), ctr);
constructor = new Nan::Callback(ctr);
Local<FunctionTemplate> callback_tpl =
Nan::New<FunctionTemplate>(PluginCallback);
plugin_callback = new Callback(
Nan::GetFunction(callback_tpl).ToLocalChecked());
plugin_callback =
new Callback(Nan::GetFunction(callback_tpl).ToLocalChecked());
}
bool CallCredentials::HasInstance(Local<Value> val) {
@ -109,9 +109,9 @@ Local<Value> CallCredentials::WrapStruct(grpc_call_credentials *credentials) {
return scope.Escape(Nan::Null());
}
Local<Value> argv[argc] = {
Nan::New<External>(reinterpret_cast<void *>(credentials))};
MaybeLocal<Object> maybe_instance = Nan::NewInstance(
constructor->GetFunction(), argc, argv);
Nan::New<External>(reinterpret_cast<void *>(credentials))};
MaybeLocal<Object> maybe_instance =
Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
return scope.Escape(Nan::Null());
} else {
@ -160,8 +160,6 @@ NAN_METHOD(CallCredentials::Compose) {
info.GetReturnValue().Set(WrapStruct(creds));
}
NAN_METHOD(CallCredentials::CreateFromPlugin) {
if (!info[0]->IsFunction()) {
return Nan::ThrowTypeError(
@ -170,21 +168,19 @@ NAN_METHOD(CallCredentials::CreateFromPlugin) {
grpc_metadata_credentials_plugin plugin;
plugin_state *state = new plugin_state;
state->callback = new Nan::Callback(info[0].As<Function>());
state->pending_callbacks = new std::queue<plugin_callback_data*>();
state->pending_callbacks = new std::queue<plugin_callback_data *>();
uv_mutex_init(&state->plugin_mutex);
uv_async_init(uv_default_loop(),
&state->plugin_async,
SendPluginCallback);
uv_unref((uv_handle_t*)&state->plugin_async);
uv_async_init(uv_default_loop(), &state->plugin_async, SendPluginCallback);
uv_unref((uv_handle_t *)&state->plugin_async);
state->plugin_async.data = state;
plugin.get_metadata = plugin_get_metadata;
plugin.destroy = plugin_destroy_state;
plugin.state = reinterpret_cast<void*>(state);
plugin.state = reinterpret_cast<void *>(state);
plugin.type = "";
grpc_call_credentials *creds = grpc_metadata_credentials_create_from_plugin(
plugin, NULL);
grpc_call_credentials *creds =
grpc_metadata_credentials_create_from_plugin(plugin, NULL);
info.GetReturnValue().Set(WrapStruct(creds));
}
@ -206,34 +202,35 @@ NAN_METHOD(PluginCallback) {
return Nan::ThrowTypeError(
"The callback's fourth argument must be an object");
}
grpc_status_code code = static_cast<grpc_status_code>(
Nan::To<uint32_t>(info[0]).FromJust());
grpc_status_code code =
static_cast<grpc_status_code>(Nan::To<uint32_t>(info[0]).FromJust());
Utf8String details_utf8_str(info[1]);
char *details = *details_utf8_str;
grpc_metadata_array array;
grpc_metadata_array_init(&array);
Local<Object> callback_data = Nan::To<Object>(info[3]).ToLocalChecked();
if (!CreateMetadataArray(Nan::To<Object>(info[2]).ToLocalChecked(),
&array)){
if (!CreateMetadataArray(Nan::To<Object>(info[2]).ToLocalChecked(), &array)) {
return Nan::ThrowError("Failed to parse metadata");
}
grpc_credentials_plugin_metadata_cb cb =
reinterpret_cast<grpc_credentials_plugin_metadata_cb>(
Nan::Get(callback_data,
Nan::New("cb").ToLocalChecked()
).ToLocalChecked().As<External>()->Value());
Nan::Get(callback_data, Nan::New("cb").ToLocalChecked())
.ToLocalChecked()
.As<External>()
->Value());
void *user_data =
Nan::Get(callback_data,
Nan::New("user_data").ToLocalChecked()
).ToLocalChecked().As<External>()->Value();
Nan::Get(callback_data, Nan::New("user_data").ToLocalChecked())
.ToLocalChecked()
.As<External>()
->Value();
cb(user_data, array.metadata, array.count, code, details);
DestroyMetadataArray(&array);
}
NAUV_WORK_CB(SendPluginCallback) {
Nan::HandleScope scope;
plugin_state *state = reinterpret_cast<plugin_state*>(async->data);
std::queue<plugin_callback_data*> callbacks;
plugin_state *state = reinterpret_cast<plugin_state *>(async->data);
std::queue<plugin_callback_data *> callbacks;
uv_mutex_lock(&state->plugin_mutex);
state->pending_callbacks->swap(callbacks);
uv_mutex_unlock(&state->plugin_mutex);
@ -242,16 +239,14 @@ NAUV_WORK_CB(SendPluginCallback) {
callbacks.pop();
Local<Object> callback_data = Nan::New<Object>();
Nan::Set(callback_data, Nan::New("cb").ToLocalChecked(),
Nan::New<v8::External>(reinterpret_cast<void*>(data->cb)));
Nan::New<v8::External>(reinterpret_cast<void *>(data->cb)));
Nan::Set(callback_data, Nan::New("user_data").ToLocalChecked(),
Nan::New<v8::External>(data->user_data));
const int argc = 3;
v8::Local<v8::Value> argv[argc] = {
Nan::New(data->service_url).ToLocalChecked(),
callback_data,
// Get Local<Function> from Nan::Callback*
**plugin_callback
};
Nan::New(data->service_url).ToLocalChecked(), callback_data,
// Get Local<Function> from Nan::Callback*
**plugin_callback};
Nan::Callback *callback = state->callback;
callback->Call(argc, argv);
delete data;
@ -261,7 +256,7 @@ NAUV_WORK_CB(SendPluginCallback) {
void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data) {
plugin_state *p_state = reinterpret_cast<plugin_state*>(state);
plugin_state *p_state = reinterpret_cast<plugin_state *>(state);
plugin_callback_data *data = new plugin_callback_data;
data->service_url = context.service_url;
data->cb = cb;
@ -275,7 +270,7 @@ void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
}
void plugin_uv_close_cb(uv_handle_t *handle) {
uv_async_t *async = reinterpret_cast<uv_async_t*>(handle);
uv_async_t *async = reinterpret_cast<uv_async_t *>(handle);
plugin_state *state = reinterpret_cast<plugin_state *>(async->data);
uv_mutex_destroy(&state->plugin_mutex);
delete state->pending_callbacks;
@ -285,7 +280,7 @@ void plugin_uv_close_cb(uv_handle_t *handle) {
void plugin_destroy_state(void *ptr) {
plugin_state *state = reinterpret_cast<plugin_state *>(ptr);
uv_close((uv_handle_t*)&state->plugin_async, plugin_uv_close_cb);
uv_close((uv_handle_t *)&state->plugin_async, plugin_uv_close_cb);
}
} // namespace node
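
call_credentials.cc above marshals plugin callbacks from gRPC-owned threads onto the Node event loop: plugin_get_metadata queues the request under a mutex and signals a uv_async_t, and SendPluginCallback drains the queue on the loop thread before invoking the JS callback. A condensed sketch of that handoff; WorkItem, post_work, and drain are hypothetical names, and std::mutex stands in for the uv_mutex_t used above:

  #include <uv.h>
  #include <mutex>
  #include <queue>

  struct WorkItem { /* payload captured on the gRPC thread */ };

  static std::queue<WorkItem*> g_pending;
  static std::mutex g_mu;
  static uv_async_t g_async;  // uv_async_init(uv_default_loop(), &g_async, drain)

  // Runs on the loop thread whenever the async handle is signalled.
  void drain(uv_async_t* /*handle*/) {
    std::queue<WorkItem*> local;
    {
      std::lock_guard<std::mutex> lock(g_mu);
      g_pending.swap(local);
    }
    while (!local.empty()) {
      WorkItem* item = local.front();
      local.pop();
      // ... hand the item to the stored JS callback ...
      delete item;
    }
  }

  // May be called from any thread: enqueue, then wake the loop.
  void post_work(WorkItem* item) {
    {
      std::lock_guard<std::mutex> lock(g_mu);
      g_pending.push(item);
    }
    uv_async_send(&g_async);
  }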

@ -36,8 +36,8 @@
#include <queue>
#include <node.h>
#include <nan.h>
#include <node.h>
#include <uv.h>
#include "grpc/grpc_security.h"
@ -84,7 +84,7 @@ typedef struct plugin_callback_data {
typedef struct plugin_state {
Nan::Callback *callback;
std::queue<plugin_callback_data*> *pending_callbacks;
std::queue<plugin_callback_data *> *pending_callbacks;
uv_mutex_t plugin_mutex;
// async.data == this
uv_async_t plugin_async;

@ -35,15 +35,15 @@
#include "grpc/support/log.h"
#include <node.h>
#include <nan.h>
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
#include <node.h>
#include "call.h"
#include "channel.h"
#include "channel_credentials.h"
#include "completion_queue.h"
#include "completion_queue_async_worker.h"
#include "channel_credentials.h"
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
#include "timeval.h"
namespace grpc {
@ -82,8 +82,8 @@ bool ParseChannelArgs(Local<Value> args_val,
*channel_args_ptr = NULL;
return false;
}
grpc_channel_args *channel_args = reinterpret_cast<grpc_channel_args*>(
malloc(sizeof(grpc_channel_args)));
grpc_channel_args *channel_args =
reinterpret_cast<grpc_channel_args *>(malloc(sizeof(grpc_channel_args)));
*channel_args_ptr = channel_args;
Local<Object> args_hash = Nan::To<Object>(args_val).ToLocalChecked();
Local<Array> keys = Nan::GetOwnPropertyNames(args_hash).ToLocalChecked();
@ -104,16 +104,16 @@ bool ParseChannelArgs(Local<Value> args_val,
} else if (value->IsString()) {
Utf8String val_str(value);
channel_args->args[i].type = GRPC_ARG_STRING;
channel_args->args[i].value.string = reinterpret_cast<char*>(
calloc(val_str.length() + 1,sizeof(char)));
memcpy(channel_args->args[i].value.string,
*val_str, val_str.length() + 1);
channel_args->args[i].value.string =
reinterpret_cast<char *>(calloc(val_str.length() + 1, sizeof(char)));
memcpy(channel_args->args[i].value.string, *val_str,
val_str.length() + 1);
} else {
// The value does not match either of the accepted types
return false;
}
channel_args->args[i].key = reinterpret_cast<char*>(
calloc(key_str.length() + 1, sizeof(char)));
channel_args->args[i].key =
reinterpret_cast<char *>(calloc(key_str.length() + 1, sizeof(char)));
memcpy(channel_args->args[i].key, *key_str, key_str.length() + 1);
}
return true;
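
ParseChannelArgs above converts the JS options object into a grpc_channel_args: one grpc_arg per key, typed GRPC_ARG_INTEGER or GRPC_ARG_STRING, with freshly allocated key and string-value buffers. For reference, a hand-built equivalent of a two-entry argument list; the helper name is hypothetical and the two keys are merely examples of real channel arguments:

  #include <grpc/grpc.h>

  // Static sketch of what ParseChannelArgs assembles dynamically.
  grpc_channel_args make_example_args() {
    static grpc_arg args[2];
    args[0].type = GRPC_ARG_STRING;
    args[0].key = const_cast<char*>("grpc.default_authority");
    args[0].value.string = const_cast<char*>("example.invalid");
    args[1].type = GRPC_ARG_INTEGER;
    args[1].key = const_cast<char*>("grpc.max_receive_message_length");
    args[1].value.integer = 4 * 1024 * 1024;
    grpc_channel_args channel_args = {2, args};
    return channel_args;
  }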
@ -190,12 +190,13 @@ NAN_METHOD(Channel::New) {
grpc_channel_args *channel_args_ptr = NULL;
if (!ParseChannelArgs(info[2], &channel_args_ptr)) {
DeallocateChannelArgs(channel_args_ptr);
return Nan::ThrowTypeError("Channel options must be an object with "
"string keys and integer or string values");
return Nan::ThrowTypeError(
"Channel options must be an object with "
"string keys and integer or string values");
}
if (creds == NULL) {
wrapped_channel = grpc_insecure_channel_create(*host, channel_args_ptr,
NULL);
wrapped_channel =
grpc_insecure_channel_create(*host, channel_args_ptr, NULL);
} else {
wrapped_channel =
grpc_secure_channel_create(creds, *host, channel_args_ptr, NULL);
@ -208,8 +209,8 @@ NAN_METHOD(Channel::New) {
} else {
const int argc = 3;
Local<Value> argv[argc] = {info[0], info[1], info[2]};
MaybeLocal<Object> maybe_instance = Nan::NewInstance(
constructor->GetFunction(), argc, argv);
MaybeLocal<Object> maybe_instance =
Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
// There's probably a pending exception
return;
@ -232,11 +233,13 @@ NAN_METHOD(Channel::Close) {
NAN_METHOD(Channel::GetTarget) {
if (!HasInstance(info.This())) {
return Nan::ThrowTypeError("getTarget can only be called on Channel objects");
return Nan::ThrowTypeError(
"getTarget can only be called on Channel objects");
}
Channel *channel = ObjectWrap::Unwrap<Channel>(info.This());
info.GetReturnValue().Set(Nan::New(
grpc_channel_get_target(channel->wrapped_channel)).ToLocalChecked());
info.GetReturnValue().Set(
Nan::New(grpc_channel_get_target(channel->wrapped_channel))
.ToLocalChecked());
}
NAN_METHOD(Channel::GetConnectivityState) {
@ -246,9 +249,8 @@ NAN_METHOD(Channel::GetConnectivityState) {
}
Channel *channel = ObjectWrap::Unwrap<Channel>(info.This());
int try_to_connect = (int)info[0]->Equals(Nan::True());
info.GetReturnValue().Set(
grpc_channel_check_connectivity_state(channel->wrapped_channel,
try_to_connect));
info.GetReturnValue().Set(grpc_channel_check_connectivity_state(
channel->wrapped_channel, try_to_connect));
}
NAN_METHOD(Channel::WatchConnectivityState) {
@ -268,9 +270,8 @@ NAN_METHOD(Channel::WatchConnectivityState) {
return Nan::ThrowTypeError(
"watchConnectivityState's third argument must be a callback");
}
grpc_connectivity_state last_state =
static_cast<grpc_connectivity_state>(
Nan::To<uint32_t>(info[0]).FromJust());
grpc_connectivity_state last_state = static_cast<grpc_connectivity_state>(
Nan::To<uint32_t>(info[0]).FromJust());
double deadline = Nan::To<double>(info[1]).FromJust();
Local<Function> callback_func = info[2].As<Function>();
Nan::Callback *callback = new Callback(callback_func);
@ -279,8 +280,7 @@ NAN_METHOD(Channel::WatchConnectivityState) {
grpc_channel_watch_connectivity_state(
channel->wrapped_channel, last_state, MillisecondsToTimespec(deadline),
GetCompletionQueue(),
new struct tag(callback,
ops.release(), NULL, Nan::Null()));
new struct tag(callback, ops.release(), NULL, Nan::Null()));
CompletionQueueNext();
}

@ -34,8 +34,8 @@
#ifndef NET_GRPC_NODE_CHANNEL_H_
#define NET_GRPC_NODE_CHANNEL_H_
#include <node.h>
#include <nan.h>
#include <node.h>
#include "grpc/grpc.h"
namespace grpc {

@ -33,12 +33,12 @@
#include <node.h>
#include "call.h"
#include "call_credentials.h"
#include "channel_credentials.h"
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
#include "grpc/support/log.h"
#include "channel_credentials.h"
#include "call_credentials.h"
#include "call.h"
namespace grpc {
namespace node {
@ -80,12 +80,12 @@ void ChannelCredentials::Init(Local<Object> exports) {
Nan::SetPrototypeMethod(tpl, "compose", Compose);
fun_tpl.Reset(tpl);
Local<Function> ctr = Nan::GetFunction(tpl).ToLocalChecked();
Nan::Set(ctr, Nan::New("createSsl").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(CreateSsl)).ToLocalChecked());
Nan::Set(
ctr, Nan::New("createSsl").ToLocalChecked(),
Nan::GetFunction(Nan::New<FunctionTemplate>(CreateSsl)).ToLocalChecked());
Nan::Set(ctr, Nan::New("createInsecure").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(CreateInsecure)).ToLocalChecked());
Nan::GetFunction(Nan::New<FunctionTemplate>(CreateInsecure))
.ToLocalChecked());
Nan::Set(exports, Nan::New("ChannelCredentials").ToLocalChecked(), ctr);
constructor = new Nan::Callback(ctr);
}
@ -100,9 +100,9 @@ Local<Value> ChannelCredentials::WrapStruct(
EscapableHandleScope scope;
const int argc = 1;
Local<Value> argv[argc] = {
Nan::New<External>(reinterpret_cast<void *>(credentials))};
MaybeLocal<Object> maybe_instance = Nan::NewInstance(
constructor->GetFunction(), argc, argv);
Nan::New<External>(reinterpret_cast<void *>(credentials))};
MaybeLocal<Object> maybe_instance =
Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
return scope.Escape(Nan::Null());
} else {
@ -179,11 +179,10 @@ NAN_METHOD(ChannelCredentials::Compose) {
return Nan::ThrowTypeError(
"compose's first argument must be a CallCredentials object");
}
ChannelCredentials *self = ObjectWrap::Unwrap<ChannelCredentials>(
info.This());
ChannelCredentials *self =
ObjectWrap::Unwrap<ChannelCredentials>(info.This());
if (self->wrapped_credentials == NULL) {
return Nan::ThrowTypeError(
"Cannot compose insecure credential");
return Nan::ThrowTypeError("Cannot compose insecure credential");
}
CallCredentials *other = ObjectWrap::Unwrap<CallCredentials>(
Nan::To<Object>(info[0]).ToLocalChecked());

@ -34,8 +34,8 @@
#ifndef NET_GRPC_NODE_CHANNEL_CREDENTIALS_H_
#define NET_GRPC_NODE_CHANNEL_CREDENTIALS_H_
#include <node.h>
#include <nan.h>
#include <node.h>
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"

@ -31,8 +31,8 @@
*
*/
#include <v8.h>
#include <grpc/grpc.h>
#include <v8.h>
namespace grpc {
namespace node {

@ -33,8 +33,8 @@
#include <queue>
#include <node.h>
#include <nan.h>
#include <node.h>
#include <v8.h>
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
@ -53,12 +53,12 @@ extern "C" {
#include "call_credentials.h"
#include "channel.h"
#include "channel_credentials.h"
#include "server.h"
#include "completion_queue.h"
#include "completion_queue_async_worker.h"
#include "server.h"
#include "server_credentials.h"
#include "slice.h"
#include "timeval.h"
#include "completion_queue.h"
using grpc::node::CreateSliceFromString;
@ -188,8 +188,7 @@ void InitOpTypeConstants(Local<Object> exports) {
Nan::New<Uint32, uint32_t>(GRPC_OP_SEND_INITIAL_METADATA));
Nan::Set(op_type, Nan::New("SEND_INITIAL_METADATA").ToLocalChecked(),
SEND_INITIAL_METADATA);
Local<Value> SEND_MESSAGE(
Nan::New<Uint32, uint32_t>(GRPC_OP_SEND_MESSAGE));
Local<Value> SEND_MESSAGE(Nan::New<Uint32, uint32_t>(GRPC_OP_SEND_MESSAGE));
Nan::Set(op_type, Nan::New("SEND_MESSAGE").ToLocalChecked(), SEND_MESSAGE);
Local<Value> SEND_CLOSE_FROM_CLIENT(
Nan::New<Uint32, uint32_t>(GRPC_OP_SEND_CLOSE_FROM_CLIENT));
@ -203,8 +202,7 @@ void InitOpTypeConstants(Local<Object> exports) {
Nan::New<Uint32, uint32_t>(GRPC_OP_RECV_INITIAL_METADATA));
Nan::Set(op_type, Nan::New("RECV_INITIAL_METADATA").ToLocalChecked(),
RECV_INITIAL_METADATA);
Local<Value> RECV_MESSAGE(
Nan::New<Uint32, uint32_t>(GRPC_OP_RECV_MESSAGE));
Local<Value> RECV_MESSAGE(Nan::New<Uint32, uint32_t>(GRPC_OP_RECV_MESSAGE));
Nan::Set(op_type, Nan::New("RECV_MESSAGE").ToLocalChecked(), RECV_MESSAGE);
Local<Value> RECV_STATUS_ON_CLIENT(
Nan::New<Uint32, uint32_t>(GRPC_OP_RECV_STATUS_ON_CLIENT));
@ -252,8 +250,7 @@ void InitConnectivityStateConstants(Local<Object> exports) {
Nan::New<Uint32, uint32_t>(GRPC_CHANNEL_TRANSIENT_FAILURE));
Nan::Set(channel_state, Nan::New("TRANSIENT_FAILURE").ToLocalChecked(),
TRANSIENT_FAILURE);
Local<Value> FATAL_FAILURE(
Nan::New<Uint32, uint32_t>(GRPC_CHANNEL_SHUTDOWN));
Local<Value> FATAL_FAILURE(Nan::New<Uint32, uint32_t>(GRPC_CHANNEL_SHUTDOWN));
Nan::Set(channel_state, Nan::New("FATAL_FAILURE").ToLocalChecked(),
FATAL_FAILURE);
}
@ -282,13 +279,11 @@ void InitLogConstants(Local<Object> exports) {
NAN_METHOD(MetadataKeyIsLegal) {
if (!info[0]->IsString()) {
return Nan::ThrowTypeError(
"headerKeyIsLegal's argument must be a string");
return Nan::ThrowTypeError("headerKeyIsLegal's argument must be a string");
}
Local<String> key = Nan::To<String>(info[0]).ToLocalChecked();
grpc_slice slice = CreateSliceFromString(key);
info.GetReturnValue().Set(static_cast<bool>(
grpc_header_key_is_legal(slice)));
info.GetReturnValue().Set(static_cast<bool>(grpc_header_key_is_legal(slice)));
grpc_slice_unref(slice);
}
@ -299,8 +294,8 @@ NAN_METHOD(MetadataNonbinValueIsLegal) {
}
Local<String> value = Nan::To<String>(info[0]).ToLocalChecked();
grpc_slice slice = CreateSliceFromString(value);
info.GetReturnValue().Set(static_cast<bool>(
grpc_header_nonbin_value_is_legal(slice)));
info.GetReturnValue().Set(
static_cast<bool>(grpc_header_nonbin_value_is_legal(slice)));
grpc_slice_unref(slice);
}
@ -311,8 +306,7 @@ NAN_METHOD(MetadataKeyIsBinary) {
}
Local<String> key = Nan::To<String>(info[0]).ToLocalChecked();
grpc_slice slice = CreateSliceFromString(key);
info.GetReturnValue().Set(static_cast<bool>(
grpc_is_binary_header(slice)));
info.GetReturnValue().Set(static_cast<bool>(grpc_is_binary_header(slice)));
grpc_slice_unref(slice);
}
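
The three helpers above share a small slice lifecycle: build a grpc_slice from the JS string, ask the core whether it is a legal header key, a legal non-binary value, or a binary ("-bin") key, and unref the slice. The same checks are available directly from C; a brief sketch, with the helper name and key literal chosen only for illustration:

  #include <grpc/grpc.h>
  #include <grpc/slice.h>

  // Returns nonzero when the header key is legal; a static-string slice
  // needs no copy, but the unref keeps the usage symmetric.
  int key_is_legal(const char* key) {
    grpc_slice slice = grpc_slice_from_static_string(key);
    int legal = grpc_header_key_is_legal(slice);
    grpc_slice_unref(slice);
    return legal;
  }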
@ -354,11 +348,13 @@ NAUV_WORK_CB(LogMessagesCallback) {
args.pop();
Local<Value> file = Nan::New(arg->core_args.file).ToLocalChecked();
Local<Value> line = Nan::New<Uint32, uint32_t>(arg->core_args.line);
Local<Value> severity = Nan::New(
gpr_log_severity_string(arg->core_args.severity)).ToLocalChecked();
Local<Value> severity =
Nan::New(gpr_log_severity_string(arg->core_args.severity))
.ToLocalChecked();
Local<Value> message = Nan::New(arg->core_args.message).ToLocalChecked();
Local<Value> timestamp = Nan::New<v8::Date>(
grpc::node::TimespecToMilliseconds(arg->timestamp)).ToLocalChecked();
Local<Value> timestamp =
Nan::New<v8::Date>(grpc::node::TimespecToMilliseconds(arg->timestamp))
.ToLocalChecked();
const int argc = 5;
Local<Value> argv[argc] = {file, line, severity, message, timestamp};
grpc_logger_state.callback->Call(argc, argv);
@ -388,10 +384,9 @@ void init_logger() {
memset(&grpc_logger_state, 0, sizeof(logger_state));
grpc_logger_state.pending_args = new std::queue<log_args *>();
uv_mutex_init(&grpc_logger_state.mutex);
uv_async_init(uv_default_loop(),
&grpc_logger_state.async,
uv_async_init(uv_default_loop(), &grpc_logger_state.async,
LogMessagesCallback);
uv_unref((uv_handle_t*)&grpc_logger_state.async);
uv_unref((uv_handle_t *)&grpc_logger_state.async);
grpc_logger_state.logger_set = false;
gpr_log_verbosity_init();
@ -416,11 +411,10 @@ NAN_METHOD(SetDefaultLoggerCallback) {
NAN_METHOD(SetLogVerbosity) {
if (!info[0]->IsUint32()) {
return Nan::ThrowTypeError(
"setLogVerbosity's argument must be a number");
return Nan::ThrowTypeError("setLogVerbosity's argument must be a number");
}
gpr_log_severity severity = static_cast<gpr_log_severity>(
Nan::To<uint32_t>(info[0]).FromJust());
gpr_log_severity severity =
static_cast<gpr_log_severity>(Nan::To<uint32_t>(info[0]).FromJust());
gpr_set_log_verbosity(severity);
}
@ -453,28 +447,25 @@ void init(Local<Object> exports) {
// Attach a few utility functions directly to the module
Nan::Set(exports, Nan::New("metadataKeyIsLegal").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(MetadataKeyIsLegal)).ToLocalChecked());
Nan::Set(exports, Nan::New("metadataNonbinValueIsLegal").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(MetadataNonbinValueIsLegal)
).ToLocalChecked());
Nan::GetFunction(Nan::New<FunctionTemplate>(MetadataKeyIsLegal))
.ToLocalChecked());
Nan::Set(
exports, Nan::New("metadataNonbinValueIsLegal").ToLocalChecked(),
Nan::GetFunction(Nan::New<FunctionTemplate>(MetadataNonbinValueIsLegal))
.ToLocalChecked());
Nan::Set(exports, Nan::New("metadataKeyIsBinary").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(MetadataKeyIsBinary)
).ToLocalChecked());
Nan::GetFunction(Nan::New<FunctionTemplate>(MetadataKeyIsBinary))
.ToLocalChecked());
Nan::Set(exports, Nan::New("setDefaultRootsPem").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(SetDefaultRootsPem)
).ToLocalChecked());
Nan::Set(exports, Nan::New("setDefaultLoggerCallback").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(SetDefaultLoggerCallback)
).ToLocalChecked());
Nan::GetFunction(Nan::New<FunctionTemplate>(SetDefaultRootsPem))
.ToLocalChecked());
Nan::Set(
exports, Nan::New("setDefaultLoggerCallback").ToLocalChecked(),
Nan::GetFunction(Nan::New<FunctionTemplate>(SetDefaultLoggerCallback))
.ToLocalChecked());
Nan::Set(exports, Nan::New("setLogVerbosity").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(SetLogVerbosity)
).ToLocalChecked());
Nan::GetFunction(Nan::New<FunctionTemplate>(SetLogVerbosity))
.ToLocalChecked());
}
NODE_MODULE(grpc_node, init)
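
The logger hunks above reflow the mechanism that ships gpr_log output from arbitrary core threads into the Node event loop: records are queued under a uv mutex and the loop is woken through a uv_async handle, which the extension unrefs so the handle never keeps the process alive on its own. A minimal self-contained sketch of that pattern (names such as post_log and drain_logs are illustrative, not from the gRPC sources):

/* Hedged sketch of the queue-and-wake logging pattern above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uv.h>

typedef struct log_entry {
  char message[128];
  struct log_entry *next;
} log_entry;

static uv_async_t log_async;
static uv_mutex_t log_mutex;
static log_entry *pending = NULL;

/* Runs on the event-loop thread whenever uv_async_send was called. */
static void drain_logs(uv_async_t *handle) {
  uv_mutex_lock(&log_mutex);
  log_entry *head = pending;
  pending = NULL;
  uv_mutex_unlock(&log_mutex);
  while (head != NULL) {
    log_entry *next = head->next;
    printf("log: %s\n", head->message);
    free(head);
    head = next;
  }
  /* Close the handle so this demo's loop can exit; the real extension
     instead calls uv_unref() so the handle never pins the process. */
  uv_close((uv_handle_t *)handle, NULL);
}

/* May be called from any thread, e.g. a gpr_log callback.
   LIFO for brevity; the extension keeps a FIFO queue. */
static void post_log(const char *msg) {
  log_entry *entry = malloc(sizeof(*entry));
  snprintf(entry->message, sizeof(entry->message), "%s", msg);
  uv_mutex_lock(&log_mutex);
  entry->next = pending;
  pending = entry;
  uv_mutex_unlock(&log_mutex);
  uv_async_send(&log_async); /* wake the loop thread */
}

int main(void) {
  uv_mutex_init(&log_mutex);
  uv_async_init(uv_default_loop(), &log_async, drain_logs);
  post_log("hello from a worker");
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}

Queuing under a mutex and doing the formatting on the loop thread is what keeps gpr_log callable from threads that must never touch V8.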

@ -111,14 +111,9 @@ class NewCallOp : public Op {
return scope.Escape(obj);
}
bool ParseOp(Local<Value> value, grpc_op *out) {
return true;
}
bool IsFinalOp() {
return false;
}
void OnComplete(bool success) {
}
bool ParseOp(Local<Value> value, grpc_op *out) { return true; }
bool IsFinalOp() { return false; }
void OnComplete(bool success) {}
grpc_call *call;
grpc_call_details details;
@ -128,7 +123,7 @@ class NewCallOp : public Op {
std::string GetTypeString() const { return "new_call"; }
};
class TryShutdownOp: public Op {
class TryShutdownOp : public Op {
public:
TryShutdownOp(Server *server, Local<Value> server_value) : server(server) {
server_persist.Reset(server_value);
@ -137,19 +132,17 @@ class TryShutdownOp: public Op {
EscapableHandleScope scope;
return scope.Escape(Nan::New(server_persist));
}
bool ParseOp(Local<Value> value, grpc_op *out) {
return true;
}
bool IsFinalOp() {
return false;
}
bool ParseOp(Local<Value> value, grpc_op *out) { return true; }
bool IsFinalOp() { return false; }
void OnComplete(bool success) {
if (success) {
server->DestroyWrappedServer();
}
}
protected:
std::string GetTypeString() const { return "try_shutdown"; }
private:
Server *server;
Nan::Persistent<v8::Value, Nan::CopyablePersistentTraits<v8::Value>>
@ -227,10 +220,9 @@ NAN_METHOD(Server::RequestCall) {
ops->push_back(unique_ptr<Op>(op));
grpc_call_error error = grpc_server_request_call(
server->wrapped_server, &op->call, &op->details, &op->request_metadata,
GetCompletionQueue(),
GetCompletionQueue(),
new struct tag(new Callback(info[0].As<Function>()), ops.release(),
NULL, Nan::Null()));
GetCompletionQueue(), GetCompletionQueue(),
new struct tag(new Callback(info[0].As<Function>()), ops.release(), NULL,
Nan::Null()));
if (error != GRPC_CALL_OK) {
return Nan::ThrowError(nanErrorWithCode("requestCall failed", error));
}
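
For reference, the RequestCall hunk above lines up with the core C API: the same completion queue is passed both as the queue bound to the incoming call and as the notification queue, and the caller-owned tag comes back out of grpc_completion_queue_next once a call arrives or the deadline expires. A rough sketch against that API, with error handling trimmed and a placeholder port; with no client connected the first next() simply times out:

/* Hedged sketch of the grpc_server_request_call flow used above. */
#include <stdio.h>
#include <grpc/grpc.h>
#include <grpc/support/time.h>

int main(void) {
  grpc_init();
  grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);
  grpc_server *server = grpc_server_create(NULL, NULL);
  grpc_server_register_completion_queue(server, cq, NULL);
  grpc_server_add_insecure_http2_port(server, "127.0.0.1:50055");
  grpc_server_start(server);

  grpc_call *call = NULL;
  grpc_call_details details;
  grpc_metadata_array request_metadata;
  grpc_call_details_init(&details);
  grpc_metadata_array_init(&request_metadata);

  /* One queue bound to the new call, one for the notification; the Node
     wrapper above passes GetCompletionQueue() for both. */
  int tag = 1;
  grpc_call_error error = grpc_server_request_call(
      server, &call, &details, &request_metadata, cq, cq, &tag);
  if (error != GRPC_CALL_OK) {
    fprintf(stderr, "requestCall failed: %d\n", error);
  }

  /* The tag pops out of the queue when a client calls; with no client this
     returns GRPC_QUEUE_TIMEOUT after one second. */
  gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                       gpr_time_from_seconds(1, GPR_TIMESPAN));
  grpc_event ev = grpc_completion_queue_next(cq, deadline, NULL);
  printf("event type: %d\n", ev.type);

  /* Shut down, drain every pending tag, then release everything. */
  grpc_server_shutdown_and_notify(server, cq, &tag);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL)
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  if (call != NULL) grpc_call_unref(call);
  grpc_call_details_destroy(&details);
  grpc_metadata_array_destroy(&request_metadata);
  grpc_server_destroy(server);
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}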

@ -34,8 +34,8 @@
#ifndef NET_GRPC_NODE_SERVER_H_
#define NET_GRPC_NODE_SERVER_H_
#include <node.h>
#include <nan.h>
#include <node.h>
#include "grpc/grpc.h"
namespace grpc {

@ -78,12 +78,12 @@ void ServerCredentials::Init(Local<Object> exports) {
tpl->SetClassName(Nan::New("ServerCredentials").ToLocalChecked());
tpl->InstanceTemplate()->SetInternalFieldCount(1);
Local<Function> ctr = tpl->GetFunction();
Nan::Set(ctr, Nan::New("createSsl").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(CreateSsl)).ToLocalChecked());
Nan::Set(
ctr, Nan::New("createSsl").ToLocalChecked(),
Nan::GetFunction(Nan::New<FunctionTemplate>(CreateSsl)).ToLocalChecked());
Nan::Set(ctr, Nan::New("createInsecure").ToLocalChecked(),
Nan::GetFunction(
Nan::New<FunctionTemplate>(CreateInsecure)).ToLocalChecked());
Nan::GetFunction(Nan::New<FunctionTemplate>(CreateInsecure))
.ToLocalChecked());
fun_tpl.Reset(tpl);
constructor = new Nan::Callback(ctr);
Nan::Set(exports, Nan::New("ServerCredentials").ToLocalChecked(), ctr);
@ -99,9 +99,9 @@ Local<Value> ServerCredentials::WrapStruct(
Nan::EscapableHandleScope scope;
const int argc = 1;
Local<Value> argv[argc] = {
Nan::New<External>(reinterpret_cast<void *>(credentials))};
MaybeLocal<Object> maybe_instance = Nan::NewInstance(
constructor->GetFunction(), argc, argv);
Nan::New<External>(reinterpret_cast<void *>(credentials))};
MaybeLocal<Object> maybe_instance =
Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
return scope.Escape(Nan::Null());
} else {
@ -160,13 +160,13 @@ NAN_METHOD(ServerCredentials::CreateSsl) {
}
Local<Array> pair_list = Local<Array>::Cast(info[1]);
uint32_t key_cert_pair_count = pair_list->Length();
grpc_ssl_pem_key_cert_pair *key_cert_pairs = new grpc_ssl_pem_key_cert_pair[
key_cert_pair_count];
grpc_ssl_pem_key_cert_pair *key_cert_pairs =
new grpc_ssl_pem_key_cert_pair[key_cert_pair_count];
Local<String> key_key = Nan::New("private_key").ToLocalChecked();
Local<String> cert_key = Nan::New("cert_chain").ToLocalChecked();
for(uint32_t i = 0; i < key_cert_pair_count; i++) {
for (uint32_t i = 0; i < key_cert_pair_count; i++) {
Local<Value> pair_val = Nan::Get(pair_list, i).ToLocalChecked();
if (!pair_val->IsObject()) {
delete[] key_cert_pairs;

@ -34,8 +34,8 @@
#ifndef NET_GRPC_NODE_SERVER_CREDENTIALS_H_
#define NET_GRPC_NODE_SERVER_CREDENTIALS_H_
#include <node.h>
#include <nan.h>
#include <node.h>
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"

@ -35,8 +35,8 @@
#include "server.h"
#include <node.h>
#include <nan.h>
#include <node.h>
#include "grpc/grpc.h"
#include "grpc/support/time.h"
@ -60,22 +60,14 @@ static Callback *shutdown_callback = NULL;
class ServerShutdownOp : public Op {
public:
ServerShutdownOp(grpc_server *server): server(server) {
}
ServerShutdownOp(grpc_server *server) : server(server) {}
~ServerShutdownOp() {
}
~ServerShutdownOp() {}
Local<Value> GetNodeValue() const {
return Nan::Null();
}
Local<Value> GetNodeValue() const { return Nan::Null(); }
bool ParseOp(Local<Value> value, grpc_op *out) {
return true;
}
bool IsFinalOp() {
return false;
}
bool ParseOp(Local<Value> value, grpc_op *out) { return true; }
bool IsFinalOp() { return false; }
void OnComplete(bool success) {
/* Because cancel_all_calls was called, we assume that shutdown_and_notify
completes successfully */
@ -88,12 +80,9 @@ class ServerShutdownOp : public Op {
std::string GetTypeString() const { return "shutdown"; }
};
Server::Server(grpc_server *server) : wrapped_server(server) {
}
Server::Server(grpc_server *server) : wrapped_server(server) {}
Server::~Server() {
this->ShutdownServer();
}
Server::~Server() { this->ShutdownServer(); }
NAN_METHOD(ServerShutdownCallback) {
if (!info[0]->IsNull()) {
@ -105,10 +94,10 @@ void Server::ShutdownServer() {
Nan::HandleScope scope;
if (this->wrapped_server != NULL) {
if (shutdown_callback == NULL) {
Local<FunctionTemplate>callback_tpl =
Local<FunctionTemplate> callback_tpl =
Nan::New<FunctionTemplate>(ServerShutdownCallback);
shutdown_callback = new Callback(
Nan::GetFunction(callback_tpl).ToLocalChecked());
shutdown_callback =
new Callback(Nan::GetFunction(callback_tpl).ToLocalChecked());
}
ServerShutdownOp *op = new ServerShutdownOp(this->wrapped_server);

@ -31,10 +31,10 @@
*
*/
#include <node.h>
#include <nan.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <nan.h>
#include <node.h>
#include "slice.h"
@ -49,19 +49,19 @@ using v8::Value;
namespace {
void SliceFreeCallback(char *data, void *hint) {
grpc_slice *slice = reinterpret_cast<grpc_slice*>(hint);
grpc_slice *slice = reinterpret_cast<grpc_slice *>(hint);
grpc_slice_unref(*slice);
delete slice;
}
void string_destroy_func(void *user_data) {
delete reinterpret_cast<Nan::Utf8String*>(user_data);
delete reinterpret_cast<Nan::Utf8String *>(user_data);
}
void buffer_destroy_func(void *user_data) {
delete reinterpret_cast<PersistentValue*>(user_data);
delete reinterpret_cast<PersistentValue *>(user_data);
}
} // namespace
} // namespace
grpc_slice CreateSliceFromString(const Local<String> source) {
Nan::HandleScope scope;
@ -73,28 +73,32 @@ grpc_slice CreateSliceFromString(const Local<String> source) {
grpc_slice CreateSliceFromBuffer(const Local<Value> source) {
// Prerequisite: ::node::Buffer::HasInstance(source)
Nan::HandleScope scope;
return grpc_slice_new_with_user_data(::node::Buffer::Data(source),
::node::Buffer::Length(source),
buffer_destroy_func,
new PersistentValue(source));
return grpc_slice_new_with_user_data(
::node::Buffer::Data(source), ::node::Buffer::Length(source),
buffer_destroy_func, new PersistentValue(source));
}
Local<String> CopyStringFromSlice(const grpc_slice slice) {
Nan::EscapableHandleScope scope;
if (GRPC_SLICE_LENGTH(slice) == 0) {
return scope.Escape(Nan::EmptyString());
}
return scope.Escape(Nan::New<String>(
const_cast<char *>(reinterpret_cast<const char *>(GRPC_SLICE_START_PTR(slice))),
GRPC_SLICE_LENGTH(slice)).ToLocalChecked());
return scope.Escape(
Nan::New<String>(const_cast<char *>(reinterpret_cast<const char *>(
GRPC_SLICE_START_PTR(slice))),
GRPC_SLICE_LENGTH(slice))
.ToLocalChecked());
}
Local<Value> CreateBufferFromSlice(const grpc_slice slice) {
Nan::EscapableHandleScope scope;
grpc_slice *slice_ptr = new grpc_slice;
*slice_ptr = grpc_slice_ref(slice);
return scope.Escape(Nan::NewBuffer(
const_cast<char *>(reinterpret_cast<const char *>(GRPC_SLICE_START_PTR(*slice_ptr))),
GRPC_SLICE_LENGTH(*slice_ptr), SliceFreeCallback, slice_ptr).ToLocalChecked());
return scope.Escape(
Nan::NewBuffer(
const_cast<char *>(
reinterpret_cast<const char *>(GRPC_SLICE_START_PTR(*slice_ptr))),
GRPC_SLICE_LENGTH(*slice_ptr), SliceFreeCallback, slice_ptr)
.ToLocalChecked());
}
} // namespace node
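
The reflowed slice helpers above revolve around grpc_slice_new_with_user_data: the bytes are wrapped without copying and the supplied destroy callback runs only when the last reference is dropped, which is what lets the extension hand V8-owned memory to the core (and back again through SliceFreeCallback). A small hedged sketch of that ownership contract in plain C:

/* Wrap caller-owned memory in a grpc_slice without copying; the destroy
   callback releases it when the refcount reaches zero. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <grpc/slice.h>

/* Destroy callback: runs once, when the last reference to the slice goes. */
static void free_backing_store(void *user_data) {
  printf("last reference dropped, freeing backing store\n");
  free(user_data);
}

int main(void) {
  char *backing = strdup("hello, zero copy");
  grpc_slice slice = grpc_slice_new_with_user_data(
      backing, strlen(backing), free_backing_store, backing);

  /* Extra references keep the backing store alive... */
  grpc_slice extra = grpc_slice_ref(slice);
  printf("%.*s (%zu bytes)\n", (int)GRPC_SLICE_LENGTH(extra),
         (const char *)GRPC_SLICE_START_PTR(extra), GRPC_SLICE_LENGTH(extra));

  /* ...and the callback above only fires on the final unref. */
  grpc_slice_unref(extra);
  grpc_slice_unref(slice);
  return 0;
}

CreateSliceFromBuffer above does the same thing with a PersistentValue as the user data, so the Buffer cannot be collected while the core still holds slice references.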

@ -31,14 +31,15 @@
*
*/
#include <node.h>
#include <nan.h>
#include <grpc/slice.h>
#include <nan.h>
#include <node.h>
namespace grpc {
namespace node {
typedef Nan::Persistent<v8::Value, Nan::CopyablePersistentTraits<v8::Value>> PersistentValue;
typedef Nan::Persistent<v8::Value, Nan::CopyablePersistentTraits<v8::Value>>
PersistentValue;
grpc_slice CreateSliceFromString(const v8::Local<v8::String> source);

@ -31,8 +31,8 @@
*
*/
#include <limits>
#include <cstdint>
#include <limits>
#include "grpc/grpc.h"
#include "grpc/support/time.h"

@ -88,7 +88,10 @@ function timeDiffToNanos(time_diff) {
*/
function BenchmarkClient(server_targets, channels, histogram_params,
security_params) {
var options = {};
var options = {
"grpc.max_receive_message_length": -1,
"grpc.max_send_message_length": -1
};
var creds;
if (security_params) {
var ca_path;
@ -180,6 +183,8 @@ BenchmarkClient.prototype.startClosedLoop = function(
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var makeCall;
var argument;
@ -270,6 +275,8 @@ BenchmarkClient.prototype.startPoisson = function(
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var makeCall;
var argument;
@ -354,9 +361,11 @@ BenchmarkClient.prototype.startPoisson = function(
*/
BenchmarkClient.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
var histogram = this.histogram;
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
this.histogram = new Histogram(histogram.resolution,
histogram.max_possible);
}
@ -371,9 +380,8 @@ BenchmarkClient.prototype.mark = function(reset) {
count: histogram.getCount()
},
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};
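
The "grpc.max_receive_message_length" and "grpc.max_send_message_length" keys added to the benchmark options are ordinary core channel args, with -1 meaning unlimited. Roughly how the same settings look when a channel is built directly against the C API (the target address is a placeholder):

/* Hedged sketch: the benchmark's message-size options expressed as
   explicit core channel args. */
#include <grpc/grpc.h>

int main(void) {
  grpc_init();

  grpc_arg args[2];
  args[0].type = GRPC_ARG_INTEGER;
  args[0].key = GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH; /* "grpc.max_receive_message_length" */
  args[0].value.integer = -1;                        /* -1 == unlimited */
  args[1].type = GRPC_ARG_INTEGER;
  args[1].key = GRPC_ARG_MAX_SEND_MESSAGE_LENGTH;    /* "grpc.max_send_message_length" */
  args[1].value.integer = -1;

  grpc_channel_args channel_args = {2, args};
  grpc_channel *channel =
      grpc_insecure_channel_create("localhost:50051", &channel_args, NULL);

  grpc_channel_destroy(channel);
  grpc_shutdown();
  return 0;
}

The options object passed to the JavaScript Channel and Server constructors is converted into core channel args of exactly this shape.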

@ -95,7 +95,6 @@ function BenchmarkClient(server_targets, channels, histogram_params,
var host_port;
host_port = server_targets[i % server_targets.length].split(':');
var new_options = _.assign({hostname: host_port[0], port: +host_port[1]}, options);
new_options.agent = new protocol.Agent(new_options);
this.client_options[i] = new_options;
}
@ -137,6 +136,7 @@ BenchmarkClient.prototype.startClosedLoop = function(
}
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var argument = {
response_size: resp_size,
@ -207,6 +207,7 @@ BenchmarkClient.prototype.startPoisson = function(
}
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
var argument = {
response_size: resp_size,
@ -264,9 +265,11 @@ BenchmarkClient.prototype.startPoisson = function(
*/
BenchmarkClient.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
var histogram = this.histogram;
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
this.histogram = new Histogram(histogram.resolution,
histogram.max_possible);
}
@ -281,9 +284,8 @@ BenchmarkClient.prototype.mark = function(reset) {
count: histogram.getCount()
},
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};

@ -132,7 +132,12 @@ function BenchmarkServer(host, port, tls, generic, response_size) {
server_creds = grpc.ServerCredentials.createInsecure();
}
var server = new grpc.Server();
var options = {
"grpc.max_receive_message_length": -1,
"grpc.max_send_message_length": -1
};
var server = new grpc.Server(options);
this.port = server.bind(host + ':' + port, server_creds);
if (generic) {
server.addService(genericService, {
@ -156,6 +161,7 @@ util.inherits(BenchmarkServer, EventEmitter);
BenchmarkServer.prototype.start = function() {
this.server.start();
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
this.emit('started');
};
@ -175,14 +181,15 @@ BenchmarkServer.prototype.getPort = function() {
*/
BenchmarkServer.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
}
return {
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};

@ -81,6 +81,7 @@ BenchmarkServer.prototype.start = function() {
var self = this;
this.server.listen(this.input_port, this.input_hostname, function() {
self.last_wall_time = process.hrtime();
self.last_usage = process.cpuUsage();
self.emit('started');
});
};
@ -91,14 +92,15 @@ BenchmarkServer.prototype.getPort = function() {
BenchmarkServer.prototype.mark = function(reset) {
var wall_time_diff = process.hrtime(this.last_wall_time);
var usage_diff = process.cpuUsage(this.last_usage);
if (reset) {
this.last_wall_time = process.hrtime();
this.last_usage = process.cpuUsage();
}
return {
time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
// Not sure how to measure these values
time_user: 0,
time_system: 0
time_user: usage_diff.user / 1000000,
time_system: usage_diff.system / 1000000
};
};

@ -315,7 +315,7 @@
}
- (void)dealloc {
grpc_call_destroy(_call);
grpc_call_unref(_call);
}
@end

@ -258,7 +258,7 @@ unsigned int parse_h2_length(const char *field) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
@ -437,7 +437,7 @@ unsigned int parse_h2_length(const char *field) {
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -65,7 +65,7 @@ static zend_object_handlers call_ce_handlers;
/* Frees and destroys an instance of wrapped_grpc_call */
PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_call)
if (p->owned && p->wrapped != NULL) {
grpc_call_destroy(p->wrapped);
grpc_call_unref(p->wrapped);
}
PHP_GRPC_FREE_WRAPPED_FUNC_END()
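
The grpc_call_destroy to grpc_call_unref switch in these wrapper hunks (and in the Python and Ruby ones further down) reflects the call object becoming reference counted: the binding now just drops its reference instead of destroying the call outright. A hedged sketch of what a wrapper's cleanup typically looks like after the rename; the struct and function names here are illustrative, not taken from any of the bindings:

#include <stddef.h>
#include <grpc/grpc.h>

/* A generic wrapper object of the kind each binding keeps around a call. */
typedef struct wrapped_call {
  grpc_call *wrapped;
  int owned;
} wrapped_call;

/* Cleanup after the rename: drop the wrapper's reference instead of
   destroying the call; the core frees it once every reference is gone. */
static void wrapped_call_release(wrapped_call *w) {
  if (w->owned && w->wrapped != NULL) {
    grpc_call_unref(w->wrapped); /* was grpc_call_destroy(w->wrapped) */
    w->wrapped = NULL;
  }
}

int main(void) {
  wrapped_call w = {NULL, 1};
  wrapped_call_release(&w); /* no-op here; the bindings pass a live call */
  return 0;
}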

@ -87,7 +87,7 @@ class BaseStub
'ChannelCredentials::create methods');
}
if ($channel) {
if (!is_a($channel, 'Channel')) {
if (!is_a($channel, 'Grpc\Channel')) {
throw new \Exception('The channel argument is not a'.
'Channel object');
}

@ -11,7 +11,6 @@ dist/
.cache/
nosetests.xml
doc/
_grpcio_metadata.py
htmlcov/
grpc/_cython/_credentials
poison.c

@ -106,7 +106,7 @@ cdef class Call:
def __dealloc__(self):
if self.c_call != NULL:
grpc_call_destroy(self.c_call)
grpc_call_unref(self.c_call)
grpc_shutdown()
# The object *should* always be valid from Python. Used for debugging.

@ -329,7 +329,7 @@ cdef extern from "grpc/grpc.h":
const char *description,
void *reserved) nogil
char *grpc_call_get_peer(grpc_call *call) nogil
void grpc_call_destroy(grpc_call *call) nogil
void grpc_call_unref(grpc_call *call) nogil
grpc_channel *grpc_insecure_channel_create(const char *target,
const grpc_channel_args *args,

@ -0,0 +1,32 @@
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc/_grpcio_metadata.py.template`!!!
__version__ = """1.4.0.dev0"""

@ -0,0 +1,69 @@
#!/usr/bin/env ruby
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Prompted by and minimal repro of https://github.com/grpc/grpc/issues/10658
require_relative './end2end_common'
def main
server_port = ''
OptionParser.new do |opts|
opts.on('--client_control_port=P', String) do
STDERR.puts 'client control port not used'
end
opts.on('--server_port=P', String) do |p|
server_port = p
end
end.parse!
p = fork do
stub = Echo::EchoServer::Stub.new("localhost:#{server_port}",
:this_channel_is_insecure)
stub.echo(Echo::EchoRequest.new(request: 'hello'))
end
begin
Timeout.timeout(10) do
Process.wait(p)
end
rescue Timeout::Error
STDERR.puts "timeout waiting for forked process #{p}"
Process.kill('SIGKILL', p)
Process.wait(p)
raise 'Timed out waiting for client process. ' \
'It likely hangs when using gRPC after loading it and then forking'
end
client_exit_code = $CHILD_STATUS
fail "forked process failed #{client_exit_code}" if client_exit_code != 0
end
main

@ -0,0 +1,69 @@
#!/usr/bin/env ruby
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require_relative './end2end_common'
def main
STDERR.puts 'start server'
server_runner = ServerRunner.new(EchoServerImpl)
server_port = server_runner.run
# TODO(apolcyn) Can we get rid of this sleep?
# Without it, an immediate call to the just started EchoServer
# fails with UNAVAILABLE
sleep 1
STDERR.puts 'start client'
_, client_pid = start_client('forking_client_client.rb',
server_port)
begin
Timeout.timeout(10) do
Process.wait(client_pid)
end
rescue Timeout::Error
STDERR.puts "timeout wait for client pid #{client_pid}"
Process.kill('SIGKILL', client_pid)
Process.wait(client_pid)
STDERR.puts 'killed client child'
raise 'Timed out waiting for client process. ' \
'It likely hangs when requiring grpc, then forking, then using grpc '
end
client_exit_code = $CHILD_STATUS
if client_exit_code != 0
fail "forking client client failed, exit code #{client_exit_code}"
end
server_runner.stop
end
main

@ -0,0 +1,77 @@
#!/usr/bin/env ruby
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# For GRPC::Core classes, which use the grpc c-core, object init
# is interesting because it's related to overall library init.
require_relative './end2end_common'
def main
grpc_class = ''
OptionParser.new do |opts|
opts.on('--grpc_class=P', String) do |p|
grpc_class = p
end
end.parse!
test_proc = nil
case grpc_class
when 'channel'
test_proc = proc do
GRPC::Core::Channel.new('dummy_host', nil, :this_channel_is_insecure)
end
when 'server'
test_proc = proc do
GRPC::Core::Server.new({})
end
when 'channel_credentials'
test_proc = proc do
GRPC::Core::ChannelCredentials.new
end
when 'call_credentials'
test_proc = proc do
GRPC::Core::CallCredentials.new(proc { |noop| noop })
end
when 'compression_options'
test_proc = proc do
GRPC::Core::CompressionOptions.new
end
else
fail "bad --grpc_class=#{grpc_class} param"
end
th = Thread.new { test_proc.call }
test_proc.call
th.join
end
main

@ -0,0 +1,67 @@
#!/usr/bin/env ruby
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require_relative './end2end_common'
def main
native_grpc_classes = %w( channel
server
channel_credentials
call_credentials
compression_options )
native_grpc_classes.each do |grpc_class|
STDERR.puts 'start client'
this_dir = File.expand_path(File.dirname(__FILE__))
client_path = File.join(this_dir, 'grpc_class_init_client.rb')
client_pid = Process.spawn(RbConfig.ruby,
client_path,
"--grpc_class=#{grpc_class}")
begin
Timeout.timeout(10) do
Process.wait(client_pid)
end
rescue Timeout::Error
STDERR.puts "timeout waiting for client pid #{client_pid}"
Process.kill('SIGKILL', client_pid)
Process.wait(client_pid)
STDERR.puts 'killed client child'
raise 'Timed out waiting for client process. ' \
'It likely hangs when the first constructed gRPC object has ' \
"type: #{grpc_class}"
end
client_exit_code = $CHILD_STATUS
fail "client failed, exit code #{client_exit_code}" if client_exit_code != 0
end
end
main

@ -33,15 +33,15 @@
#include <ruby/ruby.h>
#include "rb_grpc_imports.generated.h"
#include "rb_byte_buffer.h"
#include "rb_grpc_imports.generated.h"
#include <grpc/grpc.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/grpc.h>
#include <grpc/slice.h>
#include "rb_grpc.h"
grpc_byte_buffer* grpc_rb_s_to_byte_buffer(char *string, size_t length) {
grpc_byte_buffer *grpc_rb_s_to_byte_buffer(char *string, size_t length) {
grpc_slice slice = grpc_slice_from_copied_buffer(string, length);
grpc_byte_buffer *buffer = grpc_raw_byte_buffer_create(&slice, 1);
grpc_slice_unref(slice);
@ -61,7 +61,7 @@ VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) {
return Qnil;
}
while (grpc_byte_buffer_reader_next(&reader, &next) != 0) {
rb_str_cat(rb_string, (const char *) GRPC_SLICE_START_PTR(next),
rb_str_cat(rb_string, (const char *)GRPC_SLICE_START_PTR(next),
GRPC_SLICE_LENGTH(next));
grpc_slice_unref(next);
}
@ -71,7 +71,9 @@ VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) {
VALUE grpc_rb_slice_to_ruby_string(grpc_slice slice) {
if (GRPC_SLICE_START_PTR(slice) == NULL) {
rb_raise(rb_eRuntimeError, "attempt to convert uninitialized grpc_slice to ruby string");
rb_raise(rb_eRuntimeError,
"attempt to convert uninitialized grpc_slice to ruby string");
}
return rb_str_new((char*)GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
return rb_str_new((char *)GRPC_SLICE_START_PTR(slice),
GRPC_SLICE_LENGTH(slice));
}
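
grpc_rb_s_to_byte_buffer and grpc_rb_byte_buffer_to_s above are a copy-in / copy-out pair over the core byte-buffer API: a copied slice seeds grpc_raw_byte_buffer_create, and a grpc_byte_buffer_reader walks the slices back out. A small sketch of that round trip in plain C:

/* Hedged sketch: build a byte buffer from a C string and read it back,
   mirroring the Ruby helpers above. */
#include <stdio.h>
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/slice.h>

int main(void) {
  grpc_init();

  const char *payload = "hello byte buffer";
  grpc_slice slice = grpc_slice_from_copied_buffer(payload, strlen(payload));
  grpc_byte_buffer *buffer = grpc_raw_byte_buffer_create(&slice, 1);
  grpc_slice_unref(slice); /* the buffer holds its own reference now */

  grpc_byte_buffer_reader reader;
  if (grpc_byte_buffer_reader_init(&reader, buffer)) {
    grpc_slice next;
    while (grpc_byte_buffer_reader_next(&reader, &next) != 0) {
      printf("%.*s\n", (int)GRPC_SLICE_LENGTH(next),
             (const char *)GRPC_SLICE_START_PTR(next));
      grpc_slice_unref(next);
    }
    grpc_byte_buffer_reader_destroy(&reader);
  }

  grpc_byte_buffer_destroy(buffer);
  grpc_shutdown();
  return 0;
}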

@ -33,12 +33,12 @@
#include <ruby/ruby.h>
#include "rb_grpc_imports.generated.h"
#include "rb_call.h"
#include "rb_grpc_imports.generated.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/impl/codegen/compression_types.h>
#include <grpc/support/alloc.h>
#include "rb_byte_buffer.h"
#include "rb_call_credentials.h"
@ -101,7 +101,7 @@ typedef struct grpc_rb_call {
static void destroy_call(grpc_rb_call *call) {
/* Ensure that we only try to destroy the call once */
if (call->wrapped != NULL) {
grpc_call_destroy(call->wrapped);
grpc_call_unref(call->wrapped);
call->wrapped = NULL;
grpc_rb_completion_queue_destroy(call->queue);
call->queue = NULL;
@ -113,7 +113,7 @@ static void grpc_rb_call_destroy(void *p) {
if (p == NULL) {
return;
}
destroy_call((grpc_rb_call*)p);
destroy_call((grpc_rb_call *)p);
}
static size_t md_ary_datasize(const void *p) {
@ -130,7 +130,9 @@ static size_t md_ary_datasize(const void *p) {
static const rb_data_type_t grpc_rb_md_ary_data_type = {
"grpc_metadata_array",
{GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, md_ary_datasize,
{GRPC_RB_GC_NOT_MARKED,
GRPC_RB_GC_DONT_FREE,
md_ary_datasize,
{NULL, NULL}},
NULL,
NULL,
@ -140,19 +142,20 @@ static const rb_data_type_t grpc_rb_md_ary_data_type = {
* touches a hash object.
* TODO(yugui) Directly use st_table and call the free function earlier?
*/
0,
0,
#endif
};
/* Describes grpc_call struct for RTypedData */
static const rb_data_type_t grpc_call_data_type = {
"grpc_call",
{GRPC_RB_GC_NOT_MARKED, grpc_rb_call_destroy, GRPC_RB_MEMSIZE_UNAVAILABLE,
{NULL, NULL}},
NULL,
NULL,
static const rb_data_type_t grpc_call_data_type = {"grpc_call",
{GRPC_RB_GC_NOT_MARKED,
grpc_rb_call_destroy,
GRPC_RB_MEMSIZE_UNAVAILABLE,
{NULL, NULL}},
NULL,
NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
RUBY_TYPED_FREE_IMMEDIATELY
RUBY_TYPED_FREE_IMMEDIATELY
#endif
};
@ -175,7 +178,7 @@ static VALUE grpc_rb_call_cancel(VALUE self) {
grpc_rb_call *call = NULL;
grpc_call_error err;
if (RTYPEDDATA_DATA(self) == NULL) {
//This call has been closed
// This call has been closed
return Qnil;
}
@ -196,7 +199,7 @@ static VALUE grpc_rb_call_cancel(VALUE self) {
static VALUE grpc_rb_call_close(VALUE self) {
grpc_rb_call *call = NULL;
TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
if(call != NULL) {
if (call != NULL) {
destroy_call(call);
RTYPEDDATA_DATA(self) = NULL;
}
@ -238,8 +241,8 @@ static VALUE grpc_rb_call_get_peer_cert(VALUE self) {
}
{
grpc_auth_property_iterator it =
grpc_auth_context_find_properties_by_name(ctx, GRPC_X509_PEM_CERT_PROPERTY_NAME);
grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
ctx, GRPC_X509_PEM_CERT_PROPERTY_NAME);
const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it);
if (prop == NULL) {
return Qnil;
@ -388,21 +391,22 @@ static int grpc_rb_md_ary_fill_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) {
long i;
grpc_slice key_slice;
grpc_slice value_slice;
char* tmp_str;
char *tmp_str;
if (TYPE(key) == T_SYMBOL) {
key_slice = grpc_slice_from_static_string(rb_id2name(SYM2ID(key)));
} else if (TYPE(key) == T_STRING) {
key_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(key), RSTRING_LEN(key));
key_slice =
grpc_slice_from_copied_buffer(RSTRING_PTR(key), RSTRING_LEN(key));
} else {
rb_raise(rb_eTypeError, "grpc_rb_md_ary_fill_hash_cb: bad type for key parameter");
rb_raise(rb_eTypeError,
"grpc_rb_md_ary_fill_hash_cb: bad type for key parameter");
}
if (!grpc_header_key_is_legal(key_slice)) {
tmp_str = grpc_slice_to_c_string(key_slice);
rb_raise(rb_eArgError,
"'%s' is an invalid header key, must match [a-z0-9-_.]+",
tmp_str);
"'%s' is an invalid header key, must match [a-z0-9-_.]+", tmp_str);
return ST_STOP;
}
@ -414,13 +418,14 @@ static int grpc_rb_md_ary_fill_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) {
array_length = RARRAY_LEN(val);
/* If the value is an array, add capacity for each value in the array */
for (i = 0; i < array_length; i++) {
value_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(rb_ary_entry(val, i)), RSTRING_LEN(rb_ary_entry(val, i)));
value_slice = grpc_slice_from_copied_buffer(
RSTRING_PTR(rb_ary_entry(val, i)), RSTRING_LEN(rb_ary_entry(val, i)));
if (!grpc_is_binary_header(key_slice) &&
!grpc_header_nonbin_value_is_legal(value_slice)) {
// The value has invalid characters
tmp_str = grpc_slice_to_c_string(value_slice);
rb_raise(rb_eArgError,
"Header value '%s' has invalid characters", tmp_str);
rb_raise(rb_eArgError, "Header value '%s' has invalid characters",
tmp_str);
return ST_STOP;
}
md_ary->metadata[md_ary->count].key = key_slice;
@ -428,21 +433,21 @@ static int grpc_rb_md_ary_fill_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) {
md_ary->count += 1;
}
} else if (TYPE(val) == T_STRING) {
value_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(val), RSTRING_LEN(val));
value_slice =
grpc_slice_from_copied_buffer(RSTRING_PTR(val), RSTRING_LEN(val));
if (!grpc_is_binary_header(key_slice) &&
!grpc_header_nonbin_value_is_legal(value_slice)) {
// The value has invalid characters
tmp_str = grpc_slice_to_c_string(value_slice);
rb_raise(rb_eArgError,
"Header value '%s' has invalid characters", tmp_str);
rb_raise(rb_eArgError, "Header value '%s' has invalid characters",
tmp_str);
return ST_STOP;
}
md_ary->metadata[md_ary->count].key = key_slice;
md_ary->metadata[md_ary->count].value = value_slice;
md_ary->count += 1;
} else {
rb_raise(rb_eArgError,
"Header values must be of type string or array");
rb_raise(rb_eArgError, "Header values must be of type string or array");
return ST_STOP;
}
@ -474,8 +479,7 @@ static int grpc_rb_md_ary_capacity_hash_cb(VALUE key, VALUE val,
/* grpc_rb_md_ary_convert converts a ruby metadata hash into
a grpc_metadata_array.
*/
void grpc_rb_md_ary_convert(VALUE md_ary_hash,
grpc_metadata_array *md_ary) {
void grpc_rb_md_ary_convert(VALUE md_ary_hash, grpc_metadata_array *md_ary) {
VALUE md_ary_obj = Qnil;
if (md_ary_hash == Qnil) {
return; /* Do nothing if the expected hash value is nil */
@ -511,12 +515,14 @@ VALUE grpc_rb_md_ary_to_h(grpc_metadata_array *md_ary) {
rb_hash_aset(result, key, value);
} else if (TYPE(value) == T_ARRAY) {
/* Add the string to the returned array */
rb_ary_push(value, grpc_rb_slice_to_ruby_string(md_ary->metadata[i].value));
rb_ary_push(value,
grpc_rb_slice_to_ruby_string(md_ary->metadata[i].value));
} else {
/* Add the current value with this key and the new one to an array */
new_ary = rb_ary_new();
rb_ary_push(new_ary, value);
rb_ary_push(new_ary, grpc_rb_slice_to_ruby_string(md_ary->metadata[i].value));
rb_ary_push(new_ary,
grpc_rb_slice_to_ruby_string(md_ary->metadata[i].value));
rb_hash_aset(result, key, new_ary);
}
}
@ -556,10 +562,9 @@ static int grpc_rb_call_check_op_keys_hash_cb(VALUE key, VALUE val,
/* grpc_rb_op_update_status_from_server adds the values in a ruby status
struct to the 'send_status_from_server' portion of an op.
*/
static void grpc_rb_op_update_status_from_server(grpc_op *op,
grpc_metadata_array *md_ary,
grpc_slice *send_status_details,
VALUE status) {
static void grpc_rb_op_update_status_from_server(
grpc_op *op, grpc_metadata_array *md_ary, grpc_slice *send_status_details,
VALUE status) {
VALUE code = rb_struct_aref(status, sym_code);
VALUE details = rb_struct_aref(status, sym_details);
VALUE metadata_hash = rb_struct_aref(status, sym_metadata);
@ -576,7 +581,8 @@ static void grpc_rb_op_update_status_from_server(grpc_op *op,
return;
}
*send_status_details = grpc_slice_from_copied_buffer(RSTRING_PTR(details), RSTRING_LEN(details));
*send_status_details =
grpc_slice_from_copied_buffer(RSTRING_PTR(details), RSTRING_LEN(details));
op->data.send_status_from_server.status = NUM2INT(code);
op->data.send_status_from_server.status_details = send_status_details;
@ -687,7 +693,8 @@ static void grpc_run_batch_stack_fill_ops(run_batch_stack *st, VALUE ops_hash) {
/* N.B. later there is no need to explicitly delete the metadata keys
* and values, they are references to data in ruby objects. */
grpc_rb_op_update_status_from_server(
&st->ops[st->op_num], &st->send_trailing_metadata, &st->send_status_details, this_value);
&st->ops[st->op_num], &st->send_trailing_metadata,
&st->send_status_details, this_value);
break;
case GRPC_OP_RECV_INITIAL_METADATA:
st->ops[st->op_num].data.recv_initial_metadata.recv_initial_metadata =
@ -749,12 +756,12 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) {
case GRPC_OP_RECV_STATUS_ON_CLIENT:
rb_struct_aset(
result, sym_status,
rb_struct_new(grpc_rb_sStatus, UINT2NUM(st->recv_status),
(GRPC_SLICE_START_PTR(st->recv_status_details) == NULL
? Qnil
: grpc_rb_slice_to_ruby_string(st->recv_status_details)),
grpc_rb_md_ary_to_h(&st->recv_trailing_metadata),
NULL));
rb_struct_new(
grpc_rb_sStatus, UINT2NUM(st->recv_status),
(GRPC_SLICE_START_PTR(st->recv_status_details) == NULL
? Qnil
: grpc_rb_slice_to_ruby_string(st->recv_status_details)),
grpc_rb_md_ary_to_h(&st->recv_trailing_metadata), NULL));
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
rb_struct_aset(result, sym_send_close, Qtrue);
@ -791,7 +798,7 @@ static VALUE grpc_rb_call_run_batch(VALUE self, VALUE ops_hash) {
VALUE result = Qnil;
VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
unsigned write_flag = 0;
void *tag = (void*)&st;
void *tag = (void *)&st;
if (RTYPEDDATA_DATA(self) == NULL) {
rb_raise(grpc_rb_eCallError, "Cannot run batch on closed call");
@ -919,7 +926,8 @@ static void Init_grpc_op_codes() {
}
static void Init_grpc_metadata_keys() {
VALUE grpc_rb_mMetadataKeys = rb_define_module_under(grpc_rb_mGrpcCore, "MetadataKeys");
VALUE grpc_rb_mMetadataKeys =
rb_define_module_under(grpc_rb_mGrpcCore, "MetadataKeys");
rb_define_const(grpc_rb_mMetadataKeys, "COMPRESSION_REQUEST_ALGORITHM",
rb_str_new2(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY));
}
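
The metadata-conversion hunks above apply the same rules the core exposes directly: keys must pass grpc_header_key_is_legal, values of non-binary headers must pass grpc_header_nonbin_value_is_legal, and keys ending in "-bin" are detected with grpc_is_binary_header and exempted from the value check. A hedged sketch of just that validation step:

/* Check a key/value pair the way the Ruby conversion above does before
   appending it to a grpc_metadata_array. */
#include <stdio.h>
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/slice.h>

static int check_metadata_entry(const char *key, const char *value) {
  grpc_slice key_slice = grpc_slice_from_copied_string(key);
  grpc_slice value_slice = grpc_slice_from_copied_string(value);
  int ok = 1;

  if (!grpc_header_key_is_legal(key_slice)) {
    fprintf(stderr, "'%s' is an invalid header key, must match [a-z0-9-_.]+\n",
            key);
    ok = 0;
  } else if (!grpc_is_binary_header(key_slice) &&
             !grpc_header_nonbin_value_is_legal(value_slice)) {
    /* Only non "-bin" headers have their values checked. */
    fprintf(stderr, "header value '%s' has invalid characters\n", value);
    ok = 0;
  }

  grpc_slice_unref(key_slice);
  grpc_slice_unref(value_slice);
  return ok;
}

int main(void) {
  grpc_init();
  printf("%d\n", check_metadata_entry("user-agent", "demo/1.0")); /* 1 */
  printf("%d\n", check_metadata_entry("Bad Key!", "x"));          /* 0 */
  printf("%d\n", check_metadata_entry("trace-bin", "\x01\x02"));  /* 1 */
  grpc_shutdown();
  return 0;
}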

@ -39,13 +39,13 @@
#include <grpc/grpc.h>
/* Gets the wrapped call from a VALUE. */
grpc_call* grpc_rb_get_wrapped_call(VALUE v);
grpc_call *grpc_rb_get_wrapped_call(VALUE v);
/* Gets the VALUE corresponding to given grpc_call. */
VALUE grpc_rb_wrap_call(grpc_call *c, grpc_completion_queue *q);
/* Provides the details of a call error */
const char* grpc_call_error_detail_of(grpc_call_error err);
const char *grpc_call_error_detail_of(grpc_call_error err);
/* Converts a metadata array to a hash. */
VALUE grpc_rb_md_ary_to_h(grpc_metadata_array *md_ary);
@ -53,8 +53,7 @@ VALUE grpc_rb_md_ary_to_h(grpc_metadata_array *md_ary);
/* grpc_rb_md_ary_convert converts a ruby metadata hash into
a grpc_metadata_array.
*/
void grpc_rb_md_ary_convert(VALUE md_ary_hash,
grpc_metadata_array *md_ary);
void grpc_rb_md_ary_convert(VALUE md_ary_hash, grpc_metadata_array *md_ary);
/* grpc_rb_eCallError is the ruby class of the exception thrown during call
operations. */

@ -33,8 +33,8 @@
#include <ruby/ruby.h>
#include "rb_grpc_imports.generated.h"
#include "rb_call_credentials.h"
#include "rb_grpc_imports.generated.h"
#include <ruby/thread.h>
@ -82,20 +82,18 @@ static VALUE grpc_rb_call_credentials_callback(VALUE callback_args) {
static VALUE grpc_rb_call_credentials_callback_rescue(VALUE args,
VALUE exception_object) {
VALUE result = rb_hash_new();
VALUE backtrace = rb_funcall(
rb_funcall(exception_object, rb_intern("backtrace"), 0),
rb_intern("join"),
1, rb_str_new2("\n\tfrom "));
VALUE rb_exception_info = rb_funcall(exception_object, rb_intern("inspect"), 0);
VALUE backtrace =
rb_funcall(rb_funcall(exception_object, rb_intern("backtrace"), 0),
rb_intern("join"), 1, rb_str_new2("\n\tfrom "));
VALUE rb_exception_info =
rb_funcall(exception_object, rb_intern("inspect"), 0);
(void)args;
gpr_log(GPR_INFO, "Call credentials callback failed: %s\n%s",
StringValueCStr(rb_exception_info),
StringValueCStr(backtrace));
StringValueCStr(rb_exception_info), StringValueCStr(backtrace));
rb_hash_aset(result, rb_str_new2("metadata"), Qnil);
rb_hash_aset(result, rb_str_new2("status"),
INT2NUM(GRPC_STATUS_UNAUTHENTICATED));
rb_hash_aset(result, rb_str_new2("details"),
rb_exception_info);
rb_hash_aset(result, rb_str_new2("details"), rb_exception_info);
return result;
}
@ -118,7 +116,8 @@ static void grpc_rb_call_credentials_callback_with_gil(void *param) {
result = rb_rescue(grpc_rb_call_credentials_callback, callback_args,
grpc_rb_call_credentials_callback_rescue, Qnil);
// Both callbacks return a hash, so result should be a hash
grpc_rb_md_ary_convert(rb_hash_aref(result, rb_str_new2("metadata")), &md_ary);
grpc_rb_md_ary_convert(rb_hash_aref(result, rb_str_new2("metadata")),
&md_ary);
status = NUM2INT(rb_hash_aref(result, rb_str_new2("status")));
details = rb_hash_aref(result, rb_str_new2("details"));
error_details = StringValueCStr(details);
@ -138,7 +137,7 @@ static void grpc_rb_call_credentials_plugin_get_metadata(
params->callback = cb;
grpc_rb_event_queue_enqueue(grpc_rb_call_credentials_callback_with_gil,
(void*)(params));
(void *)(params));
}
static void grpc_rb_call_credentials_plugin_destroy(void *state) {
@ -172,13 +171,15 @@ static void grpc_rb_call_credentials_mark(void *p) {
}
static rb_data_type_t grpc_rb_call_credentials_data_type = {
"grpc_call_credentials",
{grpc_rb_call_credentials_mark, grpc_rb_call_credentials_free,
GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}},
NULL,
NULL,
"grpc_call_credentials",
{grpc_rb_call_credentials_mark,
grpc_rb_call_credentials_free,
GRPC_RB_MEMSIZE_UNAVAILABLE,
{NULL, NULL}},
NULL,
NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
RUBY_TYPED_FREE_IMMEDIATELY
RUBY_TYPED_FREE_IMMEDIATELY
#endif
};
@ -188,7 +189,8 @@ static VALUE grpc_rb_call_credentials_alloc(VALUE cls) {
grpc_rb_call_credentials *wrapper = ALLOC(grpc_rb_call_credentials);
wrapper->wrapped = NULL;
wrapper->mark = Qnil;
return TypedData_Wrap_Struct(cls, &grpc_rb_call_credentials_data_type, wrapper);
return TypedData_Wrap_Struct(cls, &grpc_rb_call_credentials_data_type,
wrapper);
}
/* Creates a wrapping object for a given call credentials. This should only be
@ -221,6 +223,8 @@ static VALUE grpc_rb_call_credentials_init(VALUE self, VALUE proc) {
grpc_call_credentials *creds = NULL;
grpc_metadata_credentials_plugin plugin;
grpc_ruby_once_init();
TypedData_Get_Struct(self, grpc_rb_call_credentials,
&grpc_rb_call_credentials_data_type, wrapper);
@ -230,7 +234,7 @@ static VALUE grpc_rb_call_credentials_init(VALUE self, VALUE proc) {
rb_raise(rb_eTypeError, "Argument to CallCredentials#new must be a proc");
return Qnil;
}
plugin.state = (void*)proc;
plugin.state = (void *)proc;
plugin.type = "";
creds = grpc_metadata_credentials_create_from_plugin(plugin, NULL);
@ -281,15 +285,12 @@ void Init_grpc_call_credentials() {
grpc_rb_call_credentials_compose, -1);
id_callback = rb_intern("__callback");
grpc_rb_event_queue_thread_start();
}
/* Gets the wrapped grpc_call_credentials from the ruby wrapper */
grpc_call_credentials *grpc_rb_get_wrapped_call_credentials(VALUE v) {
grpc_rb_call_credentials *wrapper = NULL;
TypedData_Get_Struct(v, grpc_rb_call_credentials,
&grpc_rb_call_credentials_data_type,
wrapper);
&grpc_rb_call_credentials_data_type, wrapper);
return wrapper->wrapped;
}

@ -34,9 +34,9 @@
#include <ruby/ruby.h>
#include <ruby/thread.h>
#include "rb_grpc_imports.generated.h"
#include "rb_byte_buffer.h"
#include "rb_channel.h"
#include "rb_grpc_imports.generated.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
@ -89,10 +89,14 @@ typedef struct grpc_rb_channel {
static void grpc_rb_channel_try_register_connection_polling(
grpc_rb_channel *wrapper);
static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper);
static void *wait_until_channel_polling_thread_started_no_gil(void *);
static void wait_until_channel_polling_thread_started_unblocking_func(void *);
static grpc_completion_queue *channel_polling_cq;
static gpr_mu global_connection_polling_mu;
static gpr_cv global_connection_polling_cv;
static int abort_channel_polling = 0;
static int channel_polling_thread_started = 0;
/* Destroys Channel instances. */
static void grpc_rb_channel_free(void *p) {
@ -166,6 +170,11 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
grpc_channel_args args;
MEMZERO(&args, grpc_channel_args, 1);
grpc_ruby_once_init();
rb_thread_call_without_gvl(
wait_until_channel_polling_thread_started_no_gil, NULL,
wait_until_channel_polling_thread_started_unblocking_func, NULL);
/* "3" == 3 mandatory args */
rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);
@ -196,14 +205,14 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
gpr_mu_lock(&wrapper->channel_mu);
wrapper->abort_watch_connectivity_state = 0;
wrapper->current_connectivity_state = grpc_channel_check_connectivity_state(wrapper->wrapped, 0);
wrapper->current_connectivity_state =
grpc_channel_check_connectivity_state(wrapper->wrapped, 0);
wrapper->safe_to_destroy = 0;
wrapper->request_safe_destroy = 0;
gpr_cv_broadcast(&wrapper->channel_cv);
gpr_mu_unlock(&wrapper->channel_mu);
grpc_rb_channel_try_register_connection_polling(wrapper);
if (args.args != NULL) {
@ -245,7 +254,8 @@ static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE *argv,
rb_raise(rb_eRuntimeError, "closed!");
return Qnil;
}
return LONG2NUM(grpc_channel_check_connectivity_state(wrapper->wrapped, grpc_try_to_connect));
return LONG2NUM(grpc_channel_check_connectivity_state(wrapper->wrapped,
grpc_try_to_connect));
}
typedef struct watch_state_stack {
@ -255,22 +265,21 @@ typedef struct watch_state_stack {
} watch_state_stack;
static void *watch_channel_state_without_gvl(void *arg) {
watch_state_stack *stack = (watch_state_stack*)arg;
watch_state_stack *stack = (watch_state_stack *)arg;
gpr_timespec deadline = stack->deadline;
grpc_rb_channel *wrapper = stack->wrapper;
int last_state = stack->last_state;
void *return_value = (void*)0;
void *return_value = (void *)0;
gpr_mu_lock(&wrapper->channel_mu);
while(wrapper->current_connectivity_state == last_state &&
!wrapper->request_safe_destroy &&
!wrapper->safe_to_destroy &&
!wrapper->abort_watch_connectivity_state &&
gpr_time_cmp(deadline, gpr_now(GPR_CLOCK_REALTIME)) > 0) {
while (wrapper->current_connectivity_state == last_state &&
!wrapper->request_safe_destroy && !wrapper->safe_to_destroy &&
!wrapper->abort_watch_connectivity_state &&
gpr_time_cmp(deadline, gpr_now(GPR_CLOCK_REALTIME)) > 0) {
gpr_cv_wait(&wrapper->channel_cv, &wrapper->channel_mu, deadline);
}
if (wrapper->current_connectivity_state != last_state) {
return_value = (void*)1;
return_value = (void *)1;
}
gpr_mu_unlock(&wrapper->channel_mu);
@ -278,7 +287,7 @@ static void *watch_channel_state_without_gvl(void *arg) {
}
static void watch_channel_state_unblocking_func(void *arg) {
grpc_rb_channel *wrapper = (grpc_rb_channel*)arg;
grpc_rb_channel *wrapper = (grpc_rb_channel *)arg;
gpr_log(GPR_DEBUG, "GRPC_RUBY: watch channel state unblocking func called");
gpr_mu_lock(&wrapper->channel_mu);
wrapper->abort_watch_connectivity_state = 1;
@ -298,7 +307,7 @@ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
VALUE deadline) {
grpc_rb_channel *wrapper = NULL;
watch_state_stack stack;
void* out;
void *out;
TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
@ -308,14 +317,18 @@ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
}
if (!FIXNUM_P(last_state)) {
rb_raise(rb_eTypeError, "bad type for last_state. want a GRPC::Core::ChannelState constant");
rb_raise(
rb_eTypeError,
"bad type for last_state. want a GRPC::Core::ChannelState constant");
return Qnil;
}
stack.wrapper = wrapper;
stack.deadline = grpc_rb_time_timeval(deadline, 0);
stack.last_state = NUM2LONG(last_state);
out = rb_thread_call_without_gvl(watch_channel_state_without_gvl, &stack, watch_channel_state_unblocking_func, wrapper);
out =
rb_thread_call_without_gvl(watch_channel_state_without_gvl, &stack,
watch_channel_state_unblocking_func, wrapper);
if (out) {
return Qtrue;
}
@ -420,7 +433,7 @@ static VALUE grpc_rb_channel_get_target(VALUE self) {
// destroy.
// Not safe to call while a channel's connection state is polled.
static void grpc_rb_channel_try_register_connection_polling(
grpc_rb_channel *wrapper) {
grpc_rb_channel *wrapper) {
grpc_connectivity_state conn_state;
gpr_timespec sleep_time = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(20, GPR_TIMESPAN));
@ -436,6 +449,7 @@ static void grpc_rb_channel_try_register_connection_polling(
}
gpr_mu_lock(&global_connection_polling_mu);
GPR_ASSERT(channel_polling_thread_started || abort_channel_polling);
conn_state = grpc_channel_check_connectivity_state(wrapper->wrapped, 0);
if (conn_state != wrapper->current_connectivity_state) {
wrapper->current_connectivity_state = conn_state;
@ -469,7 +483,7 @@ static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper) {
}
// Note this loop breaks out with a single call of
// "grpc_rb_event_unblocking_func".
// "run_poll_channels_loop_no_gil".
// This assumes that a ruby call to the unblocking func
// indicates process shutdown.
// In the worst case, this stops polling channel connectivity
@ -477,6 +491,14 @@ static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper) {
static void *run_poll_channels_loop_no_gil(void *arg) {
grpc_event event;
(void)arg;
gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");
gpr_mu_lock(&global_connection_polling_mu);
GPR_ASSERT(!channel_polling_thread_started);
channel_polling_thread_started = 1;
gpr_cv_broadcast(&global_connection_polling_cv);
gpr_mu_unlock(&global_connection_polling_mu);
for (;;) {
event = grpc_completion_queue_next(
channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
@ -484,19 +506,24 @@ static void *run_poll_channels_loop_no_gil(void *arg) {
break;
}
if (event.type == GRPC_OP_COMPLETE) {
grpc_rb_channel_try_register_connection_polling((grpc_rb_channel *)event.tag);
grpc_rb_channel_try_register_connection_polling(
(grpc_rb_channel *)event.tag);
}
}
grpc_completion_queue_destroy(channel_polling_cq);
gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling loop");
gpr_log(GPR_DEBUG,
"GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
"loop");
return NULL;
}
// Notify the channel polling loop to cleanup and shutdown.
static void grpc_rb_event_unblocking_func(void *arg) {
static void run_poll_channels_loop_unblocking_func(void *arg) {
(void)arg;
gpr_mu_lock(&global_connection_polling_mu);
gpr_log(GPR_DEBUG, "GRPC_RUBY: grpc_rb_event_unblocking_func - begin aborting connection polling");
gpr_log(GPR_DEBUG,
"GRPC_RUBY: grpc_rb_event_unblocking_func - begin aborting "
"connection polling");
abort_channel_polling = 1;
grpc_completion_queue_shutdown(channel_polling_cq);
gpr_mu_unlock(&global_connection_polling_mu);
@ -505,12 +532,41 @@ static void grpc_rb_event_unblocking_func(void *arg) {
// Poll channel connectivity states in background thread without the GIL.
static VALUE run_poll_channels_loop(VALUE arg) {
(void)arg;
gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
gpr_log(
GPR_DEBUG,
"GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
grpc_rb_event_unblocking_func, NULL);
run_poll_channels_loop_unblocking_func, NULL);
return Qnil;
}
static void *wait_until_channel_polling_thread_started_no_gil(void *arg) {
(void)arg;
gpr_log(GPR_DEBUG, "GRPC_RUBY: wait for channel polling thread to start");
gpr_mu_lock(&global_connection_polling_mu);
while (!channel_polling_thread_started && !abort_channel_polling) {
gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
gpr_inf_future(GPR_CLOCK_REALTIME));
}
gpr_mu_unlock(&global_connection_polling_mu);
return NULL;
}
static void wait_until_channel_polling_thread_started_unblocking_func(
void *arg) {
(void)arg;
gpr_mu_lock(&global_connection_polling_mu);
gpr_log(GPR_DEBUG,
"GRPC_RUBY: "
"wait_until_channel_polling_thread_started_unblocking_func - begin "
"aborting connection polling");
abort_channel_polling = 1;
gpr_cv_broadcast(&global_connection_polling_cv);
gpr_mu_unlock(&global_connection_polling_mu);
}
/* Temporary fix for
* https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
* Transports in idle channels can get destroyed. Normally c-core re-connects,
@ -521,11 +577,26 @@ static VALUE run_poll_channels_loop(VALUE arg) {
* calls - so that c-core can reconnect if needed, when there aren't any RPC's.
* TODO(apolcyn) remove this when core handles new RPCs on dead connections.
*/
static void start_poll_channels_loop() {
channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
void grpc_rb_channel_polling_thread_start() {
VALUE background_thread = Qnil;
GPR_ASSERT(!abort_channel_polling);
GPR_ASSERT(!channel_polling_thread_started);
GPR_ASSERT(channel_polling_cq == NULL);
gpr_mu_init(&global_connection_polling_mu);
abort_channel_polling = 0;
rb_thread_create(run_poll_channels_loop, NULL);
gpr_cv_init(&global_connection_polling_cv);
channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
background_thread = rb_thread_create(run_poll_channels_loop, NULL);
if (!RTEST(background_thread)) {
gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
gpr_mu_lock(&global_connection_polling_mu);
abort_channel_polling = 1;
gpr_cv_broadcast(&global_connection_polling_cv);
gpr_mu_unlock(&global_connection_polling_mu);
}
}
static void Init_grpc_propagate_masks() {
@ -597,7 +668,6 @@ void Init_grpc_channel() {
id_insecure_channel = rb_intern("this_channel_is_insecure");
Init_grpc_propagate_masks();
Init_grpc_connectivity_states();
start_poll_channels_loop();
}
/* Gets the wrapped channel from the ruby wrapper */

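The comment block above explains why the Ruby wrapper keeps a background thread watching channel connectivity: transports in idle channels can die, and without active RPCs nothing would otherwise prompt c-core to reconnect. As a rough, standalone illustration of the underlying core mechanism (not part of this diff; the target address and 5-second deadline are placeholders), the sketch below watches a single channel's state on a dedicated completion queue:

#include <grpc/grpc.h>
#include <grpc/support/time.h>
#include <stdio.h>

int main(void) {
  grpc_init();
  /* Placeholder target; any channel works for the state-watching pattern. */
  grpc_channel *channel =
      grpc_insecure_channel_create("localhost:50051", NULL, NULL);
  grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);

  /* try_to_connect=1 nudges an idle channel, which is the point of the
   * Ruby polling thread: keep reconnecting even when no RPCs are active. */
  grpc_connectivity_state state =
      grpc_channel_check_connectivity_state(channel, 1);

  /* Ask for a completion on cq when the state changes (or after 5 seconds). */
  grpc_channel_watch_connectivity_state(
      channel, state,
      gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                   gpr_time_from_seconds(5, GPR_TIMESPAN)),
      cq, (void *)0x1);

  grpc_event ev = grpc_completion_queue_next(
      cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  printf("connectivity event: type=%d success=%d\n", ev.type, ev.success);

  grpc_channel_destroy(channel);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL)
             .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}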
@ -41,6 +41,8 @@
/* Initializes the Channel class. */
void Init_grpc_channel();
void grpc_rb_channel_polling_thread_start();
/* Gets the wrapped channel from the ruby wrapper */
grpc_channel* grpc_rb_get_wrapped_channel(VALUE v);

@ -33,8 +33,8 @@
#include <ruby/ruby.h>
#include "rb_grpc_imports.generated.h"
#include "rb_channel_args.h"
#include "rb_grpc_imports.generated.h"
#include <grpc/grpc.h>
@ -42,9 +42,12 @@
static rb_data_type_t grpc_rb_channel_args_data_type = {
"grpc_channel_args",
{GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, GRPC_RB_MEMSIZE_UNAVAILABLE,
{GRPC_RB_GC_NOT_MARKED,
GRPC_RB_GC_DONT_FREE,
GRPC_RB_MEMSIZE_UNAVAILABLE,
{NULL, NULL}},
NULL, NULL,
NULL,
NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
RUBY_TYPED_FREE_IMMEDIATELY
#endif
@ -137,11 +140,10 @@ static VALUE grpc_rb_hash_convert_to_channel_args0(VALUE as_value) {
params->dst->num_args = num_args;
params->dst->args = ALLOC_N(grpc_arg, num_args);
MEMZERO(params->dst->args, grpc_arg, num_args);
rb_hash_foreach(params->src_hash,
grpc_rb_channel_create_in_process_add_args_hash_cb,
TypedData_Wrap_Struct(grpc_rb_cChannelArgs,
&grpc_rb_channel_args_data_type,
params->dst));
rb_hash_foreach(
params->src_hash, grpc_rb_channel_create_in_process_add_args_hash_cb,
TypedData_Wrap_Struct(grpc_rb_cChannelArgs,
&grpc_rb_channel_args_data_type, params->dst));
/* reset num_args as grpc_rb_channel_create_in_process_add_args_hash_cb
* decrements it during hash processing */
params->dst->num_args = num_args;
@ -157,7 +159,7 @@ void grpc_rb_hash_convert_to_channel_args(VALUE src_hash,
/* Make a protected call to grpc_rb_hash_convert_channel_args */
params.src_hash = src_hash;
params.dst = dst;
rb_protect(grpc_rb_hash_convert_to_channel_args0, (VALUE) & params, &status);
rb_protect(grpc_rb_hash_convert_to_channel_args0, (VALUE)&params, &status);
if (status != 0) {
if (dst->args != NULL) {
/* Free any allocated memory before propagating the error */

@ -35,8 +35,8 @@
#include <string.h>
#include "rb_grpc_imports.generated.h"
#include "rb_channel_credentials.h"
#include "rb_grpc_imports.generated.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
@ -91,8 +91,10 @@ static void grpc_rb_channel_credentials_mark(void *p) {
static rb_data_type_t grpc_rb_channel_credentials_data_type = {
"grpc_channel_credentials",
{grpc_rb_channel_credentials_mark, grpc_rb_channel_credentials_free,
GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}},
{grpc_rb_channel_credentials_mark,
grpc_rb_channel_credentials_free,
GRPC_RB_MEMSIZE_UNAVAILABLE,
{NULL, NULL}},
NULL,
NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
@ -106,13 +108,15 @@ static VALUE grpc_rb_channel_credentials_alloc(VALUE cls) {
grpc_rb_channel_credentials *wrapper = ALLOC(grpc_rb_channel_credentials);
wrapper->wrapped = NULL;
wrapper->mark = Qnil;
return TypedData_Wrap_Struct(cls, &grpc_rb_channel_credentials_data_type, wrapper);
return TypedData_Wrap_Struct(cls, &grpc_rb_channel_credentials_data_type,
wrapper);
}
/* Creates a wrapping object for a given channel credentials. This should only
* be called with grpc_channel_credentials objects that are not already
* associated with any Ruby object. */
VALUE grpc_rb_wrap_channel_credentials(grpc_channel_credentials *c, VALUE mark) {
VALUE grpc_rb_wrap_channel_credentials(grpc_channel_credentials *c,
VALUE mark) {
VALUE rb_wrapper;
grpc_rb_channel_credentials *wrapper;
if (c == NULL) {
@ -147,7 +151,8 @@ static ID id_pem_cert_chain;
pem_private_key: (optional) PEM encoding of the client's private key
pem_cert_chain: (optional) PEM encoding of the client's cert chain
Initializes Credential instances. */
static VALUE grpc_rb_channel_credentials_init(int argc, VALUE *argv, VALUE self) {
static VALUE grpc_rb_channel_credentials_init(int argc, VALUE *argv,
VALUE self) {
VALUE pem_root_certs = Qnil;
VALUE pem_private_key = Qnil;
VALUE pem_cert_chain = Qnil;
@ -156,6 +161,9 @@ static VALUE grpc_rb_channel_credentials_init(int argc, VALUE *argv, VALUE self)
grpc_ssl_pem_key_cert_pair key_cert_pair;
const char *pem_root_certs_cstr = NULL;
MEMZERO(&key_cert_pair, grpc_ssl_pem_key_cert_pair, 1);
grpc_ruby_once_init();
/* "03" == no mandatory arg, 3 optional */
rb_scan_args(argc, argv, "03", &pem_root_certs, &pem_private_key,
&pem_cert_chain);
@ -170,8 +178,8 @@ static VALUE grpc_rb_channel_credentials_init(int argc, VALUE *argv, VALUE self)
} else {
key_cert_pair.private_key = RSTRING_PTR(pem_private_key);
key_cert_pair.cert_chain = RSTRING_PTR(pem_cert_chain);
creds = grpc_ssl_credentials_create(pem_root_certs_cstr,
&key_cert_pair, NULL);
creds =
grpc_ssl_credentials_create(pem_root_certs_cstr, &key_cert_pair, NULL);
}
if (creds == NULL) {
rb_raise(rb_eRuntimeError, "could not create a credentials, not sure why");
@ -230,8 +238,8 @@ static VALUE grpc_rb_set_default_roots_pem(VALUE self, VALUE roots) {
}
void Init_grpc_channel_credentials() {
grpc_rb_cChannelCredentials =
rb_define_class_under(grpc_rb_mGrpcCore, "ChannelCredentials", rb_cObject);
grpc_rb_cChannelCredentials = rb_define_class_under(
grpc_rb_mGrpcCore, "ChannelCredentials", rb_cObject);
/* Allocates an object managed by the ruby runtime */
rb_define_alloc_func(grpc_rb_cChannelCredentials,
@ -259,7 +267,6 @@ void Init_grpc_channel_credentials() {
grpc_channel_credentials *grpc_rb_get_wrapped_channel_credentials(VALUE v) {
grpc_rb_channel_credentials *wrapper = NULL;
TypedData_Get_Struct(v, grpc_rb_channel_credentials,
&grpc_rb_channel_credentials_data_type,
wrapper);
&grpc_rb_channel_credentials_data_type, wrapper);
return wrapper->wrapped;
}

@ -33,14 +33,14 @@
#include <ruby/ruby.h>
#include "rb_grpc_imports.generated.h"
#include "rb_completion_queue.h"
#include "rb_grpc_imports.generated.h"
#include <ruby/thread.h>
#include <grpc/grpc.h>
#include <grpc/support/time.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include "rb_grpc.h"
/* Used to allow grpc_completion_queue_next call to release the GIL */
@ -54,14 +54,13 @@ typedef struct next_call_stack {
/* Calls grpc_completion_queue_pluck without holding the ruby GIL */
static void *grpc_rb_completion_queue_pluck_no_gil(void *param) {
next_call_stack *const next_call = (next_call_stack*)param;
next_call_stack *const next_call = (next_call_stack *)param;
gpr_timespec increment = gpr_time_from_millis(20, GPR_TIMESPAN);
gpr_timespec deadline;
do {
deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), increment);
next_call->event = grpc_completion_queue_pluck(next_call->cq,
next_call->tag,
deadline, NULL);
next_call->event = grpc_completion_queue_pluck(
next_call->cq, next_call->tag, deadline, NULL);
if (next_call->event.type != GRPC_QUEUE_TIMEOUT ||
gpr_time_cmp(deadline, next_call->timeout) > 0) {
break;
@ -81,7 +80,7 @@ void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq) {
}
static void unblock_func(void *param) {
next_call_stack *const next_call = (next_call_stack*)param;
next_call_stack *const next_call = (next_call_stack *)param;
next_call->interrupted = 1;
}
@ -111,7 +110,6 @@ grpc_event rb_completion_queue_pluck(grpc_completion_queue *queue, void *tag,
(void *)&next_call);
/* If an interrupt prevented pluck from returning useful information, then
any plucks that did complete must have timed out */
} while (next_call.interrupted &&
next_call.event.type == GRPC_QUEUE_TIMEOUT);
} while (next_call.interrupted && next_call.event.type == GRPC_QUEUE_TIMEOUT);
return next_call.event;
}

@ -33,15 +33,15 @@
#include <ruby/ruby.h>
#include "rb_compression_options.h"
#include "rb_byte_buffer.h"
#include "rb_compression_options.h"
#include "rb_grpc_imports.generated.h"
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/impl/codegen/compression_types.h>
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/alloc.h>
#include <string.h>
#include "rb_grpc.h"
@ -100,8 +100,11 @@ static rb_data_type_t grpc_rb_compression_options_data_type = {
Allocate the wrapped grpc compression options and
initialize it here too. */
static VALUE grpc_rb_compression_options_alloc(VALUE cls) {
grpc_rb_compression_options *wrapper =
gpr_malloc(sizeof(grpc_rb_compression_options));
grpc_rb_compression_options *wrapper = NULL;
grpc_ruby_once_init();
wrapper = gpr_malloc(sizeof(grpc_rb_compression_options));
wrapper->wrapped = NULL;
wrapper->wrapped = gpr_malloc(sizeof(grpc_compression_options));
grpc_compression_options_init(wrapper->wrapped);
@ -179,15 +182,16 @@ void grpc_rb_compression_options_algorithm_name_to_value_internal(
* correct C string out of it. */
algorithm_name_as_string = rb_funcall(algorithm_name, rb_intern("to_s"), 0);
name_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(algorithm_name_as_string), RSTRING_LEN(algorithm_name_as_string));
name_slice =
grpc_slice_from_copied_buffer(RSTRING_PTR(algorithm_name_as_string),
RSTRING_LEN(algorithm_name_as_string));
/* Raise an error if the name isn't recognized as a compression algorithm by
* the algorithm parse function
* in GRPC core. */
if(!grpc_compression_algorithm_parse(name_slice, algorithm_value)) {
if (!grpc_compression_algorithm_parse(name_slice, algorithm_value)) {
tmp_str = grpc_slice_to_c_string(name_slice);
rb_raise(rb_eNameError, "Invalid compression algorithm name: %s",
tmp_str);
rb_raise(rb_eNameError, "Invalid compression algorithm name: %s", tmp_str);
}
grpc_slice_unref(name_slice);

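For reference, the slice-based parse exercised above can be tried on its own. A minimal sketch under the assumption of the standard core headers, using "gzip" as an arbitrary algorithm name:

#include <grpc/compression.h>
#include <grpc/slice.h>
#include <stdio.h>

int main(void) {
  grpc_slice name = grpc_slice_from_static_string("gzip");
  grpc_compression_algorithm algorithm;
  /* Returns non-zero on success, mirroring the check in the Ruby wrapper. */
  if (grpc_compression_algorithm_parse(name, &algorithm)) {
    printf("parsed compression algorithm id: %d\n", algorithm);
  } else {
    printf("unrecognized compression algorithm name\n");
  }
  grpc_slice_unref(name);
  return 0;
}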
@ -33,20 +33,20 @@
#include <ruby/ruby.h>
#include "rb_grpc_imports.generated.h"
#include "rb_event_thread.h"
#include "rb_grpc_imports.generated.h"
#include <stdbool.h>
#include <ruby/thread.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <grpc/support/log.h>
#include <ruby/thread.h>
typedef struct grpc_rb_event {
// callback will be called with argument while holding the GVL
void (*callback)(void*);
void (*callback)(void *);
void *argument;
struct grpc_rb_event *next;
@ -65,8 +65,7 @@ typedef struct grpc_rb_event_queue {
static grpc_rb_event_queue event_queue;
void grpc_rb_event_queue_enqueue(void (*callback)(void*),
void *argument) {
void grpc_rb_event_queue_enqueue(void (*callback)(void *), void *argument) {
grpc_rb_event *event = gpr_malloc(sizeof(grpc_rb_event));
event->callback = callback;
event->argument = argument;
@ -107,8 +106,7 @@ static void *grpc_rb_wait_for_event_no_gil(void *param) {
(void)param;
gpr_mu_lock(&event_queue.mu);
while ((event = grpc_rb_event_queue_dequeue()) == NULL) {
gpr_cv_wait(&event_queue.cv,
&event_queue.mu,
gpr_cv_wait(&event_queue.cv, &event_queue.mu,
gpr_inf_future(GPR_CLOCK_REALTIME));
if (event_queue.abort) {
gpr_mu_unlock(&event_queue.mu);
@ -132,10 +130,10 @@ static void grpc_rb_event_unblocking_func(void *arg) {
static VALUE grpc_rb_event_thread(VALUE arg) {
grpc_rb_event *event;
(void)arg;
while(true) {
event = (grpc_rb_event*)rb_thread_call_without_gvl(
grpc_rb_wait_for_event_no_gil, NULL,
grpc_rb_event_unblocking_func, NULL);
while (true) {
event = (grpc_rb_event *)rb_thread_call_without_gvl(
grpc_rb_wait_for_event_no_gil, NULL, grpc_rb_event_unblocking_func,
NULL);
if (event == NULL) {
// Indicates that the thread needs to shut down
break;

@ -33,5 +33,4 @@
void grpc_rb_event_queue_thread_start();
void grpc_rb_event_queue_enqueue(void (*callback)(void*),
void *argument);
void grpc_rb_event_queue_enqueue(void (*callback)(void *), void *argument);

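A hypothetical usage sketch of the queue declared above (the callback and its string argument are invented for illustration): work can be enqueued from threads that do not hold the GVL, and the dedicated event thread later runs each callback with the GVL held.

#include <grpc/support/log.h>
#include "rb_event_thread.h"

/* Invented example callback; it runs later on the event thread. */
static void log_from_event_thread(void *arg) {
  gpr_log(GPR_DEBUG, "GRPC_RUBY: event thread ran callback with arg=%s",
          (const char *)arg);
}

/* Invented helper showing the enqueue call; safe without the GVL. */
static void schedule_example_event(void) {
  grpc_rb_event_queue_enqueue(log_from_event_thread, (void *)"example");
}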
@ -33,8 +33,8 @@
#include <ruby/ruby.h>
#include "rb_grpc_imports.generated.h"
#include "rb_grpc.h"
#include "rb_grpc_imports.generated.h"
#include <math.h>
#include <ruby/vm.h>
@ -46,16 +46,19 @@
#include "rb_call_credentials.h"
#include "rb_channel.h"
#include "rb_channel_credentials.h"
#include "rb_compression_options.h"
#include "rb_event_thread.h"
#include "rb_loader.h"
#include "rb_server.h"
#include "rb_server_credentials.h"
#include "rb_compression_options.h"
static VALUE grpc_rb_cTimeVal = Qnil;
static rb_data_type_t grpc_rb_timespec_data_type = {
"gpr_timespec",
{GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, GRPC_RB_MEMSIZE_UNAVAILABLE,
{GRPC_RB_GC_NOT_MARKED,
GRPC_RB_GC_DONT_FREE,
GRPC_RB_MEMSIZE_UNAVAILABLE,
{NULL, NULL}},
NULL,
NULL,
@ -84,8 +87,7 @@ VALUE grpc_rb_cannot_init(VALUE self) {
/* Init/Clone func that fails by raising an exception. */
VALUE grpc_rb_cannot_init_copy(VALUE copy, VALUE self) {
(void)self;
rb_raise(rb_eTypeError,
"Copy initialization of %s is not supported",
rb_raise(rb_eTypeError, "Copy initialization of %s is not supported",
rb_obj_classname(copy));
return Qnil;
}
@ -143,8 +145,7 @@ gpr_timespec grpc_rb_time_timeval(VALUE time, int interval) {
}
t.tv_sec = (int64_t)f;
if (f != t.tv_sec) {
rb_raise(rb_eRangeError, "%f out of Time range",
RFLOAT_VALUE(time));
rb_raise(rb_eRangeError, "%f out of Time range", RFLOAT_VALUE(time));
}
t.tv_nsec = (int)(d * 1e9 + 0.5);
}
@ -269,9 +270,7 @@ static void Init_grpc_time_consts() {
id_tv_nsec = rb_intern("tv_nsec");
}
static void grpc_rb_shutdown(void) {
grpc_shutdown();
}
static void grpc_rb_shutdown(void) { grpc_shutdown(); }
/* Initialize the GRPC module structs */
@ -291,17 +290,14 @@ VALUE sym_metadata = Qundef;
static gpr_once g_once_init = GPR_ONCE_INIT;
static void grpc_ruby_once_init() {
static void grpc_ruby_once_init_internal() {
grpc_init();
grpc_rb_event_queue_thread_start();
grpc_rb_channel_polling_thread_start();
atexit(grpc_rb_shutdown);
}
void Init_grpc_c() {
if (!grpc_rb_load_core()) {
rb_raise(rb_eLoadError, "Couldn't find or load gRPC's dynamic C core");
return;
}
void grpc_ruby_once_init() {
/* ruby_vm_at_exit doesn't seem to be working. It would crash once every
* blue moon, and some users are getting it repeatedly. See the discussions
* - https://github.com/grpc/grpc/pull/5337
@ -312,13 +308,19 @@ void Init_grpc_c() {
* then loaded again by another VM within the same process, we need to
* schedule our initialization and destruction only once.
*/
gpr_once_init(&g_once_init, grpc_ruby_once_init);
gpr_once_init(&g_once_init, grpc_ruby_once_init_internal);
}
void Init_grpc_c() {
if (!grpc_rb_load_core()) {
rb_raise(rb_eLoadError, "Couldn't find or load gRPC's dynamic C core");
return;
}
grpc_rb_mGRPC = rb_define_module("GRPC");
grpc_rb_mGrpcCore = rb_define_module_under(grpc_rb_mGRPC, "Core");
grpc_rb_sNewServerRpc =
rb_struct_define("NewServerRpc", "method", "host",
"deadline", "metadata", "call", NULL);
grpc_rb_sNewServerRpc = rb_struct_define(
"NewServerRpc", "method", "host", "deadline", "metadata", "call", NULL);
grpc_rb_sStatus =
rb_struct_define("Status", "code", "details", "metadata", NULL);
sym_code = ID2SYM(rb_intern("code"));

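The hunk above explains why initialization is funneled through gpr_once rather than ruby_vm_at_exit: even if the extension is loaded by several Ruby VMs in one process, grpc_init and the atexit shutdown hook should run once. A stripped-down sketch of that pattern (standalone, not the wrapper itself):

#include <grpc/grpc.h>
#include <grpc/support/sync.h>
#include <stdlib.h>

static gpr_once g_once = GPR_ONCE_INIT;

static void sketch_shutdown(void) { grpc_shutdown(); }

static void sketch_init_once(void) {
  grpc_init();
  /* Registered exactly once per process, like grpc_rb_shutdown above. */
  atexit(sketch_shutdown);
}

/* Any entry point that might run first can call this; only the first call
 * executes sketch_init_once, later calls are no-ops. */
static void sketch_ensure_initialized(void) {
  gpr_once_init(&g_once, sketch_init_once);
}

int main(void) {
  sketch_ensure_initialized();
  sketch_ensure_initialized();
  return 0;
}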
@ -34,8 +34,8 @@
#ifndef GRPC_RB_H_
#define GRPC_RB_H_
#include <sys/time.h>
#include <ruby/ruby.h>
#include <sys/time.h>
#include <grpc/support/time.h>
@ -68,7 +68,7 @@ extern VALUE sym_metadata;
/* GRPC_RB_MEMSIZE_UNAVAILABLE is used in rb_data_type_t to indicate that the
* number of bytes used by the wrapped struct is not available. */
#define GRPC_RB_MEMSIZE_UNAVAILABLE (size_t (*)(const void*))(NULL)
#define GRPC_RB_MEMSIZE_UNAVAILABLE (size_t(*)(const void*))(NULL)
/* A ruby object alloc func that fails by raising an exception. */
VALUE grpc_rb_cannot_alloc(VALUE cls);
@ -82,4 +82,6 @@ VALUE grpc_rb_cannot_init_copy(VALUE copy, VALUE self);
/* grpc_rb_time_timeval creates a gpr_timespec from a ruby time object. */
gpr_timespec grpc_rb_time_timeval(VALUE time, int interval);
void grpc_ruby_once_init();
#endif /* GRPC_RB_H_ */

@ -108,6 +108,7 @@ grpc_channel_create_call_type grpc_channel_create_call_import;
grpc_channel_ping_type grpc_channel_ping_import;
grpc_channel_register_call_type grpc_channel_register_call_import;
grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import;
grpc_call_arena_alloc_type grpc_call_arena_alloc_import;
grpc_call_start_batch_type grpc_call_start_batch_import;
grpc_call_get_peer_type grpc_call_get_peer_import;
grpc_census_call_set_context_type grpc_census_call_set_context_import;
@ -119,7 +120,8 @@ grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import;
grpc_channel_destroy_type grpc_channel_destroy_import;
grpc_call_cancel_type grpc_call_cancel_import;
grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import;
grpc_call_destroy_type grpc_call_destroy_import;
grpc_call_ref_type grpc_call_ref_import;
grpc_call_unref_type grpc_call_unref_import;
grpc_server_request_call_type grpc_server_request_call_import;
grpc_server_register_method_type grpc_server_register_method_import;
grpc_server_request_registered_call_type grpc_server_request_registered_call_import;
@ -404,6 +406,7 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping");
grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call");
grpc_channel_create_registered_call_import = (grpc_channel_create_registered_call_type) GetProcAddress(library, "grpc_channel_create_registered_call");
grpc_call_arena_alloc_import = (grpc_call_arena_alloc_type) GetProcAddress(library, "grpc_call_arena_alloc");
grpc_call_start_batch_import = (grpc_call_start_batch_type) GetProcAddress(library, "grpc_call_start_batch");
grpc_call_get_peer_import = (grpc_call_get_peer_type) GetProcAddress(library, "grpc_call_get_peer");
grpc_census_call_set_context_import = (grpc_census_call_set_context_type) GetProcAddress(library, "grpc_census_call_set_context");
@ -415,7 +418,8 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_channel_destroy_import = (grpc_channel_destroy_type) GetProcAddress(library, "grpc_channel_destroy");
grpc_call_cancel_import = (grpc_call_cancel_type) GetProcAddress(library, "grpc_call_cancel");
grpc_call_cancel_with_status_import = (grpc_call_cancel_with_status_type) GetProcAddress(library, "grpc_call_cancel_with_status");
grpc_call_destroy_import = (grpc_call_destroy_type) GetProcAddress(library, "grpc_call_destroy");
grpc_call_ref_import = (grpc_call_ref_type) GetProcAddress(library, "grpc_call_ref");
grpc_call_unref_import = (grpc_call_unref_type) GetProcAddress(library, "grpc_call_unref");
grpc_server_request_call_import = (grpc_server_request_call_type) GetProcAddress(library, "grpc_server_request_call");
grpc_server_register_method_import = (grpc_server_register_method_type) GetProcAddress(library, "grpc_server_register_method");
grpc_server_request_registered_call_import = (grpc_server_request_registered_call_type) GetProcAddress(library, "grpc_server_request_registered_call");

@ -275,6 +275,9 @@ extern grpc_channel_register_call_type grpc_channel_register_call_import;
typedef grpc_call *(*grpc_channel_create_registered_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, void *registered_call_handle, gpr_timespec deadline, void *reserved);
extern grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import;
#define grpc_channel_create_registered_call grpc_channel_create_registered_call_import
typedef void *(*grpc_call_arena_alloc_type)(grpc_call *call, size_t size);
extern grpc_call_arena_alloc_type grpc_call_arena_alloc_import;
#define grpc_call_arena_alloc grpc_call_arena_alloc_import
typedef grpc_call_error(*grpc_call_start_batch_type)(grpc_call *call, const grpc_op *ops, size_t nops, void *tag, void *reserved);
extern grpc_call_start_batch_type grpc_call_start_batch_import;
#define grpc_call_start_batch grpc_call_start_batch_import
@ -308,9 +311,12 @@ extern grpc_call_cancel_type grpc_call_cancel_import;
typedef grpc_call_error(*grpc_call_cancel_with_status_type)(grpc_call *call, grpc_status_code status, const char *description, void *reserved);
extern grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import;
#define grpc_call_cancel_with_status grpc_call_cancel_with_status_import
typedef void(*grpc_call_destroy_type)(grpc_call *call);
extern grpc_call_destroy_type grpc_call_destroy_import;
#define grpc_call_destroy grpc_call_destroy_import
typedef void(*grpc_call_ref_type)(grpc_call *call);
extern grpc_call_ref_type grpc_call_ref_import;
#define grpc_call_ref grpc_call_ref_import
typedef void(*grpc_call_unref_type)(grpc_call *call);
extern grpc_call_unref_type grpc_call_unref_import;
#define grpc_call_unref grpc_call_unref_import
typedef grpc_call_error(*grpc_server_request_call_type)(grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *request_metadata, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new);
extern grpc_server_request_call_type grpc_server_request_call_import;
#define grpc_server_request_call grpc_server_request_call_import

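The generated import tables above follow one pattern per core symbol: a typedef for the function-pointer type, a global pointer, a #define alias, and a GetProcAddress lookup at load time. A minimal Windows-only sketch of that pattern for a single symbol (the DLL name is a placeholder):

#include <windows.h>
#include <stdio.h>

/* Forward declaration so the pointer type matches the real signature. */
typedef struct grpc_call grpc_call;

typedef void (*grpc_call_unref_type)(grpc_call *call);
static grpc_call_unref_type grpc_call_unref_import;
#define grpc_call_unref grpc_call_unref_import

int main(void) {
  HMODULE library = LoadLibraryA("grpc_c.dll"); /* placeholder DLL name */
  if (library == NULL) {
    fprintf(stderr, "failed to load core library\n");
    return 1;
  }
  grpc_call_unref_import =
      (grpc_call_unref_type)GetProcAddress(library, "grpc_call_unref");
  if (grpc_call_unref_import == NULL) {
    fprintf(stderr, "grpc_call_unref not exported by this library\n");
    return 1;
  }
  /* From here on, grpc_call_unref(call) dispatches through the resolved
   * pointer, exactly as in grpc_rb_load_imports above. */
  return 0;
}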
@ -132,11 +132,15 @@ static VALUE grpc_rb_server_alloc(VALUE cls) {
Initializes server instances. */
static VALUE grpc_rb_server_init(VALUE self, VALUE channel_args) {
grpc_completion_queue *cq = grpc_completion_queue_create_for_pluck(NULL);
grpc_completion_queue *cq = NULL;
grpc_rb_server *wrapper = NULL;
grpc_server *srv = NULL;
grpc_channel_args args;
MEMZERO(&args, grpc_channel_args, 1);
grpc_ruby_once_init();
cq = grpc_completion_queue_create_for_pluck(NULL);
TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type,
wrapper);
grpc_rb_hash_convert_to_channel_args(channel_args, &args);

@ -314,8 +314,10 @@
set(CMAKE_CXX_FLAGS "<%text>${CMAKE_CXX_FLAGS}</%text> -std=c++11")
endif()
if(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES dl rt m pthread)
if(_gRPC_PLATFORM_MAC)
set(_gRPC_ALLTARGETS_LIBRARIES <%text>${CMAKE_DL_LIBS}</%text> m pthread)
elseif(UNIX)
set(_gRPC_ALLTARGETS_LIBRARIES <%text>${CMAKE_DL_LIBS}</%text> rt m pthread)
endif()
if(WIN32 AND MSVC)

@ -205,7 +205,7 @@
% endfor
]
}],
['OS == "win"', {
['OS == "win" and runtime!="electron"', {
'targets': [
{
# IMPORTANT WINDOWS BUILD INFORMATION
@ -216,6 +216,8 @@
# when including the Node headers. The remedy for this is to remove
# the OpenSSL headers, from the downloaded Node development package,
# which is typically located in `.node-gyp` in your home directory.
#
# This is not true of Electron, which does not have OpenSSL headers.
'target_name': 'WINDOWS_BUILD_WARNING',
'rules': [
{

@ -10,6 +10,8 @@
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/src/php/ext/grpc)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/boringssl/include)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/cares)
LIBS="-lpthread $LIBS"
@ -20,8 +22,11 @@
PHP_ADD_LIBRARY(dl)
case $host in
*darwin*) ;;
*darwin*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_darwin)
;;
*)
PHP_ADD_INCLUDE(PHP_EXT_SRCDIR()/third_party/cares/config_linux)
PHP_ADD_LIBRARY(rt,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt)
;;

@ -0,0 +1,34 @@
%YAML 1.2
--- |
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc/_grpcio_metadata.py.template`!!!
__version__ = """${settings.python_version.pep440()}"""

@ -103,7 +103,7 @@ static void verifier(grpc_server *server, grpc_completion_queue *cq,
GPR_ASSERT(payload != NULL);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_destroy(s);
grpc_call_unref(s);
grpc_byte_buffer_destroy(payload);
cq_verifier_destroy(cqv);
}

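This hunk and the ones that follow are a mechanical sweep: the tests drop grpc_call_destroy in favor of grpc_call_unref, matching the import-table change above where the core API now exposes grpc_call_ref/grpc_call_unref for a ref-counted call lifetime. A minimal client-side sketch of the new lifetime (target and method name are placeholders; batch operations elided):

#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/time.h>

int main(void) {
  grpc_init();
  grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);
  grpc_channel *channel =
      grpc_insecure_channel_create("localhost:50051", NULL, NULL);
  grpc_slice method = grpc_slice_from_static_string("/foo/Bar");
  grpc_call *call = grpc_channel_create_call(
      channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, method, NULL,
      gpr_inf_future(GPR_CLOCK_REALTIME), NULL);

  /* ... grpc_call_start_batch / completion-queue driving elided ... */

  grpc_call_unref(call); /* was grpc_call_destroy(call) before this change */
  grpc_channel_destroy(channel);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL)
             .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}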
@ -131,7 +131,7 @@ static void server_verifier(grpc_server *server, grpc_completion_queue *cq,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}
@ -177,7 +177,7 @@ static void server_verifier_sends_too_much_metadata(grpc_server *server,
grpc_slice_unref(meta.value);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -76,7 +76,7 @@ static void verifier_succeeds(grpc_server *server, grpc_completion_queue *cq,
GPR_ASSERT(payload != NULL);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_destroy(s);
grpc_call_unref(s);
grpc_byte_buffer_destroy(payload);
cq_verifier_destroy(cqv);
}
@ -102,7 +102,7 @@ static void verifier_fails(grpc_server *server, grpc_completion_queue *cq,
GPR_ASSERT(payload == NULL);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -122,7 +122,7 @@ static void verifier(grpc_server *server, grpc_completion_queue *cq,
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}

@ -115,7 +115,7 @@ static void run_test(const char *target, size_t nops) {
GPR_ASSERT(status != GRPC_STATUS_OK);
grpc_call_destroy(c);
grpc_call_unref(c);
grpc_slice_unref(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);

@ -394,7 +394,7 @@ static request_sequences perform_request(servers_fixture *f,
"foo.test.google.fr"));
GPR_ASSERT(was_cancelled == 1);
grpc_call_destroy(f->server_calls[s_idx]);
grpc_call_unref(f->server_calls[s_idx]);
/* ask for the next request on this server */
GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
@ -420,7 +420,7 @@ static request_sequences perform_request(servers_fixture *f,
cq_verifier_destroy(cqv);
grpc_call_destroy(c);
grpc_call_unref(c);
for (i = 0; i < f->num_servers; i++) {
grpc_call_details_destroy(&rdata->call_details[i]);
@ -616,7 +616,7 @@ static void test_pending_calls(size_t concurrent_calls) {
/* destroy the calls after the channel so that they are still around for the
* LB's shutdown func to process */
for (i = 0; i < concurrent_calls; i++) {
grpc_call_destroy(calls[i]);
grpc_call_unref(calls[i]);
}
gpr_free(calls);
teardown_servers(f);

@ -236,7 +236,7 @@ static void cleanup_rpc(grpc_exec_ctx *exec_ctx) {
grpc_event ev;
grpc_slice_buffer_destroy_internal(exec_ctx, &state.temp_incoming_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &state.outgoing_buffer);
grpc_call_destroy(state.call);
grpc_call_unref(state.call);
grpc_completion_queue_shutdown(state.cq);
do {
ev = grpc_completion_queue_next(state.cq, n_sec_deadline(1), NULL);

@ -138,7 +138,7 @@ static void run_test(bool wait_for_ready, bool use_service_config) {
.type != GRPC_QUEUE_SHUTDOWN)
;
grpc_completion_queue_destroy(cq);
grpc_call_destroy(call);
grpc_call_unref(call);
grpc_channel_destroy(chan);
cq_verifier_destroy(cqv);

@ -243,7 +243,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
grpc_slice_str_cmp(call_details.host, "foo.test.google.fr"));
GPR_ASSERT(was_cancelled == 1);
grpc_call_destroy(s);
grpc_call_unref(s);
} else {
/* Check for a failed connection. */
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
@ -252,7 +252,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
}
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);

@ -343,7 +343,7 @@ static void simple_request_body(grpc_end2end_test_fixture f,
CQ_EXPECT_COMPLETION(cqv, tag(1), expected_result == SUCCESS);
cq_verify(cqv);
grpc_call_destroy(c);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
}

@ -148,8 +148,8 @@ void grpc_end2end_proxy_destroy(grpc_end2end_proxy *proxy) {
static void unrefpc(proxy_call *pc, const char *reason) {
if (gpr_unref(&pc->refs)) {
grpc_call_destroy(pc->c2p);
grpc_call_destroy(pc->p2s);
grpc_call_unref(pc->c2p);
grpc_call_unref(pc->p2s);
grpc_metadata_array_destroy(&pc->c2p_initial_metadata);
grpc_metadata_array_destroy(&pc->p2s_initial_metadata);
grpc_metadata_array_destroy(&pc->p2s_trailing_metadata);

@ -661,7 +661,7 @@ static void read_metadata(input_stream *inp, size_t *count,
}
static call_state *destroy_call(call_state *call) {
grpc_call_destroy(call->call);
grpc_call_unref(call->call);
call->call = NULL;
return maybe_delete_call_state(call);
}

@ -151,7 +151,7 @@ done:
ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL);
GPR_ASSERT(ev.type == GRPC_QUEUE_SHUTDOWN);
}
grpc_call_destroy(call);
grpc_call_unref(call);
grpc_completion_queue_destroy(cq);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
