Merge branch 'master' into server_try_cancel_api

pull/4958/head
Sree Kuchibhotla
commit a0a8eaab0e
Changed files (lines changed in parentheses):
  1. .gitignore (5)
  2. BUILD (8)
  3. Makefile (41)
  4. binding.gyp (80)
  5. build.yaml (13)
  6. gRPC.podspec (5)
  7. grpc.gemspec (3)
  8. include/grpc/census.h (99)
  9. include/grpc/compression.h (21)
  10. include/grpc/grpc.h (158)
  11. include/grpc/grpc_security.h (120)
  12. include/grpc/grpc_zookeeper.h (4)
  13. include/grpc/impl/codegen/alloc.h (16)
  14. include/grpc/impl/codegen/byte_buffer.h (27)
  15. include/grpc/impl/codegen/log.h (12)
  16. include/grpc/impl/codegen/port_platform.h (14)
  17. include/grpc/impl/codegen/slice.h (32)
  18. include/grpc/impl/codegen/slice_buffer.h (33)
  19. include/grpc/impl/codegen/sync.h (44)
  20. include/grpc/impl/codegen/time.h (46)
  21. include/grpc/support/avl.h (14)
  22. include/grpc/support/cmdline.h (28)
  23. include/grpc/support/cpu.h (8)
  24. include/grpc/support/histogram.h (43)
  25. include/grpc/support/host_port.h (8)
  26. include/grpc/support/log_win32.h (2)
  27. include/grpc/support/string_util.h (6)
  28. include/grpc/support/subprocess.h (14)
  29. include/grpc/support/thd.h (18)
  30. package.json (3)
  31. setup.cfg (3)
  32. src/core/census/grpc_filter.c (4)
  33. src/core/census/placeholders.c (114)
  34. src/core/channel/client_channel.c (23)
  35. src/core/channel/client_uchannel.c (6)
  36. src/core/channel/compress_filter.c (6)
  37. src/core/channel/http_client_filter.c (4)
  38. src/core/channel/http_server_filter.c (4)
  39. src/core/channel/subchannel_call_holder.c (18)
  40. src/core/client_config/lb_policies/pick_first.c (18)
  41. src/core/client_config/lb_policies/round_robin.c (14)
  42. src/core/client_config/resolvers/dns_resolver.c (6)
  43. src/core/client_config/resolvers/sockaddr_resolver.c (6)
  44. src/core/client_config/resolvers/zookeeper_resolver.c (4)
  45. src/core/client_config/subchannel.c (18)
  46. src/core/httpcli/httpcli.c (10)
  47. src/core/iomgr/closure.c (8)
  48. src/core/iomgr/closure.h (9)
  49. src/core/iomgr/exec_ctx.c (18)
  50. src/core/iomgr/exec_ctx.h (17)
  51. src/core/iomgr/executor.c (8)
  52. src/core/iomgr/executor.h (4)
  53. src/core/iomgr/fd_posix.c (6)
  54. src/core/iomgr/iocp_windows.c (4)
  55. src/core/iomgr/pollset_multipoller_with_epoll.c (6)
  56. src/core/iomgr/pollset_posix.c (13)
  57. src/core/iomgr/pollset_windows.c (4)
  58. src/core/iomgr/resolve_address_posix.c (4)
  59. src/core/iomgr/resolve_address_windows.c (4)
  60. src/core/iomgr/tcp_client_posix.c (14)
  61. src/core/iomgr/tcp_client_windows.c (8)
  62. src/core/iomgr/tcp_posix.c (14)
  63. src/core/iomgr/tcp_server_posix.c (9)
  64. src/core/iomgr/tcp_server_windows.c (6)
  65. src/core/iomgr/tcp_windows.c (20)
  66. src/core/iomgr/timer.c (6)
  67. src/core/iomgr/workqueue.h (6)
  68. src/core/iomgr/workqueue_posix.c (6)
  69. src/core/security/credentials.c (4)
  70. src/core/security/google_default_credentials.c (4)
  71. src/core/security/handshake.c (11)
  72. src/core/security/secure_endpoint.c (10)
  73. src/core/security/security_connector.c (45)
  74. src/core/security/security_connector.h (3)
  75. src/core/security/server_auth_filter.c (4)
  76. src/core/security/server_secure_chttp2.c (2)
  77. src/core/statistics/census_init.c (2)
  78. src/core/support/subprocess_windows.c (141)
  79. src/core/surface/alarm.c (6)
  80. src/core/surface/call.c (22)
  81. src/core/surface/channel.c (6)
  82. src/core/surface/channel_connectivity.c (6)
  83. src/core/surface/channel_create.c (6)
  84. src/core/surface/channel_ping.c (4)
  85. src/core/surface/completion_queue.c (6)
  86. src/core/surface/lame_client.c (6)
  87. src/core/surface/secure_channel_create.c (6)
  88. src/core/surface/server.c (30)
  89. src/core/surface/server_chttp2.c (2)
  90. src/core/transport/chttp2/hpack_encoder.c (4)
  91. src/core/transport/chttp2/internal.h (15)
  92. src/core/transport/chttp2/stream_lists.c (21)
  93. src/core/transport/chttp2/writing.c (20)
  94. src/core/transport/chttp2_transport.c (34)
  95. src/core/transport/connectivity_state.c (12)
  96. src/core/transport/transport.c (10)
  97. src/core/transport/transport.h (15)
  98. src/csharp/build_packages.bat (4)
  99. src/node/ext/node_grpc.cc (4)
  100. src/node/performance/benchmark_server.js (2)

Some files were not shown because too many files have changed in this diff.

.gitignore

@@ -5,6 +5,7 @@ libs
objs
# Python items
python_build/
.coverage*
.eggs
.tox
@@ -12,6 +13,10 @@ htmlcov/
dist/
*.egg
# Node installation output
node_modules/
src/node/extension_binary/
# gcov coverage data
reports
coverage

BUILD

@@ -84,6 +84,7 @@ cc_library(
"src/core/support/string_posix.c",
"src/core/support/string_win32.c",
"src/core/support/subprocess_posix.c",
"src/core/support/subprocess_windows.c",
"src/core/support/sync.c",
"src/core/support/sync_posix.c",
"src/core/support/sync_win32.c",
@@ -387,6 +388,7 @@ cc_library(
"src/core/json/json_reader.c",
"src/core/json/json_string.c",
"src/core/json/json_writer.c",
"src/core/surface/alarm.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_reader.c",
@@ -438,6 +440,7 @@ cc_library(
"src/core/census/context.c",
"src/core/census/initialize.c",
"src/core/census/operation.c",
"src/core/census/placeholders.c",
"src/core/census/tag_set.c",
"src/core/census/tracing.c",
],
@@ -663,6 +666,7 @@ cc_library(
"src/core/json/json_reader.c",
"src/core/json/json_string.c",
"src/core/json/json_writer.c",
"src/core/surface/alarm.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_reader.c",
@@ -714,6 +718,7 @@ cc_library(
"src/core/census/context.c",
"src/core/census/initialize.c",
"src/core/census/operation.c",
"src/core/census/placeholders.c",
"src/core/census/tag_set.c",
"src/core/census/tracing.c",
],
@@ -1154,6 +1159,7 @@ objc_library(
"src/core/support/string_posix.c",
"src/core/support/string_win32.c",
"src/core/support/subprocess_posix.c",
"src/core/support/subprocess_windows.c",
"src/core/support/sync.c",
"src/core/support/sync_posix.c",
"src/core/support/sync_win32.c",
@@ -1337,6 +1343,7 @@ objc_library(
"src/core/json/json_reader.c",
"src/core/json/json_string.c",
"src/core/json/json_writer.c",
"src/core/surface/alarm.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_reader.c",
@@ -1388,6 +1395,7 @@ objc_library(
"src/core/census/context.c",
"src/core/census/initialize.c",
"src/core/census/operation.c",
"src/core/census/placeholders.c",
"src/core/census/tag_set.c",
"src/core/census/tracing.c",
],

Makefile

@@ -824,6 +824,7 @@ systemtap_dep_error:
stop:
@false
alarm_test: $(BINDIR)/$(CONFIG)/alarm_test
algorithm_test: $(BINDIR)/$(CONFIG)/algorithm_test
alloc_test: $(BINDIR)/$(CONFIG)/alloc_test
alpn_test: $(BINDIR)/$(CONFIG)/alpn_test
@@ -1132,6 +1133,7 @@ endif
buildtests: buildtests_c buildtests_cxx buildtests_zookeeper
buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/alarm_test \
$(BINDIR)/$(CONFIG)/algorithm_test \
$(BINDIR)/$(CONFIG)/alloc_test \
$(BINDIR)/$(CONFIG)/alpn_test \
@@ -1359,6 +1361,8 @@ test: test_c test_cxx test_zookeeper
flaky_test: flaky_test_c flaky_test_cxx flaky_test_zookeeper
test_c: buildtests_c
$(E) "[RUN] Testing alarm_test"
$(Q) $(BINDIR)/$(CONFIG)/alarm_test || ( echo test alarm_test failed ; exit 1 )
$(E) "[RUN] Testing algorithm_test"
$(Q) $(BINDIR)/$(CONFIG)/algorithm_test || ( echo test algorithm_test failed ; exit 1 )
$(E) "[RUN] Testing alloc_test"
@@ -2231,6 +2235,7 @@ LIBGPR_SRC = \
src/core/support/string_posix.c \
src/core/support/string_win32.c \
src/core/support/subprocess_posix.c \
src/core/support/subprocess_windows.c \
src/core/support/sync.c \
src/core/support/sync_posix.c \
src/core/support/sync_win32.c \
@@ -2454,6 +2459,7 @@ LIBGRPC_SRC = \
src/core/json/json_reader.c \
src/core/json/json_string.c \
src/core/json/json_writer.c \
src/core/surface/alarm.c \
src/core/surface/api_trace.c \
src/core/surface/byte_buffer.c \
src/core/surface/byte_buffer_reader.c \
@@ -2505,6 +2511,7 @@ LIBGRPC_SRC = \
src/core/census/context.c \
src/core/census/initialize.c \
src/core/census/operation.c \
src/core/census/placeholders.c \
src/core/census/tag_set.c \
src/core/census/tracing.c \
@@ -2758,6 +2765,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/json/json_reader.c \
src/core/json/json_string.c \
src/core/json/json_writer.c \
src/core/surface/alarm.c \
src/core/surface/api_trace.c \
src/core/surface/byte_buffer.c \
src/core/surface/byte_buffer_reader.c \
@@ -2809,6 +2817,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/census/context.c \
src/core/census/initialize.c \
src/core/census/operation.c \
src/core/census/placeholders.c \
src/core/census/tag_set.c \
src/core/census/tracing.c \
@@ -5748,6 +5757,38 @@ endif
# All of the test targets, and protoc plugins
ALARM_TEST_SRC = \
test/core/surface/alarm_test.c \
ALARM_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(ALARM_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/alarm_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/alarm_test: $(ALARM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(ALARM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/alarm_test
endif
$(OBJDIR)/$(CONFIG)/test/core/surface/alarm_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_alarm_test: $(ALARM_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(ALARM_TEST_OBJS:.o=.dep)
endif
endif
ALGORITHM_TEST_SRC = \
test/core/compression/algorithm_test.c \

binding.gyp

@@ -37,29 +37,45 @@
# Some of this file is built with the help of
# https://n8.io/converting-a-c-library-to-gyp/
{
'variables': {
'config': '<!(echo $CONFIG)'
},
# TODO: Finish windows support
'target_defaults': {
# Empirically, Node only exports ALPN symbols if its major version is >0.
# io.js always reports versions >0 and always exports ALPN symbols.
# Therefore, Node's major version will be truthy if and only if it
# supports ALPN. The output of "node -v" is v[major].[minor].[patch],
# like "v4.1.1" in a recent version. We use cut to split by period and
# take the first field (resulting in "v[major]"), then use cut again
# to take all but the first character, removing the "v".
'defines': [
'TSI_OPENSSL_ALPN_SUPPORT=<!(node --version | cut -d. -f1 | cut -c2-)'
],
'include_dirs': [
'.',
'include'
],
'conditions': [
['OS == "win"', {
"include_dirs": [ "third_party/boringssl/include" ]
}, {
"include_dirs": [ "third_party/boringssl/include" ],
"defines": [
'_WIN32_WINNT=0x0600',
'WIN32_LEAN_AND_MEAN',
'_HAS_EXCEPTIONS=0',
'UNICODE',
'_UNICODE',
'NOMINMAX',
'OPENSSL_NO_ASM'
],
"msvs_settings": {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
}
},
"libraries": [
"ws2_32"
]
}, { # OS != "win"
# Empirically, Node only exports ALPN symbols if its major version is >0.
# io.js always reports versions >0 and always exports ALPN symbols.
# Therefore, Node's major version will be truthy if and only if it
# supports ALPN. The output of "node -v" is v[major].[minor].[patch],
# like "v4.1.1" in a recent version. We use cut to split by period and
# take the first field (resulting in "v[major]"), then use cut again
# to take all but the first character, removing the "v".
'defines': [
'TSI_OPENSSL_ALPN_SUPPORT=<!(node --version | cut -d. -f1 | cut -c2-)'
],
'variables': {
'config': '<!(echo $CONFIG)'
},
'include_dirs': [
'<(node_root_dir)/deps/openssl/openssl/include',
'<(node_root_dir)/deps/zlib'
@@ -93,6 +109,29 @@
'conditions': [
['OS == "win"', {
'targets': [
{
# IMPORTANT WINDOWS BUILD INFORMATION
# This library does not build on Windows without modifying the Node
# development packages that node-gyp downloads in order to build.
# Due to https://github.com/nodejs/node/issues/4932, the headers for
# BoringSSL conflict with the OpenSSL headers included by default
# when including the Node headers. The remedy for this is to remove
# the OpenSSL headers from the downloaded Node development package,
# which is typically located in `.node-gyp` in your home directory.
'target_name': 'WINDOWS_BUILD_WARNING',
'actions': [
{
'action_name': 'WINDOWS_BUILD_WARNING',
'inputs': [
'package.json'
],
'outputs': [
'ignore_this_part'
],
'action': ['echo', 'IMPORTANT: Due to https://github.com/nodejs/node/issues/4932, to build this library on Windows, you must first remove <(node_root_dir)/include/node/openssl/']
}
]
},
# Only want to compile BoringSSL and zlib under Windows
{
'cflags': [
@@ -400,8 +439,7 @@
'third_party/boringssl/ssl/t1_enc.c',
'third_party/boringssl/ssl/t1_lib.c',
'third_party/boringssl/ssl/tls_record.c',
],
"include_dirs": [ "third_party/boringssl/include" ]
]
},
{
'cflags': [
@@ -430,8 +468,7 @@
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
],
"include_dirs": [ "third_party/boringssl/include" ]
]
},
]
}]
@@ -479,6 +516,7 @@
'src/core/support/string_posix.c',
'src/core/support/string_win32.c',
'src/core/support/subprocess_posix.c',
'src/core/support/subprocess_windows.c',
'src/core/support/sync.c',
'src/core/support/sync_posix.c',
'src/core/support/sync_win32.c',
@@ -611,6 +649,7 @@
'src/core/json/json_reader.c',
'src/core/json/json_string.c',
'src/core/json/json_writer.c',
'src/core/surface/alarm.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_reader.c',
@@ -662,6 +701,7 @@
'src/core/census/context.c',
'src/core/census/initialize.c',
'src/core/census/operation.c',
'src/core/census/placeholders.c',
'src/core/census/tag_set.c',
'src/core/census/tracing.c',
],

build.yaml

@@ -20,6 +20,7 @@ filegroups:
- src/core/census/context.c
- src/core/census/initialize.c
- src/core/census/operation.c
- src/core/census/placeholders.c
- src/core/census/tag_set.c
- src/core/census/tracing.c
- name: grpc++_base
@@ -334,6 +335,7 @@ filegroups:
- src/core/json/json_reader.c
- src/core/json/json_string.c
- src/core/json/json_writer.c
- src/core/surface/alarm.c
- src/core/surface/api_trace.c
- src/core/surface/byte_buffer.c
- src/core/surface/byte_buffer_reader.c
@@ -497,6 +499,7 @@ libs:
- src/core/support/string_posix.c
- src/core/support/string_win32.c
- src/core/support/subprocess_posix.c
- src/core/support/subprocess_windows.c
- src/core/support/sync.c
- src/core/support/sync_posix.c
- src/core/support/sync_win32.c
@@ -882,6 +885,16 @@ libs:
- winsock
- global
targets:
- name: alarm_test
build: test
language: c
src:
- test/core/surface/alarm_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: algorithm_test
build: test
language: c

gRPC.podspec

@@ -151,6 +151,7 @@ Pod::Spec.new do |s|
'src/core/support/string_posix.c',
'src/core/support/string_win32.c',
'src/core/support/subprocess_posix.c',
'src/core/support/subprocess_windows.c',
'src/core/support/sync.c',
'src/core/support/sync_posix.c',
'src/core/support/sync_win32.c',
@@ -398,6 +399,7 @@ Pod::Spec.new do |s|
'src/core/json/json_reader.c',
'src/core/json/json_string.c',
'src/core/json/json_writer.c',
'src/core/surface/alarm.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_reader.c',
@@ -449,6 +451,7 @@ Pod::Spec.new do |s|
'src/core/census/context.c',
'src/core/census/initialize.c',
'src/core/census/operation.c',
'src/core/census/placeholders.c',
'src/core/census/tag_set.c',
'src/core/census/tracing.c'
@@ -605,7 +608,7 @@ Pod::Spec.new do |s|
ss.requires_arc = false
ss.libraries = 'z'
ss.dependency 'BoringSSL', '~> 1.0'
ss.dependency 'BoringSSL', '~> 2.0'
# ss.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
end

grpc.gemspec

@@ -134,6 +134,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/support/string_posix.c )
s.files += %w( src/core/support/string_win32.c )
s.files += %w( src/core/support/subprocess_posix.c )
s.files += %w( src/core/support/subprocess_windows.c )
s.files += %w( src/core/support/sync.c )
s.files += %w( src/core/support/sync_posix.c )
s.files += %w( src/core/support/sync_win32.c )
@@ -381,6 +382,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/json/json_reader.c )
s.files += %w( src/core/json/json_string.c )
s.files += %w( src/core/json/json_writer.c )
s.files += %w( src/core/surface/alarm.c )
s.files += %w( src/core/surface/api_trace.c )
s.files += %w( src/core/surface/byte_buffer.c )
s.files += %w( src/core/surface/byte_buffer_reader.c )
@@ -432,6 +434,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/census/context.c )
s.files += %w( src/core/census/initialize.c )
s.files += %w( src/core/census/operation.c )
s.files += %w( src/core/census/placeholders.c )
s.files += %w( src/core/census/tag_set.c )
s.files += %w( src/core/census/tracing.c )
end

include/grpc/census.h

@@ -59,15 +59,15 @@ enum census_features {
* census_initialize() will return a non-zero value. It is an error to call
* census_initialize() more than once (without an intervening
* census_shutdown()). */
int census_initialize(int features);
void census_shutdown(void);
CENSUS_API int census_initialize(int features);
CENSUS_API void census_shutdown(void);
/** Return the features supported by the current census implementation (not all
* features will be available on all platforms). */
int census_supported(void);
CENSUS_API int census_supported(void);
/** Return the census features currently enabled. */
int census_enabled(void);
CENSUS_API int census_enabled(void);
/**
Context is a handle used by census to represent the current tracing and
@@ -90,8 +90,8 @@ typedef struct census_context census_context;
*
* TODO(aveitch): determine how best to communicate required/max buffer size
* so caller doesn't have to guess. */
size_t census_context_serialize(const census_context *context, char *buffer,
size_t buf_size);
CENSUS_API size_t census_context_serialize(const census_context *context,
char *buffer, size_t buf_size);
/* Distributed traces can have a number of options. */
enum census_trace_mask_values {
@@ -101,10 +101,10 @@ enum census_trace_mask_values {
/** Get the current trace mask associated with this context. The value returned
will be the logical or of census_trace_mask_values values. */
int census_trace_mask(const census_context *context);
CENSUS_API int census_trace_mask(const census_context *context);
/** Set the trace mask associated with a context. */
void census_set_trace_mask(int trace_mask);
CENSUS_API void census_set_trace_mask(int trace_mask);
/* The concept of "operation" is a fundamental concept for Census. In an RPC
system, an operation typically represents a single RPC, or a significant
@@ -152,7 +152,7 @@ typedef struct {
@return A timestamp representing the operation start time.
*/
census_timestamp census_start_rpc_op_timestamp(void);
CENSUS_API census_timestamp census_start_rpc_op_timestamp(void);
/**
Represent functions to map RPC name ID to service/method names. Census
@@ -204,7 +204,7 @@ typedef struct {
@return A new census context.
*/
census_context *census_start_client_rpc_op(
CENSUS_API census_context *census_start_client_rpc_op(
const census_context *context, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
const census_timestamp *start_time);
@@ -212,7 +212,8 @@ census_context *census_start_client_rpc_op(
/**
Add peer information to a context representing a client RPC operation.
*/
void census_set_rpc_client_peer(census_context *context, const char *peer);
CENSUS_API void census_set_rpc_client_peer(census_context *context,
const char *peer);
/**
Start a server RPC operation. Returns a new context to be used in future
@@ -232,7 +233,7 @@ void census_set_rpc_client_peer(census_context *context, const char *peer);
@return A new census context.
*/
census_context *census_start_server_rpc_op(
CENSUS_API census_context *census_start_server_rpc_op(
const char *buffer, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
census_timestamp *start_time);
@@ -262,8 +263,9 @@ census_context *census_start_server_rpc_op(
@return A new census context.
*/
census_context *census_start_op(census_context *context, const char *family,
const char *name, int trace_mask);
CENSUS_API census_context *census_start_op(census_context *context,
const char *family, const char *name,
int trace_mask);
/**
End an operation started by any of the census_start_*_op*() calls. The
@ -274,7 +276,7 @@ census_context *census_start_op(census_context *context, const char *family,
@param status status associated with the operation. Not interpreted by
census.
*/
void census_end_op(census_context *context, int status);
CENSUS_API void census_end_op(census_context *context, int status);
#define CENSUS_TRACE_RECORD_START_OP ((uint32_t)0)
#define CENSUS_TRACE_RECORD_END_OP ((uint32_t)1)
@@ -286,8 +288,8 @@ void census_end_op(census_context *context, int status);
@param buffer Pointer to buffer to use
@param n Number of bytes in buffer
*/
void census_trace_print(census_context *context, uint32_t type,
const char *buffer, size_t n);
CENSUS_API void census_trace_print(census_context *context, uint32_t type,
const char *buffer, size_t n);
/** Trace record. */
typedef struct {
@@ -308,7 +310,7 @@ typedef struct {
while scanning is ongoing.
@returns 0 on success, non-zero on failure (e.g. if a scan is already ongoing)
*/
int census_trace_scan_start(int consume);
CENSUS_API int census_trace_scan_start(int consume);
/** Get a trace record. The data pointed to by the trace buffer is guaranteed
stable until the next census_get_trace_record() call (if the consume
@@ -319,10 +321,10 @@ int census_trace_scan_start(int consume);
census_trace_scan_start()), 0 if there is no more trace data (and
trace_record will not be modified) or 1 otherwise.
*/
int census_get_trace_record(census_trace_record *trace_record);
CENSUS_API int census_get_trace_record(census_trace_record *trace_record);
/** End a scan previously started by census_trace_scan_start() */
void census_trace_scan_end();
CENSUS_API void census_trace_scan_end();
/* A Census tag set is a collection of key:value string pairs; these form the
basis against which Census metrics will be recorded. Keys are unique within
@@ -392,16 +394,16 @@ typedef struct {
tag set and status of the tags used in its creation.
@return A new, valid census_tag_set.
*/
census_tag_set *census_tag_set_create(
CENSUS_API census_tag_set *census_tag_set_create(
const census_tag_set *base, const census_tag *tags, int ntags,
census_tag_set_create_status const **status);
/* Destroy a tag set created by census_tag_set_create(). Once this function
has been called, the tag set cannot be reused. */
void census_tag_set_destroy(census_tag_set *tags);
CENSUS_API void census_tag_set_destroy(census_tag_set *tags);
/* Get a pointer to the original status from the creation of this tag set. */
const census_tag_set_create_status *census_tag_set_get_create_status(
CENSUS_API const census_tag_set_create_status *census_tag_set_get_create_status(
const census_tag_set *tags);
/* Structure used for tag set iteration. API clients should not use or
@@ -416,18 +418,19 @@ typedef struct {
/* Initialize a tag set iterator. Must be called before first use of the
iterator. */
void census_tag_set_initialize_iterator(const census_tag_set *tags,
census_tag_set_iterator *iterator);
CENSUS_API void census_tag_set_initialize_iterator(
const census_tag_set *tags, census_tag_set_iterator *iterator);
/* Get the contents of the "next" tag in the tag set. If there are no more
tags in the tag set, returns 0 (and 'tag' contents will be unchanged),
otherwise returns 1. */
int census_tag_set_next_tag(census_tag_set_iterator *iterator, census_tag *tag);
CENSUS_API int census_tag_set_next_tag(census_tag_set_iterator *iterator,
census_tag *tag);
/* Get a tag by its key. Returns 0 if the key is not present in the tag
set. */
int census_tag_set_get_tag_by_key(const census_tag_set *tags, const char *key,
census_tag *tag);
CENSUS_API int census_tag_set_get_tag_by_key(const census_tag_set *tags,
const char *key, census_tag *tag);
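The iterator hunks above only re-wrap the declarations under CENSUS_API; the iteration pattern itself is unchanged. A minimal sketch against those declarations; it only counts tags, since the fields of census_tag are not shown in this diff:

#include <grpc/census.h>

/* Count the tags in a tag set using the iterator API declared above. */
static int count_tags(const census_tag_set *tags) {
  census_tag_set_iterator it;
  census_tag tag;
  int n = 0;
  census_tag_set_initialize_iterator(tags, &it);
  while (census_tag_set_next_tag(&it, &tag)) ++n;
  return n;
}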
/* Tag set encode/decode functionality. These functions are intended
for use by RPC systems only, for purposes of transmitting/receiving tag
@@ -449,17 +452,19 @@ int census_tag_set_get_tag_by_key(const census_tag_set *tags, const char *key,
[buffer, buffer + *print_buf_size) and binary tags into
[returned-ptr, returned-ptr + *bin_buf_size) (and the return value
should be buffer + *print_buf_size) */
char *census_tag_set_encode(const census_tag_set *tags, char *buffer,
size_t buf_size, size_t *print_buf_size,
size_t *bin_buf_size);
CENSUS_API char *census_tag_set_encode(const census_tag_set *tags, char *buffer,
size_t buf_size, size_t *print_buf_size,
size_t *bin_buf_size);
/* Decode tag set buffers encoded with census_tag_set_encode_*(). Returns NULL
if there is an error in parsing either buffer. */
census_tag_set *census_tag_set_decode(const char *buffer, size_t size,
const char *bin_buffer, size_t bin_size);
CENSUS_API census_tag_set *census_tag_set_decode(const char *buffer,
size_t size,
const char *bin_buffer,
size_t bin_size);
/* Get a contexts tag set. */
census_tag_set *census_context_tag_set(census_context *context);
CENSUS_API census_tag_set *census_context_tag_set(census_context *context);
/* Core stats collection API's. The following concepts are used:
* Aggregation: A collection of values. Census supports the following
@@ -490,8 +495,8 @@ typedef struct {
} census_value;
/* Record new usage values against the given context. */
void census_record_values(census_context *context, census_value *values,
size_t nvalues);
CENSUS_API void census_record_values(census_context *context,
census_value *values, size_t nvalues);
/** Type representing a particular aggregation */
typedef struct census_aggregation_ops census_aggregation_ops;
@@ -521,24 +526,25 @@ typedef struct census_view census_view;
@return A new census view
*/
census_view *census_view_create(uint32_t metric_id, const census_tag_set *tags,
const census_aggregation *aggregations,
size_t naggregations);
CENSUS_API census_view *census_view_create(
uint32_t metric_id, const census_tag_set *tags,
const census_aggregation *aggregations, size_t naggregations);
/** Destroy a previously created view. */
void census_view_delete(census_view *view);
CENSUS_API void census_view_delete(census_view *view);
/** Metric ID associated with a view */
size_t census_view_metric(const census_view *view);
CENSUS_API size_t census_view_metric(const census_view *view);
/** Number of aggregations associated with view. */
size_t census_view_naggregations(const census_view *view);
CENSUS_API size_t census_view_naggregations(const census_view *view);
/** Get tags associated with view. */
const census_tag_set *census_view_tags(const census_view *view);
CENSUS_API const census_tag_set *census_view_tags(const census_view *view);
/** Get aggregation descriptors associated with a view. */
const census_aggregation *census_view_aggregrations(const census_view *view);
CENSUS_API const census_aggregation *census_view_aggregrations(
const census_view *view);
/** Holds all the aggregation data for a particular view instantiation. Forms
part of the data returned by census_view_data(). */
@@ -557,10 +563,11 @@ typedef struct {
@param view View from which to get data.
@return Full set of data for all aggregations for the view.
*/
const census_view_data *census_view_get_data(const census_view *view);
CENSUS_API const census_view_data *census_view_get_data(
const census_view *view);
/** Reset all view data to zero for the specified view */
void census_view_reset(census_view *view);
CENSUS_API void census_view_reset(census_view *view);
#ifdef __cplusplus
}
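All of the census.h hunks above only prepend CENSUS_API to existing declarations; the calling pattern is untouched. A minimal sketch of the lifecycle using only functions shown above. Passing census_supported() as the feature mask and NULL as the parent context are assumptions, not something this diff guarantees:

#include <grpc/census.h>
#include <stdio.h>

int main(void) {
  /* Returns non-zero on failure or if census_initialize() was already
     called; enable whatever features this build supports (assumption). */
  if (census_initialize(census_supported()) != 0) return 1;
  printf("enabled census features: %d\n", census_enabled());

  /* Start and end a trivial non-RPC operation. */
  census_context *ctx = census_start_op(NULL /* assumed valid */,
                                        "example_family", "example_op",
                                        0 /* trace_mask */);
  census_end_op(ctx, 0 /* status; not interpreted by census */);

  census_shutdown();
  return 0;
}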

include/grpc/compression.h

@@ -46,32 +46,33 @@ extern "C" {
/** Parses the first \a name_length bytes of \a name as a
* grpc_compression_algorithm instance, updating \a algorithm. Returns 1 upon
* success, 0 otherwise. */
int grpc_compression_algorithm_parse(const char *name, size_t name_length,
grpc_compression_algorithm *algorithm);
GRPC_API int grpc_compression_algorithm_parse(
const char *name, size_t name_length,
grpc_compression_algorithm *algorithm);
/** Updates \a name with the encoding name corresponding to a valid \a
* algorithm. Returns 1 upon success, 0 otherwise. */
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
char **name);
GRPC_API int grpc_compression_algorithm_name(
grpc_compression_algorithm algorithm, char **name);
/** Returns the compression algorithm corresponding to \a level.
*
* It abort()s for unknown levels. */
grpc_compression_algorithm grpc_compression_algorithm_for_level(
grpc_compression_level level);
GRPC_API grpc_compression_algorithm
grpc_compression_algorithm_for_level(grpc_compression_level level);
void grpc_compression_options_init(grpc_compression_options *opts);
GRPC_API void grpc_compression_options_init(grpc_compression_options *opts);
/** Mark \a algorithm as enabled in \a opts. */
void grpc_compression_options_enable_algorithm(
GRPC_API void grpc_compression_options_enable_algorithm(
grpc_compression_options *opts, grpc_compression_algorithm algorithm);
/** Mark \a algorithm as disabled in \a opts. */
void grpc_compression_options_disable_algorithm(
GRPC_API void grpc_compression_options_disable_algorithm(
grpc_compression_options *opts, grpc_compression_algorithm algorithm);
/** Returns true if \a algorithm is marked as enabled in \a opts. */
int grpc_compression_options_is_algorithm_enabled(
GRPC_API int grpc_compression_options_is_algorithm_enabled(
const grpc_compression_options *opts, grpc_compression_algorithm algorithm);
#ifdef __cplusplus
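Again, only GRPC_API is new in this header. A short sketch that round-trips an algorithm through the declarations above; "gzip" is assumed to be one of the algorithm names the parser accepts:

#include <grpc/compression.h>
#include <assert.h>
#include <string.h>

int main(void) {
  grpc_compression_algorithm alg;
  grpc_compression_options opts;

  /* Parse "gzip" into an enum value; returns 1 on success. */
  assert(grpc_compression_algorithm_parse("gzip", strlen("gzip"), &alg) == 1);

  /* Mark it enabled in a freshly initialized options struct. */
  grpc_compression_options_init(&opts);
  grpc_compression_options_enable_algorithm(&opts, alg);
  assert(grpc_compression_options_is_algorithm_enabled(&opts, alg));
  return 0;
}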

include/grpc/grpc.h

@@ -55,11 +55,11 @@ extern "C" {
* functionality lives in grpc_security.h.
*/
void grpc_metadata_array_init(grpc_metadata_array *array);
void grpc_metadata_array_destroy(grpc_metadata_array *array);
GRPC_API void grpc_metadata_array_init(grpc_metadata_array *array);
GRPC_API void grpc_metadata_array_destroy(grpc_metadata_array *array);
void grpc_call_details_init(grpc_call_details *details);
void grpc_call_details_destroy(grpc_call_details *details);
GRPC_API void grpc_call_details_init(grpc_call_details *details);
GRPC_API void grpc_call_details_destroy(grpc_call_details *details);
/** Registers a plugin to be initialized and destroyed with the library.
@@ -69,7 +69,7 @@ void grpc_call_details_destroy(grpc_call_details *details);
(and hence so will \a init and \a destroy).
It is safe to pass NULL to either argument. Plugins are destroyed in
the reverse order they were initialized. */
void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
GRPC_API void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
/** Initialize the grpc library.
@@ -77,7 +77,7 @@ void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
(To avoid overhead, little checking is done, and some things may work. We
do not warrant that they will continue to do so in future revisions of this
library). */
void grpc_init(void);
GRPC_API void grpc_init(void);
/** Shut down the grpc library.
@@ -85,13 +85,13 @@ void grpc_init(void);
executing within the grpc library.
Prior to calling, all application owned grpc objects must have been
destroyed. */
void grpc_shutdown(void);
GRPC_API void grpc_shutdown(void);
/** Return a string representing the current version of grpc */
const char *grpc_version_string(void);
GRPC_API const char *grpc_version_string(void);
/** Create a completion queue */
grpc_completion_queue *grpc_completion_queue_create(void *reserved);
GRPC_API grpc_completion_queue *grpc_completion_queue_create(void *reserved);
/** Blocks until an event is available, the completion queue is being shut down,
or deadline is reached.
@@ -101,8 +101,9 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved);
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
gpr_timespec deadline, void *reserved);
GRPC_API grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
gpr_timespec deadline,
void *reserved);
/** Blocks until an event with tag 'tag' is available, the completion queue is
being shutdown or deadline is reached.
@@ -115,8 +116,9 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
Completion queues support a maximum of GRPC_MAX_COMPLETION_QUEUE_PLUCKERS
concurrently executing plucks at any time. */
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
gpr_timespec deadline, void *reserved);
GRPC_API grpc_event
grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
gpr_timespec deadline, void *reserved);
/** Maximum number of outstanding grpc_completion_queue_pluck executions per
completion queue */
@@ -129,11 +131,11 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
After calling this function applications should ensure that no
NEW work is added to be published on this completion queue. */
void grpc_completion_queue_shutdown(grpc_completion_queue *cq);
GRPC_API void grpc_completion_queue_shutdown(grpc_completion_queue *cq);
/** Destroy a completion queue. The caller must ensure that the queue is
drained and no threads are executing grpc_completion_queue_next */
void grpc_completion_queue_destroy(grpc_completion_queue *cq);
GRPC_API void grpc_completion_queue_destroy(grpc_completion_queue *cq);
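The shutdown/destroy contract above implies the usual drain idiom: after grpc_completion_queue_shutdown, keep calling grpc_completion_queue_next until GRPC_QUEUE_SHUTDOWN comes back, then destroy. A sketch:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Drain a completion queue and destroy it, per the contract above. */
static void drain_and_destroy(grpc_completion_queue *cq) {
  grpc_completion_queue_shutdown(cq);
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(
        cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;
    /* GRPC_OP_COMPLETE events for already-queued work still arrive here. */
  }
  grpc_completion_queue_destroy(cq);
}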
/** Create a completion queue alarm instance associated to \a cq.
*
@@ -141,26 +143,27 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cq);
* grpc_alarm_cancel), an event with tag \a tag will be added to \a cq. If the
* alarm expired, the event's success bit will be true, false otherwise (ie,
* upon cancellation). */
grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
void *tag);
GRPC_API grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq,
gpr_timespec deadline, void *tag);
/** Cancel a completion queue alarm. Calling this function over an alarm that
* has already fired has no effect. */
void grpc_alarm_cancel(grpc_alarm *alarm);
GRPC_API void grpc_alarm_cancel(grpc_alarm *alarm);
/** Destroy the given completion queue alarm, cancelling it in the process. */
void grpc_alarm_destroy(grpc_alarm *alarm);
GRPC_API void grpc_alarm_destroy(grpc_alarm *alarm);
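These alarm declarations are what the new alarm_test target (added in the Makefile and build.yaml hunks earlier) exercises. A sketch of the expected flow, assuming the alarm is allowed to expire rather than being cancelled:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Arm an alarm one second out and block until it fires on the queue. */
static void fire_alarm(grpc_completion_queue *cq) {
  void *tag = (void *)1;
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(1, GPR_TIMESPAN));
  grpc_alarm *alarm = grpc_alarm_create(cq, deadline, tag);
  grpc_event ev = grpc_completion_queue_pluck(
      cq, tag, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  /* ev.success is 1 here because the alarm expired instead of being
     cancelled. */
  (void)ev;
  grpc_alarm_destroy(alarm);
}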
/** Check the connectivity state of a channel. */
grpc_connectivity_state grpc_channel_check_connectivity_state(
grpc_channel *channel, int try_to_connect);
GRPC_API grpc_connectivity_state
grpc_channel_check_connectivity_state(grpc_channel *channel,
int try_to_connect);
/** Watch for a change in connectivity state.
Once the channel connectivity state is different from last_observed_state,
tag will be enqueued on cq with success=1.
If deadline expires BEFORE the state is changed, tag will be enqueued on cq
with success=0. */
void grpc_channel_watch_connectivity_state(
GRPC_API void grpc_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, void *tag);
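Pairing the two declarations above gives the standard wait-for-state-change step; a sketch, with the five-second deadline being an arbitrary choice:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Wait up to five seconds for 'channel' to leave its current state.
   Returns 1 if the state changed, 0 if the deadline expired. */
static int wait_for_state_change(grpc_channel *channel,
                                 grpc_completion_queue *cq) {
  grpc_connectivity_state state =
      grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN));
  grpc_channel_watch_connectivity_state(channel, state, deadline, cq,
                                        (void *)1);
  grpc_event ev = grpc_completion_queue_pluck(
      cq, (void *)1, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  return ev.success;
}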
@@ -170,24 +173,24 @@ void grpc_channel_watch_connectivity_state(
If parent_call is non-NULL, it must be a server-side call. It will be used
to propagate properties from the server call to this new client call.
*/
grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_call *parent_call,
uint32_t propagation_mask,
grpc_completion_queue *completion_queue,
const char *method, const char *host,
gpr_timespec deadline, void *reserved);
GRPC_API grpc_call *grpc_channel_create_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, const char *method,
const char *host, gpr_timespec deadline, void *reserved);
/** Ping the channel's peer (load balanced channels will select one sub-channel
to ping); if the channel is not connected, posts a failure. */
void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
void *tag, void *reserved);
GRPC_API void grpc_channel_ping(grpc_channel *channel,
grpc_completion_queue *cq, void *tag,
void *reserved);
/** Pre-register a method/host pair on a channel. */
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host, void *reserved);
GRPC_API void *grpc_channel_register_call(grpc_channel *channel,
const char *method, const char *host,
void *reserved);
/** Create a call given a handle returned from grpc_channel_register_call */
grpc_call *grpc_channel_create_registered_call(
GRPC_API grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved);
@@ -203,8 +206,9 @@ grpc_call *grpc_channel_create_registered_call(
needs to be synchronized. As an optimization, you may synchronize batches
containing just send operations independently from batches containing just
receive operations. */
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t nops, void *tag, void *reserved);
GRPC_API grpc_call_error grpc_call_start_batch(grpc_call *call,
const grpc_op *ops, size_t nops,
void *tag, void *reserved);
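grpc_call_start_batch is the workhorse the surrounding comments describe. A minimal sketch that queues an empty initial-metadata batch on an existing call and waits for its completion; error handling is elided:

#include <grpc/grpc.h>
#include <grpc/support/time.h>
#include <string.h>

/* Queue a single send-initial-metadata op on 'call' and wait for it. */
static grpc_call_error send_initial_md(grpc_call *call,
                                       grpc_completion_queue *cq) {
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_INITIAL_METADATA;
  op.data.send_initial_metadata.count = 0;
  grpc_call_error err = grpc_call_start_batch(call, &op, 1, (void *)1, NULL);
  if (err == GRPC_CALL_OK) {
    grpc_completion_queue_pluck(cq, (void *)1,
                                gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  }
  return err;
}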
/** Returns a newly allocated string representing the endpoint with which this
call is communicating. The string is in the uri format accepted by
@@ -214,38 +218,36 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
WARNING: this value is never authenticated or subject to any security
related code. It must not be used for any authentication related
functionality. Instead, use grpc_auth_context. */
char *grpc_call_get_peer(grpc_call *call);
GRPC_API char *grpc_call_get_peer(grpc_call *call);
struct census_context;
/* Set census context for a call; must be called before first call to
grpc_call_start_batch(). */
void grpc_census_call_set_context(grpc_call *call,
struct census_context *context);
GRPC_API void grpc_census_call_set_context(grpc_call *call,
struct census_context *context);
/* Retrieve the calls current census context. */
struct census_context *grpc_census_call_get_context(grpc_call *call);
GRPC_API struct census_context *grpc_census_call_get_context(grpc_call *call);
/** Return a newly allocated string representing the target a channel was
created for. */
char *grpc_channel_get_target(grpc_channel *channel);
GRPC_API char *grpc_channel_get_target(grpc_channel *channel);
/** Create a client channel to 'target'. Additional channel level configuration
MAY be provided by grpc_channel_args, though the expectation is that most
clients will want to simply pass NULL. See grpc_channel_args definition for
more on this. The data in 'args' need only live through the invocation of
this function. */
grpc_channel *grpc_insecure_channel_create(const char *target,
const grpc_channel_args *args,
void *reserved);
GRPC_API grpc_channel *grpc_insecure_channel_create(
const char *target, const grpc_channel_args *args, void *reserved);
/** Create a lame client: this client fails every operation attempted on it. */
grpc_channel *grpc_lame_client_channel_create(const char *target,
grpc_status_code error_code,
const char *error_message);
GRPC_API grpc_channel *grpc_lame_client_channel_create(
const char *target, grpc_status_code error_code, const char *error_message);
/** Close and destroy a grpc channel */
void grpc_channel_destroy(grpc_channel *channel);
GRPC_API void grpc_channel_destroy(grpc_channel *channel);
/* Error handling for grpc_call
Most grpc_call functions return a grpc_error. If the error is not GRPC_OK
@@ -258,7 +260,7 @@ void grpc_channel_destroy(grpc_channel *channel);
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_destroy
is called.*/
grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
GRPC_API grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
/** Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
@@ -266,14 +268,13 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
and description passed in.
Importantly, this function does not send status nor description to the
remote endpoint. */
grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
grpc_status_code status,
const char *description,
void *reserved);
GRPC_API grpc_call_error
grpc_call_cancel_with_status(grpc_call *call, grpc_status_code status,
const char *description, void *reserved);
/** Destroy a call.
THREAD SAFETY: grpc_call_destroy is thread-compatible */
void grpc_call_destroy(grpc_call *call);
GRPC_API void grpc_call_destroy(grpc_call *call);
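Since this merge feeds the server_try_cancel_api branch, the client-side cancellation surface above is worth a sketch. The address and method name are placeholders:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

int main(void) {
  grpc_init();
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  grpc_channel *channel =
      grpc_insecure_channel_create("localhost:50051", NULL, NULL);
  grpc_call *call = grpc_channel_create_call(
      channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, "/pkg.Service/Method",
      "localhost", gpr_inf_future(GPR_CLOCK_REALTIME), NULL);

  /* Cancels locally; the status/description are NOT sent to the peer. */
  grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "client gave up",
                               NULL);

  grpc_call_destroy(call);
  grpc_channel_destroy(channel);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL).type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}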
/** Request notification of a new call.
Once a call is received, a notification tagged with \a tag_new is added to
@@ -283,11 +284,13 @@ void grpc_call_destroy(grpc_call *call);
to \a cq_bound_to_call.
Note that \a cq_for_notification must have been registered to the server via
\a grpc_server_register_completion_queue. */
grpc_call_error grpc_server_request_call(
grpc_server *server, grpc_call **call, grpc_call_details *details,
grpc_metadata_array *request_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new);
GRPC_API grpc_call_error
grpc_server_request_call(grpc_server *server, grpc_call **call,
grpc_call_details *details,
grpc_metadata_array *request_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification,
void *tag_new);
/** Registers a method in the server.
Methods to this (host, method) pair will not be reported by
@@ -296,13 +299,14 @@ grpc_call_error grpc_server_request_call(
registered_method (as returned by this function).
Must be called before grpc_server_start.
Returns NULL on failure. */
void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host);
GRPC_API void *grpc_server_register_method(grpc_server *server,
const char *method,
const char *host);
/** Request notification of a new pre-registered call. 'cq_for_notification'
must have been registered to the server via
grpc_server_register_completion_queue. */
grpc_call_error grpc_server_request_registered_call(
GRPC_API grpc_call_error grpc_server_request_registered_call(
grpc_server *server, void *registered_method, grpc_call **call,
gpr_timespec *deadline, grpc_metadata_array *request_metadata,
grpc_byte_buffer **optional_payload,
@@ -313,23 +317,25 @@ grpc_call_error grpc_server_request_registered_call(
be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved);
GRPC_API grpc_server *grpc_server_create(const grpc_channel_args *args,
void *reserved);
/** Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call
and to grpc_server_shutdown_and_notify. Must be performed prior to
grpc_server_start. */
void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq,
void *reserved);
GRPC_API void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq,
void *reserved);
/** Add a HTTP2 over plaintext over tcp listener.
Returns bound port number on success, 0 on failure.
REQUIRES: server not started */
int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr);
GRPC_API int grpc_server_add_insecure_http2_port(grpc_server *server,
const char *addr);
/** Start a server - tells all listeners to start listening */
void grpc_server_start(grpc_server *server);
GRPC_API void grpc_server_start(grpc_server *server);
/** Begin shutting down a server.
After completion, no new calls or connections will be admitted.
@@ -338,18 +344,19 @@ void grpc_server_start(grpc_server *server);
Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. 'cq' must have been
registered to this server via grpc_server_register_completion_queue. */
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag);
GRPC_API void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq,
void *tag);
/** Cancel all in-progress calls.
Only usable after shutdown. */
void grpc_server_cancel_all_calls(grpc_server *server);
GRPC_API void grpc_server_cancel_all_calls(grpc_server *server);
/** Destroy a server.
Shutdown must have completed beforehand (i.e. all tags generated by
grpc_server_shutdown_and_notify must have been received, and at least
one call to grpc_server_shutdown_and_notify must have been made). */
void grpc_server_destroy(grpc_server *server);
GRPC_API void grpc_server_destroy(grpc_server *server);
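The server declarations above compose into the standard lifecycle. A sketch with a placeholder address; the request-call loop that a real server would run is elided:

#include <grpc/grpc.h>
#include <grpc/support/time.h>
#include <stdio.h>

int main(void) {
  grpc_init();
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  grpc_server *server = grpc_server_create(NULL, NULL);
  grpc_server_register_completion_queue(server, cq, NULL);
  if (grpc_server_add_insecure_http2_port(server, "0.0.0.0:50051") == 0) {
    fprintf(stderr, "bind failed\n");
    return 1;
  }
  grpc_server_start(server);

  /* ... grpc_server_request_call / grpc_call_start_batch loop ... */

  grpc_server_shutdown_and_notify(server, cq, (void *)1);
  grpc_server_cancel_all_calls(server); /* only usable after shutdown */
  grpc_completion_queue_pluck(cq, (void *)1,
                              gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  grpc_server_destroy(server);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL).type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}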
/** Enable or disable a tracer.
@@ -359,17 +366,18 @@ void grpc_server_destroy(grpc_server *server);
Use of this function is not strictly thread-safe, but the
thread-safety issues raised by it should not be of concern. */
int grpc_tracer_set_enabled(const char *name, int enabled);
GRPC_API int grpc_tracer_set_enabled(const char *name, int enabled);
/** Check whether a metadata key is legal (will be accepted by core) */
int grpc_header_key_is_legal(const char *key, size_t length);
GRPC_API int grpc_header_key_is_legal(const char *key, size_t length);
/** Check whether a non-binary metadata value is legal (will be accepted by
core) */
int grpc_header_nonbin_value_is_legal(const char *value, size_t length);
GRPC_API int grpc_header_nonbin_value_is_legal(const char *value,
size_t length);
/** Check whether a metadata key corresponds to a binary value */
int grpc_is_binary_header(const char *key, size_t length);
GRPC_API int grpc_is_binary_header(const char *key, size_t length);
#ifdef __cplusplus
}

include/grpc/grpc_security.h

@@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,37 +65,39 @@ typedef struct grpc_auth_property {
} grpc_auth_property;
/* Returns NULL when the iterator is at the end. */
const grpc_auth_property *grpc_auth_property_iterator_next(
GRPC_API const grpc_auth_property *grpc_auth_property_iterator_next(
grpc_auth_property_iterator *it);
/* Iterates over the auth context. */
grpc_auth_property_iterator grpc_auth_context_property_iterator(
const grpc_auth_context *ctx);
GRPC_API grpc_auth_property_iterator
grpc_auth_context_property_iterator(const grpc_auth_context *ctx);
/* Gets the peer identity. Returns an empty iterator (first _next will return
NULL) if the peer is not authenticated. */
grpc_auth_property_iterator grpc_auth_context_peer_identity(
const grpc_auth_context *ctx);
GRPC_API grpc_auth_property_iterator
grpc_auth_context_peer_identity(const grpc_auth_context *ctx);
/* Finds a property in the context. May return an empty iterator (first _next
will return NULL) if no property with this name was found in the context. */
grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
const grpc_auth_context *ctx, const char *name);
GRPC_API grpc_auth_property_iterator
grpc_auth_context_find_properties_by_name(const grpc_auth_context *ctx,
const char *name);
/* Gets the name of the property that indicates the peer identity. Will return
NULL if the peer is not authenticated. */
const char *grpc_auth_context_peer_identity_property_name(
GRPC_API const char *grpc_auth_context_peer_identity_property_name(
const grpc_auth_context *ctx);
/* Returns 1 if the peer is authenticated, 0 otherwise. */
int grpc_auth_context_peer_is_authenticated(const grpc_auth_context *ctx);
GRPC_API int grpc_auth_context_peer_is_authenticated(
const grpc_auth_context *ctx);
/* Gets the auth context from the call. Caller needs to call
grpc_auth_context_release on the returned context. */
grpc_auth_context *grpc_call_auth_context(grpc_call *call);
GRPC_API grpc_auth_context *grpc_call_auth_context(grpc_call *call);
/* Releases the auth context returned from grpc_call_auth_context. */
void grpc_auth_context_release(grpc_auth_context *context);
GRPC_API void grpc_auth_context_release(grpc_auth_context *context);
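A sketch of walking the peer identity with the iterator declarations above; the name/value/value_length fields of grpc_auth_property are assumed from the (elided) struct definition:

#include <grpc/grpc_security.h>
#include <stdio.h>

/* Print the authenticated peer identity of 'call', if there is one. */
static void print_peer_identity(grpc_call *call) {
  grpc_auth_context *ctx = grpc_call_auth_context(call);
  if (ctx == NULL) return;
  if (grpc_auth_context_peer_is_authenticated(ctx)) {
    grpc_auth_property_iterator it = grpc_auth_context_peer_identity(ctx);
    const grpc_auth_property *prop;
    while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
      printf("peer identity: %.*s\n", (int)prop->value_length, prop->value);
    }
  }
  grpc_auth_context_release(ctx); /* balances grpc_call_auth_context */
}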
/* --
The following auth context methods should only be called by a server metadata
@@ -103,18 +105,20 @@ void grpc_auth_context_release(grpc_auth_context *context);
-- */
/* Add a property. */
void grpc_auth_context_add_property(grpc_auth_context *ctx, const char *name,
const char *value, size_t value_length);
GRPC_API void grpc_auth_context_add_property(grpc_auth_context *ctx,
const char *name,
const char *value,
size_t value_length);
/* Add a C string property. */
void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx,
const char *name,
const char *value);
GRPC_API void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx,
const char *name,
const char *value);
/* Sets the property name. Returns 1 if successful or 0 in case of failure
(which means that no property with this name exists). */
int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context *ctx,
const char *name);
GRPC_API int grpc_auth_context_set_peer_identity_property_name(
grpc_auth_context *ctx, const char *name);
/* --- grpc_channel_credentials object. ---
@@ -125,7 +129,7 @@ typedef struct grpc_channel_credentials grpc_channel_credentials;
/* Releases a channel credentials object.
The creator of the credentials object is responsible for its release. */
void grpc_channel_credentials_release(grpc_channel_credentials *creds);
GRPC_API void grpc_channel_credentials_release(grpc_channel_credentials *creds);
/* Environment variable that points to the google default application
credentials json key or refresh token. Used in the
@@ -135,7 +139,7 @@ void grpc_channel_credentials_release(grpc_channel_credentials *creds);
/* Creates default credentials to connect to a google gRPC service.
WARNING: Do NOT use these credentials to connect to a non-google service as
this could result in an oauth2 token leak. */
grpc_channel_credentials *grpc_google_default_credentials_create(void);
GRPC_API grpc_channel_credentials *grpc_google_default_credentials_create(void);
/* Environment variable that points to the default SSL roots file. This file
must be a PEM encoded file with all the roots such as the one that can be
@@ -143,6 +147,29 @@ grpc_channel_credentials *grpc_google_default_credentials_create(void);
#define GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR \
"GRPC_DEFAULT_SSL_ROOTS_FILE_PATH"
/* Results for the SSL roots override callback. */
typedef enum {
GRPC_SSL_ROOTS_OVERRIDE_OK,
GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY, /* Do not try fallback options. */
GRPC_SSL_ROOTS_OVERRIDE_FAIL
} grpc_ssl_roots_override_result;
/* Callback for getting the SSL roots override from the application.
In case of success, *pem_root_certs must be set to a NULL terminated string
containing the list of PEM encoded root certificates. The ownership is passed
to the core, which will free it (later) with gpr_free.
If this function fails and the GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment
variable is set to a valid path, that file will override the roots specified
by this function. */
typedef grpc_ssl_roots_override_result (*grpc_ssl_roots_override_callback)(
char **pem_root_certs);
/* Set up a callback to override the default TLS/SSL roots.
This function is not thread-safe and must be called at initialization time
before any ssl credentials are created to have the desired side effect.
If GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment is set to a valid path, the
callback will not be called. */
void grpc_set_ssl_roots_override_callback(grpc_ssl_roots_override_callback cb);
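A sketch of wiring up the new callback. The PEM bundle is a placeholder, and gpr_strdup (from grpc/support/string_util.h) is used so the core can release the buffer with gpr_free as documented:

#include <grpc/grpc_security.h>
#include <grpc/support/string_util.h>

static grpc_ssl_roots_override_result override_roots(char **pem_root_certs) {
  /* Placeholder; real code would load its trusted roots here. */
  *pem_root_certs = gpr_strdup(
      "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n");
  return *pem_root_certs != NULL ? GRPC_SSL_ROOTS_OVERRIDE_OK
                                 : GRPC_SSL_ROOTS_OVERRIDE_FAIL;
}

/* Must run at initialization time, before any SSL credentials exist. */
void install_roots_override(void) {
  grpc_set_ssl_roots_override_callback(override_roots);
}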
/* Object that holds a private key / certificate chain pair in PEM format. */
typedef struct {
/* private_key is the NULL-terminated string containing the PEM encoding of
@@ -159,12 +186,13 @@ typedef struct {
of the server root certificates. If this parameter is NULL, the
implementation will first try to dereference the file pointed by the
GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment variable, and if that fails,
get the roots from a well-known place on disk (in the grpc install
directory).
try to get the roots set by grpc_set_ssl_roots_override_callback. Eventually,
if all these fail, it will try to get the roots from a well-known place on
disk (in the grpc install directory).
- pem_key_cert_pair is a pointer to the object containing the client's private
key and certificate chain. This parameter can be NULL if the client does
not have such a key/cert pair. */
grpc_channel_credentials *grpc_ssl_credentials_create(
GRPC_API grpc_channel_credentials *grpc_ssl_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
void *reserved);
@@ -178,22 +206,22 @@ typedef struct grpc_call_credentials grpc_call_credentials;
/* Releases a call credentials object.
The creator of the credentials object is responsible for its release. */
void grpc_call_credentials_release(grpc_call_credentials *creds);
GRPC_API void grpc_call_credentials_release(grpc_call_credentials *creds);
/* Creates a composite channel credentials object. */
grpc_channel_credentials *grpc_composite_channel_credentials_create(
GRPC_API grpc_channel_credentials *grpc_composite_channel_credentials_create(
grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
void *reserved);
/* Creates a composite call credentials object. */
grpc_call_credentials *grpc_composite_call_credentials_create(
GRPC_API grpc_call_credentials *grpc_composite_call_credentials_create(
grpc_call_credentials *creds1, grpc_call_credentials *creds2,
void *reserved);
/* Creates a compute engine credentials object for connecting to Google.
WARNING: Do NOT use these credentials to connect to a non-google service as
this could result in an oauth2 token leak. */
grpc_call_credentials *grpc_google_compute_engine_credentials_create(
GRPC_API grpc_call_credentials *grpc_google_compute_engine_credentials_create(
void *reserved);
extern const gpr_timespec grpc_max_auth_token_lifetime;
@@ -203,8 +231,10 @@ extern const gpr_timespec grpc_max_auth_token_lifetime;
- token_lifetime is the lifetime of each Json Web Token (JWT) created with
these credentials. It should not exceed grpc_max_auth_token_lifetime, or it
will be cropped to this value. */
grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
const char *json_key, gpr_timespec token_lifetime, void *reserved);
GRPC_API grpc_call_credentials *
grpc_service_account_jwt_access_credentials_create(const char *json_key,
gpr_timespec token_lifetime,
void *reserved);
/* Creates an Oauth2 Refresh Token credentials object for connecting to Google.
May return NULL if the input is invalid.
@@ -212,16 +242,16 @@ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
this could result in an oauth2 token leak.
- json_refresh_token is the JSON string containing the refresh token itself
along with a client_id and client_secret. */
grpc_call_credentials *grpc_google_refresh_token_credentials_create(
GRPC_API grpc_call_credentials *grpc_google_refresh_token_credentials_create(
const char *json_refresh_token, void *reserved);
/* Creates an Oauth2 Access Token credentials with an access token that was
acquired by an out-of-band mechanism. */
grpc_call_credentials *grpc_access_token_credentials_create(
GRPC_API grpc_call_credentials *grpc_access_token_credentials_create(
const char *access_token, void *reserved);
/* Creates an IAM credentials object for connecting to Google. */
grpc_call_credentials *grpc_google_iam_credentials_create(
GRPC_API grpc_call_credentials *grpc_google_iam_credentials_create(
const char *authorization_token, const char *authority_selector,
void *reserved);
@@ -283,16 +313,15 @@ typedef struct {
} grpc_metadata_credentials_plugin;
/* Creates a credentials object from a plugin. */
grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
GRPC_API grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
grpc_metadata_credentials_plugin plugin, void *reserved);
/* --- Secure channel creation. --- */
/* Creates a secure channel using the passed-in credentials. */
grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
const char *target,
const grpc_channel_args *args,
void *reserved);
GRPC_API grpc_channel *grpc_secure_channel_create(
grpc_channel_credentials *creds, const char *target,
const grpc_channel_args *args, void *reserved);
/* --- grpc_server_credentials object. ---
@ -303,7 +332,7 @@ typedef struct grpc_server_credentials grpc_server_credentials;
/* Releases a server_credentials object.
The creator of the server_credentials object is responsible for its release.
*/
void grpc_server_credentials_release(grpc_server_credentials *creds);
GRPC_API void grpc_server_credentials_release(grpc_server_credentials *creds);
/* Creates an SSL server_credentials object.
- pem_root_certs is the NULL-terminated string containing the PEM encoding of
@ -316,7 +345,7 @@ void grpc_server_credentials_release(grpc_server_credentials *creds);
- force_client_auth, if set to non-zero, will force the client to authenticate
with an SSL cert. Note that this option is ignored if pem_root_certs is
NULL. */
grpc_server_credentials *grpc_ssl_server_credentials_create(
GRPC_API grpc_server_credentials *grpc_ssl_server_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, int force_client_auth, void *reserved);
@ -325,15 +354,16 @@ grpc_server_credentials *grpc_ssl_server_credentials_create(
/* Add an HTTP/2 listener over an encrypted TCP link.
Returns bound port number on success, 0 on failure.
REQUIRES: server not started */
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_server_credentials *creds);
GRPC_API int grpc_server_add_secure_http2_port(grpc_server *server,
const char *addr,
grpc_server_credentials *creds);
/* --- Call specific credentials. --- */
/* Sets credentials on a call. May only be called on the client side, before
grpc_call_start_batch. */
grpc_call_error grpc_call_set_credentials(grpc_call *call,
grpc_call_credentials *creds);
GRPC_API grpc_call_error
grpc_call_set_credentials(grpc_call *call, grpc_call_credentials *creds);
/* --- Auth Metadata Processing --- */
@ -364,7 +394,7 @@ typedef struct {
void *state;
} grpc_auth_metadata_processor;
void grpc_server_credentials_set_auth_metadata_processor(
GRPC_API void grpc_server_credentials_set_auth_metadata_processor(
grpc_server_credentials *creds, grpc_auth_metadata_processor processor);
#ifdef __cplusplus
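Taken together, the constructors in this header compose in the obvious way. A minimal client-side sketch, assuming default root certificates (NULL pem_root_certs), no client key/cert pair, and a hypothetical endpoint; grpc_channel_credentials_release is declared earlier in this same header:

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <stddef.h>

/* Sketch: SSL channel credentials with default roots, then a secure
   channel to an illustrative target. Not a complete program. */
static grpc_channel *make_secure_channel(void) {
  grpc_channel_credentials *creds = grpc_ssl_credentials_create(
      NULL /* default roots */, NULL /* no client key/cert */, NULL);
  grpc_channel *channel = grpc_secure_channel_create(
      creds, "myservice.example.com:443", NULL /* default args */, NULL);
  grpc_channel_credentials_release(creds); /* channel holds its own ref */
  return channel;
}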

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -50,7 +50,7 @@ extern "C" {
#endif
/** Register the zookeeper name resolver in gRPC */
void grpc_zookeeper_register();
GRPC_API void grpc_zookeeper_register();
#ifdef __cplusplus
}

@ -36,6 +36,8 @@
#include <stddef.h>
#include <grpc/impl/codegen/port_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -47,23 +49,23 @@ typedef struct gpr_allocation_functions {
} gpr_allocation_functions;
/* malloc, never returns NULL */
void *gpr_malloc(size_t size);
GPR_API void *gpr_malloc(size_t size);
/* free */
void gpr_free(void *ptr);
GPR_API void gpr_free(void *ptr);
/* realloc, never returns NULL */
void *gpr_realloc(void *p, size_t size);
GPR_API void *gpr_realloc(void *p, size_t size);
/* aligned malloc, never returns NULL, will align to 1 << alignment_log */
void *gpr_malloc_aligned(size_t size, size_t alignment_log);
GPR_API void *gpr_malloc_aligned(size_t size, size_t alignment_log);
/* free memory allocated by gpr_malloc_aligned */
void gpr_free_aligned(void *ptr);
GPR_API void gpr_free_aligned(void *ptr);
/** Request the family of allocation functions in \a functions be used. NOTE
* that this request will be honored on a *best effort* basis and that no
* guarantees are made about the default functions (e.g., malloc) being called. */
void gpr_set_allocation_functions(gpr_allocation_functions functions);
GPR_API void gpr_set_allocation_functions(gpr_allocation_functions functions);
/** Return the family of allocation functions currently in effect. */
gpr_allocation_functions gpr_get_allocation_functions();
GPR_API gpr_allocation_functions gpr_get_allocation_functions();
#ifdef __cplusplus
}
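A sketch of the override hook above. The counting wrapper is illustrative, not part of the API, and the initializer assumes the declared field order malloc_fn, realloc_fn, free_fn:

#include <grpc/support/alloc.h>
#include <stdlib.h>

/* Illustrative counting allocator installed via
   gpr_set_allocation_functions(); the counter is a sketch, not gRPC API. */
static size_t g_alloc_count;

static void *counting_malloc(size_t size) {
  ++g_alloc_count;
  return malloc(size);
}
static void *counting_realloc(void *p, size_t size) { return realloc(p, size); }
static void counting_free(void *p) { free(p); }

static void install_counting_allocator(void) {
  gpr_allocation_functions fns = {counting_malloc, counting_realloc,
                                  counting_free};
  gpr_set_allocation_functions(fns);
}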

@ -65,8 +65,8 @@ typedef struct grpc_byte_buffer grpc_byte_buffer;
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices,
size_t nslices);
GRPC_API grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices,
size_t nslices);
/** Returns a *compressed* RAW byte buffer instance over the given slices (up to
* \a nslices). The \a compression argument defines the compression algorithm
@ -74,43 +74,44 @@ grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices,
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
GRPC_API grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression);
/** Copies input byte buffer \a bb.
*
* Increases the reference count of all the source slices. The user is
* responsible for calling grpc_byte_buffer_destroy over the returned copy. */
grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
GRPC_API grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
/** Returns the size of the given byte buffer, in bytes. */
size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
GRPC_API size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
/** Destroys \a byte_buffer deallocating all its memory. */
void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
GRPC_API void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
/** Reader for byte buffers. Iterates over slices in the byte buffer */
struct grpc_byte_buffer_reader;
typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
/** Initialize \a reader to read over \a buffer */
void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer);
GRPC_API void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer);
/** Cleanup and destroy \a reader */
void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
GRPC_API void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
/** Updates \a slice with the next piece of data from \a reader and returns
* 1. Returns 0 at the end of the stream. Caller is responsible for calling
* gpr_slice_unref on the result. */
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
gpr_slice *slice);
GRPC_API int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
gpr_slice *slice);
/** Merge all data from \a reader into single slice */
gpr_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader);
GRPC_API gpr_slice
grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader);
/** Returns a RAW byte buffer instance from the output of \a reader. */
grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
GRPC_API grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader);
#ifdef __cplusplus
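The reader protocol above is driven in a loop; a sketch that sums a buffer's bytes slice by slice (grpc_byte_buffer_length would do this directly, the loop only illustrates the init/next/destroy sequence):

#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/support/slice.h>

/* Sketch: iterate a byte buffer slice-by-slice, summing its length. */
static size_t count_bytes(grpc_byte_buffer *bb) {
  grpc_byte_buffer_reader reader;
  gpr_slice slice;
  size_t total = 0;
  grpc_byte_buffer_reader_init(&reader, bb);
  while (grpc_byte_buffer_reader_next(&reader, &slice)) {
    total += GPR_SLICE_LENGTH(slice);
    gpr_slice_unref(slice); /* reader_next refs each slice for the caller */
  }
  grpc_byte_buffer_reader_destroy(&reader);
  return total;
}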

@ -37,6 +37,8 @@
#include <stdlib.h> /* for abort() */
#include <stdarg.h>
#include <grpc/impl/codegen/port_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -69,11 +71,11 @@ const char *gpr_log_severity_string(gpr_log_severity severity);
/* Log a message. It's advised to use GPR_xxx above to generate the context
* for each message */
void gpr_log(const char *file, int line, gpr_log_severity severity,
const char *format, ...);
GPR_API void gpr_log(const char *file, int line, gpr_log_severity severity,
const char *format, ...);
void gpr_log_message(const char *file, int line, gpr_log_severity severity,
const char *message);
GPR_API void gpr_log_message(const char *file, int line,
gpr_log_severity severity, const char *message);
/* Log overrides: applications can use this API to intercept logging calls
and use their own implementations */
@ -86,7 +88,7 @@ typedef struct {
} gpr_log_func_args;
typedef void (*gpr_log_func)(gpr_log_func_args *args);
void gpr_set_log_function(gpr_log_func func);
GPR_API void gpr_set_log_function(gpr_log_func func);
/* abort() the process if x is zero, having written a line to the log.
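The entry points above are normally reached through the GPR_DEBUG/GPR_INFO/GPR_ERROR convenience macros defined earlier in this header, which expand to the file/line/severity triple; a minimal sketch:

#include <grpc/support/log.h>

/* Sketch: GPR_INFO expands to __FILE__, __LINE__, GPR_LOG_SEVERITY_INFO,
   supplying gpr_log's first three arguments. */
static void log_attempt(int attempt) {
  gpr_log(GPR_INFO, "connect attempt %d", attempt);
  if (attempt > 5) {
    gpr_log(GPR_ERROR, "giving up after %d attempts", attempt);
  }
}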

@ -80,6 +80,7 @@
#define GPR_ARCH_64 1
#define GPR_GETPID_IN_PROCESS_H 1
#define GPR_WINSOCK_SOCKET 1
#define GPR_WINDOWS_SUBPROCESS 1
#ifdef __GNUC__
#define GPR_GCC_ATOMIC 1
#define GPR_GCC_TLS 1
@ -94,6 +95,7 @@
#define GPR_WIN32 1
#define GPR_GETPID_IN_PROCESS_H 1
#define GPR_WINSOCK_SOCKET 1
#define GPR_WINDOWS_SUBPROCESS 1
#ifdef __GNUC__
#define GPR_GCC_ATOMIC 1
#define GPR_GCC_TLS 1
@ -337,4 +339,16 @@
} while (0)
#endif /* GPR_FORBID_UNREACHABLE_CODE */
#ifndef GPR_API
#define GPR_API
#endif
#ifndef GRPC_API
#define GRPC_API GPR_API
#endif
#ifndef CENSUS_API
#define CENSUS_API GRPC_API
#endif
#endif /* GRPC_IMPL_CODEGEN_PORT_PLATFORM_H */
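These defaults make the annotations no-ops for static builds. A shared-library build would be expected to define GPR_API before this header is included, with GRPC_API and CENSUS_API inheriting it; a sketch for Windows, where GRPC_BUILDING_DLL is a hypothetical build flag, not one shipped with gRPC:

/* Sketch: how a DLL build might define the export macro up front. */
#ifdef GRPC_BUILDING_DLL
#define GPR_API __declspec(dllexport)
#else
#define GPR_API __declspec(dllimport)
#endif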

@ -105,7 +105,7 @@ typedef struct gpr_slice {
/* Increment the refcount of s. Requires slice is initialized.
Returns s. */
gpr_slice gpr_slice_ref(gpr_slice s);
GPR_API gpr_slice gpr_slice_ref(gpr_slice s);
/* Decrement the ref count of s. If the ref count of s reaches zero, all
slices sharing the ref count are destroyed, and considered no longer
@ -113,22 +113,22 @@ gpr_slice gpr_slice_ref(gpr_slice s);
len, dest) where dest!=NULL, then (*dest)(start) is called; else if s is
ultimately derived from a call to gpr_slice_new_with_len(start, len, dest)
where dest!=NULL, then (*dest)(start, len) is called. Requires s initialized. */
void gpr_slice_unref(gpr_slice s);
GPR_API void gpr_slice_unref(gpr_slice s);
/* Create a slice pointing at some data. Calls malloc to allocate a refcount
for the object, and arranges that destroy will be called with the pointer
passed in at destruction. */
gpr_slice gpr_slice_new(void *p, size_t len, void (*destroy)(void *));
GPR_API gpr_slice gpr_slice_new(void *p, size_t len, void (*destroy)(void *));
/* Equivalent to gpr_slice_new, but with a two argument destroy function that
also takes the slice length. */
gpr_slice gpr_slice_new_with_len(void *p, size_t len,
void (*destroy)(void *, size_t));
GPR_API gpr_slice
gpr_slice_new_with_len(void *p, size_t len, void (*destroy)(void *, size_t));
/* Equivalent to gpr_slice_new(malloc(len), len, free), but saves one malloc()
call.
Aborts if malloc() fails. */
gpr_slice gpr_slice_malloc(size_t length);
GPR_API gpr_slice gpr_slice_malloc(size_t length);
/* Create a slice by copying a string.
Does not preserve null terminators.
@ -136,44 +136,44 @@ gpr_slice gpr_slice_malloc(size_t length);
size_t len = strlen(source);
gpr_slice slice = gpr_slice_malloc(len);
memcpy(slice->data, source, len); */
gpr_slice gpr_slice_from_copied_string(const char *source);
GPR_API gpr_slice gpr_slice_from_copied_string(const char *source);
/* Create a slice by copying a buffer.
Equivalent to:
gpr_slice slice = gpr_slice_malloc(len);
memcpy(slice->data, source, len); */
gpr_slice gpr_slice_from_copied_buffer(const char *source, size_t len);
GPR_API gpr_slice gpr_slice_from_copied_buffer(const char *source, size_t len);
/* Create a slice pointing to constant memory */
gpr_slice gpr_slice_from_static_string(const char *source);
GPR_API gpr_slice gpr_slice_from_static_string(const char *source);
/* Return a result slice derived from s, which shares a ref count with s, where
result.data==s.data+begin, and result.length==end-begin.
The ref count of s is increased by one.
Requires s initialized, begin <= end, begin <= s.length, and
end <= s.length. */
gpr_slice gpr_slice_sub(gpr_slice s, size_t begin, size_t end);
GPR_API gpr_slice gpr_slice_sub(gpr_slice s, size_t begin, size_t end);
/* The same as gpr_slice_sub, but without altering the ref count */
gpr_slice gpr_slice_sub_no_ref(gpr_slice s, size_t begin, size_t end);
GPR_API gpr_slice gpr_slice_sub_no_ref(gpr_slice s, size_t begin, size_t end);
/* Splits s into two: modifies s to be s[0:split], and returns a new slice,
sharing a refcount with s, that contains s[split:s.length].
Requires s initialized, split <= s.length */
gpr_slice gpr_slice_split_tail(gpr_slice *s, size_t split);
GPR_API gpr_slice gpr_slice_split_tail(gpr_slice *s, size_t split);
/* Splits s into two: modifies s to be s[split:s.length], and returns a new
slice, sharing a refcount with s, that contains s[0:split].
Requires s initialized, split <= s.length */
gpr_slice gpr_slice_split_head(gpr_slice *s, size_t split);
GPR_API gpr_slice gpr_slice_split_head(gpr_slice *s, size_t split);
gpr_slice gpr_empty_slice(void);
GPR_API gpr_slice gpr_empty_slice(void);
/* Returns <0 if a < b, ==0 if a == b, >0 if a > b
The order is arbitrary, and is not guaranteed to be stable across different
versions of the API. */
int gpr_slice_cmp(gpr_slice a, gpr_slice b);
int gpr_slice_str_cmp(gpr_slice a, const char *b);
GPR_API int gpr_slice_cmp(gpr_slice a, gpr_slice b);
GPR_API int gpr_slice_str_cmp(gpr_slice a, const char *b);
#ifdef __cplusplus
}
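A short sketch of slice lifetime using the constructors above; split_head leaves the suffix in s and returns the prefix, both sharing one refcount:

#include <grpc/support/log.h>
#include <grpc/support/slice.h>

/* Sketch: build a slice from a C string, split off a prefix, release both. */
static void slice_demo(void) {
  gpr_slice s = gpr_slice_from_copied_string("hello, world");
  gpr_slice head = gpr_slice_split_head(&s, 5); /* head = "hello" */
  GPR_ASSERT(gpr_slice_str_cmp(head, "hello") == 0);
  gpr_slice_unref(head);
  gpr_slice_unref(s); /* s is now ", world" */
}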

@ -59,13 +59,13 @@ typedef struct {
} gpr_slice_buffer;
/* initialize a slice buffer */
void gpr_slice_buffer_init(gpr_slice_buffer *sb);
GPR_API void gpr_slice_buffer_init(gpr_slice_buffer *sb);
/* destroy a slice buffer - unrefs any held elements */
void gpr_slice_buffer_destroy(gpr_slice_buffer *sb);
GPR_API void gpr_slice_buffer_destroy(gpr_slice_buffer *sb);
/* Add an element to a slice buffer - takes ownership of the slice.
This function is allowed to concatenate the passed in slice to the end of
some other slice if desired by the slice buffer. */
void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice slice);
GPR_API void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice slice);
/* add an element to a slice buffer - takes ownership of the slice and returns
the index of the slice.
Guarantees that the slice will not be concatenated at the end of another
@ -73,27 +73,30 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice slice);
slice at the returned index in sb->slices)
The implementation MAY decide to concatenate data at the end of a small
slice added in this fashion. */
size_t gpr_slice_buffer_add_indexed(gpr_slice_buffer *sb, gpr_slice slice);
void gpr_slice_buffer_addn(gpr_slice_buffer *sb, gpr_slice *slices, size_t n);
GPR_API size_t
gpr_slice_buffer_add_indexed(gpr_slice_buffer *sb, gpr_slice slice);
GPR_API void gpr_slice_buffer_addn(gpr_slice_buffer *sb, gpr_slice *slices,
size_t n);
/* add a very small (less than 8 bytes) amount of data to the end of a slice
buffer: returns a pointer into which to add the data */
uint8_t *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, size_t len);
GPR_API uint8_t *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, size_t len);
/* pop the last buffer, but don't unref it */
void gpr_slice_buffer_pop(gpr_slice_buffer *sb);
GPR_API void gpr_slice_buffer_pop(gpr_slice_buffer *sb);
/* clear a slice buffer, unref all elements */
void gpr_slice_buffer_reset_and_unref(gpr_slice_buffer *sb);
GPR_API void gpr_slice_buffer_reset_and_unref(gpr_slice_buffer *sb);
/* swap the contents of two slice buffers */
void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b);
GPR_API void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b);
/* move all of the elements of src into dst */
void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst);
GPR_API void gpr_slice_buffer_move_into(gpr_slice_buffer *src,
gpr_slice_buffer *dst);
/* remove n bytes from the end of a slice buffer */
void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *garbage);
GPR_API void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *garbage);
/* move the first n bytes of src into dst */
void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *dst);
GPR_API void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *dst);
/* take the first slice in the slice buffer */
gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *src);
GPR_API gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *src);
#ifdef __cplusplus
}
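A sketch of the ownership rules above: add takes ownership of each slice, and reset_and_unref releases everything held. The length field is assumed to track the combined byte count across held slices:

#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>

/* Sketch: accumulate slices, check the running byte count, release all. */
static void slice_buffer_demo(void) {
  gpr_slice_buffer sb;
  gpr_slice_buffer_init(&sb);
  gpr_slice_buffer_add(&sb, gpr_slice_from_copied_string("abc"));
  gpr_slice_buffer_add(&sb, gpr_slice_from_copied_string("def"));
  GPR_ASSERT(sb.length == 6); /* total bytes across slices */
  gpr_slice_buffer_reset_and_unref(&sb); /* unrefs the held slices */
  gpr_slice_buffer_destroy(&sb);
}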

@ -78,26 +78,26 @@ extern "C" {
gpr_mu are uninitialized when first declared. */
/* Initialize *mu. Requires: *mu uninitialized. */
void gpr_mu_init(gpr_mu *mu);
GPR_API void gpr_mu_init(gpr_mu *mu);
/* Cause *mu no longer to be initialized, freeing any memory in use. Requires:
*mu initialized; no other concurrent operation on *mu. */
void gpr_mu_destroy(gpr_mu *mu);
GPR_API void gpr_mu_destroy(gpr_mu *mu);
/* Wait until no thread has a lock on *mu, cause the calling thread to own an
exclusive lock on *mu, then return. May block indefinitely or crash if the
calling thread has a lock on *mu. Requires: *mu initialized. */
void gpr_mu_lock(gpr_mu *mu);
GPR_API void gpr_mu_lock(gpr_mu *mu);
/* Release an exclusive lock on *mu held by the calling thread. Requires: *mu
initialized; the calling thread holds an exclusive lock on *mu. */
void gpr_mu_unlock(gpr_mu *mu);
GPR_API void gpr_mu_unlock(gpr_mu *mu);
/* Without blocking, attempt to acquire an exclusive lock on *mu for the
calling thread, then return non-zero iff success. Fail, if any thread holds
the lock; succeeds with high probability if no thread holds the lock.
Requires: *mu initialized. */
int gpr_mu_trylock(gpr_mu *mu);
GPR_API int gpr_mu_trylock(gpr_mu *mu);
/* --- Condition variable interface ---
@ -106,11 +106,11 @@ int gpr_mu_trylock(gpr_mu *mu);
uninitialized when first declared. */
/* Initialize *cv. Requires: *cv uninitialized. */
void gpr_cv_init(gpr_cv *cv);
GPR_API void gpr_cv_init(gpr_cv *cv);
/* Cause *cv no longer to be initialized, freeing any memory in use. Requires:
*cv initialized; no other concurrent operation on *cv.*/
void gpr_cv_destroy(gpr_cv *cv);
GPR_API void gpr_cv_destroy(gpr_cv *cv);
/* Atomically release *mu and wait on *cv. When the calling thread is woken
from *cv or the deadline abs_deadline is exceeded, execute gpr_mu_lock(mu)
@ -118,16 +118,16 @@ void gpr_cv_destroy(gpr_cv *cv);
abs_deadline==gpr_inf_future for no deadline. May return even when not
woken explicitly. Requires: *mu and *cv initialized; the calling thread
holds an exclusive lock on *mu. */
int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline);
GPR_API int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline);
/* If any threads are waiting on *cv, wake at least one.
Clients may treat this as an optimization of gpr_cv_broadcast()
for use in the case where waking more than one waiter is not useful.
Requires: *cv initialized. */
void gpr_cv_signal(gpr_cv *cv);
GPR_API void gpr_cv_signal(gpr_cv *cv);
/* Wake all threads waiting on *cv. Requires: *cv initialized. */
void gpr_cv_broadcast(gpr_cv *cv);
GPR_API void gpr_cv_broadcast(gpr_cv *cv);
/* --- One-time initialization ---
@ -140,7 +140,7 @@ void gpr_cv_broadcast(gpr_cv *cv);
If multiple threads call gpr_once() on the same gpr_once instance, one of
them will call (*init_routine)(), and the others will block until that call
finishes.*/
void gpr_once_init(gpr_once *once, void (*init_routine)(void));
GPR_API void gpr_once_init(gpr_once *once, void (*init_routine)(void));
/* --- One-time event notification ---
@ -150,43 +150,43 @@ void gpr_once_init(gpr_once *once, void (*init_routine)(void));
It requires no destruction. */
/* Initialize *ev. */
void gpr_event_init(gpr_event *ev);
GPR_API void gpr_event_init(gpr_event *ev);
/* Set *ev so that gpr_event_get() and gpr_event_wait() will return value.
Requires: *ev initialized; value != NULL; no prior or concurrent calls to
gpr_event_set(ev, ...) since initialization. */
void gpr_event_set(gpr_event *ev, void *value);
GPR_API void gpr_event_set(gpr_event *ev, void *value);
/* Return the value set by gpr_event_set(ev, ...), or NULL if no such call has
completed. If the result is non-NULL, all operations that occurred prior to
the gpr_event_set(ev, ...) set will be visible after this call returns.
Requires: *ev initialized. This operation is faster than acquiring a mutex
on most platforms. */
void *gpr_event_get(gpr_event *ev);
GPR_API void *gpr_event_get(gpr_event *ev);
/* Wait until *ev is set by gpr_event_set(ev, ...), or abs_deadline is
exceeded, then return gpr_event_get(ev). Requires: *ev initialized. Use
abs_deadline==gpr_inf_future for no deadline. When the event has been
signalled before the call, this operation is faster than acquiring a mutex
on most platforms. */
void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline);
GPR_API void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline);
/* --- Reference counting ---
These calls act on the type gpr_refcount. It requires no destruction. */
/* Initialize *r to value n. */
void gpr_ref_init(gpr_refcount *r, int n);
GPR_API void gpr_ref_init(gpr_refcount *r, int n);
/* Increment the reference count *r. Requires *r initialized. */
void gpr_ref(gpr_refcount *r);
GPR_API void gpr_ref(gpr_refcount *r);
/* Increment the reference count *r by n. Requires *r initialized, n > 0. */
void gpr_refn(gpr_refcount *r, int n);
GPR_API void gpr_refn(gpr_refcount *r, int n);
/* Decrement the reference count *r and return non-zero iff it has reached
zero. Requires *r initialized. */
int gpr_unref(gpr_refcount *r);
GPR_API int gpr_unref(gpr_refcount *r);
/* --- Stats counters ---
@ -197,13 +197,13 @@ int gpr_unref(gpr_refcount *r);
synchronize other events. */
/* Initialize *c to the value n. */
void gpr_stats_init(gpr_stats_counter *c, intptr_t n);
GPR_API void gpr_stats_init(gpr_stats_counter *c, intptr_t n);
/* *c += inc. Requires: *c initialized. */
void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc);
GPR_API void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc);
/* Return *c. Requires: *c initialized. */
intptr_t gpr_stats_read(const gpr_stats_counter *c);
GPR_API intptr_t gpr_stats_read(const gpr_stats_counter *c);
/* ==================Example use of interface===================
A producer-consumer queue of up to N integers,
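The header's own producer-consumer example is truncated by the diff; as a smaller illustration of the primitives above, a sketch combining one-time initialization with event signaling (GPR_ONCE_INIT is the static initializer for gpr_once):

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

/* Sketch: run do_init exactly once, then let any thread block until the
   event is set. gpr_event_set requires a non-NULL value. */
static gpr_once g_init_once = GPR_ONCE_INIT;
static gpr_event g_ready;

static void do_init(void) {
  gpr_event_init(&g_ready);
  gpr_event_set(&g_ready, (void *)1);
}

static void ensure_ready(void) {
  gpr_once_init(&g_init_once, do_init);
  gpr_event_wait(&g_ready, gpr_inf_future(GPR_CLOCK_REALTIME));
}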

@ -69,9 +69,10 @@ typedef struct gpr_timespec {
} gpr_timespec;
/* Time constants. */
gpr_timespec gpr_time_0(gpr_clock_type type); /* The zero time interval. */
gpr_timespec gpr_inf_future(gpr_clock_type type); /* The far future */
gpr_timespec gpr_inf_past(gpr_clock_type type); /* The far past. */
GPR_API gpr_timespec
gpr_time_0(gpr_clock_type type); /* The zero time interval. */
GPR_API gpr_timespec gpr_inf_future(gpr_clock_type type); /* The far future */
GPR_API gpr_timespec gpr_inf_past(gpr_clock_type type); /* The far past. */
#define GPR_MS_PER_SEC 1000
#define GPR_US_PER_SEC 1000000
@ -81,45 +82,46 @@ gpr_timespec gpr_inf_past(gpr_clock_type type); /* The far past. */
#define GPR_US_PER_MS 1000
/* initialize time subsystem */
void gpr_time_init(void);
GPR_API void gpr_time_init(void);
/* Return the current time measured from the given clocks epoch. */
gpr_timespec gpr_now(gpr_clock_type clock);
GPR_API gpr_timespec gpr_now(gpr_clock_type clock);
/* Convert a timespec from one clock to another */
gpr_timespec gpr_convert_clock_type(gpr_timespec t,
gpr_clock_type target_clock);
GPR_API gpr_timespec
gpr_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock);
/* Return a negative value, 0, or a positive value according to whether
a < b, a == b, or a > b, respectively. */
int gpr_time_cmp(gpr_timespec a, gpr_timespec b);
GPR_API int gpr_time_cmp(gpr_timespec a, gpr_timespec b);
gpr_timespec gpr_time_max(gpr_timespec a, gpr_timespec b);
gpr_timespec gpr_time_min(gpr_timespec a, gpr_timespec b);
GPR_API gpr_timespec gpr_time_max(gpr_timespec a, gpr_timespec b);
GPR_API gpr_timespec gpr_time_min(gpr_timespec a, gpr_timespec b);
/* Add and subtract times. Calculations saturate at infinities. */
gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b);
gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b);
GPR_API gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b);
GPR_API gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b);
/* Return a timespec representing a given number of time units. LONG_MIN is
interpreted as gpr_inf_past, and LONG_MAX as gpr_inf_future. */
gpr_timespec gpr_time_from_micros(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_nanos(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_millis(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_seconds(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_minutes(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_hours(long x, gpr_clock_type clock_type);
GPR_API gpr_timespec gpr_time_from_micros(long x, gpr_clock_type clock_type);
GPR_API gpr_timespec gpr_time_from_nanos(long x, gpr_clock_type clock_type);
GPR_API gpr_timespec gpr_time_from_millis(long x, gpr_clock_type clock_type);
GPR_API gpr_timespec gpr_time_from_seconds(long x, gpr_clock_type clock_type);
GPR_API gpr_timespec gpr_time_from_minutes(long x, gpr_clock_type clock_type);
GPR_API gpr_timespec gpr_time_from_hours(long x, gpr_clock_type clock_type);
int32_t gpr_time_to_millis(gpr_timespec timespec);
GPR_API int32_t gpr_time_to_millis(gpr_timespec timespec);
/* Return 1 if two times are equal or within threshold of each other,
0 otherwise */
int gpr_time_similar(gpr_timespec a, gpr_timespec b, gpr_timespec threshold);
GPR_API int gpr_time_similar(gpr_timespec a, gpr_timespec b,
gpr_timespec threshold);
/* Sleep until at least 'until' - an absolute timeout */
void gpr_sleep_until(gpr_timespec until);
GPR_API void gpr_sleep_until(gpr_timespec until);
double gpr_timespec_to_micros(gpr_timespec t);
GPR_API double gpr_timespec_to_micros(gpr_timespec t);
#ifdef __cplusplus
}
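The arithmetic helpers above combine naturally into deadline handling; a sketch on the monotonic clock:

#include <grpc/support/time.h>

/* Sketch: compute a 5-second deadline and test whether it has passed. */
static int deadline_expired_demo(void) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN));
  return gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) > 0;
}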

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -69,23 +69,23 @@ typedef struct gpr_avl {
} gpr_avl;
/** create an immutable AVL tree */
gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable);
GPR_API gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable);
/** add a reference to an existing tree - returns
the tree as a convenience */
gpr_avl gpr_avl_ref(gpr_avl avl);
GPR_API gpr_avl gpr_avl_ref(gpr_avl avl);
/** remove a reference to a tree - destroying it if there
are no references left */
void gpr_avl_unref(gpr_avl avl);
GPR_API void gpr_avl_unref(gpr_avl avl);
/** return a new tree with (key, value) added to avl.
implicitly unrefs avl to allow easy chaining.
if key exists in avl, the new tree's key entry is updated
(i.e. a duplicate is not created) */
gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value);
GPR_API gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value);
/** return a new tree with key deleted */
gpr_avl gpr_avl_remove(gpr_avl avl, void *key);
GPR_API gpr_avl gpr_avl_remove(gpr_avl avl, void *key);
/** lookup key, and return the associated value.
does not mutate avl.
returns NULL if key is not found. */
void *gpr_avl_get(gpr_avl avl, void *key);
GPR_API void *gpr_avl_get(gpr_avl avl, void *key);
#endif
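A sketch of the immutable-tree API above, with small integers smuggled through void* so no allocation is needed. The initializer assumes the gpr_avl_vtable field order destroy_key, copy_key, compare_keys, destroy_value, copy_value:

#include <stdint.h>
#include <grpc/support/avl.h>
#include <grpc/support/log.h>

static void noop_destroy(void *p) { (void)p; }
static void *identity_copy(void *p) { return p; }
static long intptr_cmp(void *a, void *b) {
  return (long)((intptr_t)a - (intptr_t)b);
}

static const gpr_avl_vtable int_avl_vtable = {
    noop_destroy, identity_copy, intptr_cmp, noop_destroy, identity_copy};

/* Sketch: each gpr_avl_add implicitly unrefs its input tree, so the
   returned tree can simply be reassigned. */
static void avl_demo(void) {
  gpr_avl avl = gpr_avl_create(&int_avl_vtable);
  avl = gpr_avl_add(avl, (void *)1, (void *)100);
  avl = gpr_avl_add(avl, (void *)2, (void *)200);
  GPR_ASSERT(gpr_avl_get(avl, (void *)2) == (void *)200);
  gpr_avl_unref(avl);
}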

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -34,6 +34,8 @@
#ifndef GRPC_SUPPORT_CMDLINE_H
#define GRPC_SUPPORT_CMDLINE_H
#include <grpc/support/port_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -68,31 +70,31 @@ typedef struct gpr_cmdline gpr_cmdline;
/* Construct a command line parser: takes a short description of the tool
doing the parsing */
gpr_cmdline *gpr_cmdline_create(const char *description);
GPR_API gpr_cmdline *gpr_cmdline_create(const char *description);
/* Add an integer parameter, with a name (used on the command line) and some
helpful text (used in the command usage) */
void gpr_cmdline_add_int(gpr_cmdline *cl, const char *name, const char *help,
int *value);
GPR_API void gpr_cmdline_add_int(gpr_cmdline *cl, const char *name,
const char *help, int *value);
/* The same, for a boolean flag */
void gpr_cmdline_add_flag(gpr_cmdline *cl, const char *name, const char *help,
int *value);
GPR_API void gpr_cmdline_add_flag(gpr_cmdline *cl, const char *name,
const char *help, int *value);
/* And for a string */
void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name, const char *help,
char **value);
GPR_API void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name,
const char *help, char **value);
/* Set a callback for non-named arguments */
void gpr_cmdline_on_extra_arg(
GPR_API void gpr_cmdline_on_extra_arg(
gpr_cmdline *cl, const char *name, const char *help,
void (*on_extra_arg)(void *user_data, const char *arg), void *user_data);
/* Enable surviving failure: default behavior is to exit the process */
void gpr_cmdline_set_survive_failure(gpr_cmdline *cl);
GPR_API void gpr_cmdline_set_survive_failure(gpr_cmdline *cl);
/* Parse the command line; returns 1 on success. On failure, either dies
(by default) or returns 0 if gpr_cmdline_set_survive_failure() has been
called. */
int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
GPR_API int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
/* Destroy the parser */
void gpr_cmdline_destroy(gpr_cmdline *cl);
GPR_API void gpr_cmdline_destroy(gpr_cmdline *cl);
/* Get a string describing usage */
char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0);
GPR_API char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0);
#ifdef __cplusplus
}
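A sketch of the parser above handling "--port <n>" and "--verbose":

#include <grpc/support/cmdline.h>

int main(int argc, char **argv) {
  int port = 8080; /* default if flag absent */
  int verbose = 0;
  gpr_cmdline *cl = gpr_cmdline_create("demo tool");
  gpr_cmdline_add_int(cl, "port", "Port to listen on", &port);
  gpr_cmdline_add_flag(cl, "verbose", "Enable verbose output", &verbose);
  gpr_cmdline_parse(cl, argc, argv);
  gpr_cmdline_destroy(cl);
  return 0;
}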

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -34,6 +34,8 @@
#ifndef GRPC_SUPPORT_CPU_H
#define GRPC_SUPPORT_CPU_H
#include <grpc/support/port_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -42,13 +44,13 @@ extern "C" {
/* Return the number of CPU cores on the current system. Will return 0 if
the information is not available. */
unsigned gpr_cpu_num_cores(void);
GPR_API unsigned gpr_cpu_num_cores(void);
/* Return the CPU on which the current thread is executing; N.B. This should
be considered advisory only - it is possible that the thread is switched
to a different CPU at any time. Returns a value in range
[0, gpr_cpu_num_cores() - 1] */
unsigned gpr_cpu_current_cpu(void);
GPR_API unsigned gpr_cpu_current_cpu(void);
#ifdef __cplusplus
} // extern "C"
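A typical use of the core-count query above, honoring the documented 0-on-unavailable convention:

#include <grpc/support/cpu.h>

/* Sketch: size a worker pool from the advertised core count, falling
   back to 1 when gpr_cpu_num_cores reports 0 (information unavailable). */
static unsigned pick_worker_count(void) {
  unsigned cores = gpr_cpu_num_cores();
  return cores == 0 ? 1 : cores;
}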

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -43,31 +43,34 @@ extern "C" {
typedef struct gpr_histogram gpr_histogram;
gpr_histogram *gpr_histogram_create(double resolution, double max_bucket_start);
void gpr_histogram_destroy(gpr_histogram *h);
void gpr_histogram_add(gpr_histogram *h, double x);
GPR_API gpr_histogram *gpr_histogram_create(double resolution,
double max_bucket_start);
GPR_API void gpr_histogram_destroy(gpr_histogram *h);
GPR_API void gpr_histogram_add(gpr_histogram *h, double x);
/* The following merges the second histogram into the first. It only works
if they have the same buckets and resolution. Returns 0 on failure, 1
on success */
int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src);
GPR_API int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src);
double gpr_histogram_percentile(gpr_histogram *histogram, double percentile);
double gpr_histogram_mean(gpr_histogram *histogram);
double gpr_histogram_stddev(gpr_histogram *histogram);
double gpr_histogram_variance(gpr_histogram *histogram);
double gpr_histogram_maximum(gpr_histogram *histogram);
double gpr_histogram_minimum(gpr_histogram *histogram);
double gpr_histogram_count(gpr_histogram *histogram);
double gpr_histogram_sum(gpr_histogram *histogram);
double gpr_histogram_sum_of_squares(gpr_histogram *histogram);
GPR_API double gpr_histogram_percentile(gpr_histogram *histogram,
double percentile);
GPR_API double gpr_histogram_mean(gpr_histogram *histogram);
GPR_API double gpr_histogram_stddev(gpr_histogram *histogram);
GPR_API double gpr_histogram_variance(gpr_histogram *histogram);
GPR_API double gpr_histogram_maximum(gpr_histogram *histogram);
GPR_API double gpr_histogram_minimum(gpr_histogram *histogram);
GPR_API double gpr_histogram_count(gpr_histogram *histogram);
GPR_API double gpr_histogram_sum(gpr_histogram *histogram);
GPR_API double gpr_histogram_sum_of_squares(gpr_histogram *histogram);
const uint32_t *gpr_histogram_get_contents(gpr_histogram *histogram,
size_t *count);
void gpr_histogram_merge_contents(gpr_histogram *histogram,
const uint32_t *data, size_t data_count,
double min_seen, double max_seen, double sum,
double sum_of_squares, double count);
GPR_API const uint32_t *gpr_histogram_get_contents(gpr_histogram *histogram,
size_t *count);
GPR_API void gpr_histogram_merge_contents(gpr_histogram *histogram,
const uint32_t *data,
size_t data_count, double min_seen,
double max_seen, double sum,
double sum_of_squares, double count);
#ifdef __cplusplus
}
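A sketch of the histogram lifecycle above, recording latencies in milliseconds and reading back the median:

#include <grpc/support/histogram.h>

static double median_latency_demo(void) {
  gpr_histogram *h = gpr_histogram_create(0.01 /* resolution */,
                                          60000.0 /* max bucket start */);
  gpr_histogram_add(h, 1.5);
  gpr_histogram_add(h, 2.5);
  gpr_histogram_add(h, 10.0);
  double median = gpr_histogram_percentile(h, 50.0);
  gpr_histogram_destroy(h);
  return median;
}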

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -34,6 +34,8 @@
#ifndef GRPC_SUPPORT_HOST_PORT_H
#define GRPC_SUPPORT_HOST_PORT_H
#include <grpc/support/port_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -48,14 +50,14 @@ extern "C" {
destroyed using gpr_free().
In the unlikely event of an error, returns -1 and sets *out to NULL. */
int gpr_join_host_port(char **out, const char *host, int port);
GPR_API int gpr_join_host_port(char **out, const char *host, int port);
/* Given a name in the form "host:port" or "[ho:st]:port", split into hostname
and port number, into newly allocated strings, which must later be
destroyed using gpr_free().
Return 1 on success, 0 on failure. Guarantees *host and *port == NULL on
failure. */
int gpr_split_host_port(const char *name, char **host, char **port);
GPR_API int gpr_split_host_port(const char *name, char **host, char **port);
#ifdef __cplusplus
}
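A sketch round-tripping a host/port pair through the two helpers above; both allocate, so every output string goes back through gpr_free():

#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>

/* Sketch: IPv6 hosts containing ':' come back bracketed from join. */
static void host_port_demo(void) {
  char *joined;
  char *host;
  char *port;
  gpr_join_host_port(&joined, "::1", 50051); /* -> "[::1]:50051" */
  if (gpr_split_host_port(joined, &host, &port)) {
    gpr_free(host);
    gpr_free(port);
  }
  gpr_free(joined);
}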

@ -42,7 +42,7 @@ extern "C" {
* formatted error message, corresponding to the error messageid.
* Use in conjunction with GetLastError() et al.
*/
char *gpr_format_message(int messageid);
GPR_API char *gpr_format_message(int messageid);
#ifdef __cplusplus
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -42,7 +42,7 @@ extern "C" {
/* Returns a copy of src that can be passed to gpr_free().
If allocation fails or if src is NULL, returns NULL. */
char *gpr_strdup(const char *src);
GPR_API char *gpr_strdup(const char *src);
/* printf to a newly-allocated string. The set of supported formats may vary
between platforms.
@ -52,7 +52,7 @@ char *gpr_strdup(const char *src);
On error, returns -1 and sets *strp to NULL. If the format string is bad,
the result is undefined. */
int gpr_asprintf(char **strp, const char *format, ...);
GPR_API int gpr_asprintf(char **strp, const char *format, ...);
#ifdef __cplusplus
}
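A sketch of gpr_asprintf above, checking the documented -1 error convention and releasing the result with gpr_free():

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>

static void asprintf_demo(void) {
  char *msg;
  if (gpr_asprintf(&msg, "attempt %d of %d", 3, 5) != -1) {
    gpr_free(msg);
  }
}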

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -34,6 +34,8 @@
#ifndef GRPC_SUPPORT_SUBPROCESS_H
#define GRPC_SUPPORT_SUBPROCESS_H
#include <grpc/support/port_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -41,14 +43,14 @@ extern "C" {
typedef struct gpr_subprocess gpr_subprocess;
/* .exe on windows, empty on unices */
const char *gpr_subprocess_binary_extension();
GPR_API const char *gpr_subprocess_binary_extension();
gpr_subprocess *gpr_subprocess_create(int argc, const char **argv);
GPR_API gpr_subprocess *gpr_subprocess_create(int argc, const char **argv);
/* if subprocess has not been joined, kill it */
void gpr_subprocess_destroy(gpr_subprocess *p);
GPR_API void gpr_subprocess_destroy(gpr_subprocess *p);
/* returns exit status; can be called at most once */
int gpr_subprocess_join(gpr_subprocess *p);
void gpr_subprocess_interrupt(gpr_subprocess *p);
GPR_API int gpr_subprocess_join(gpr_subprocess *p);
GPR_API void gpr_subprocess_interrupt(gpr_subprocess *p);
#ifdef __cplusplus
} // extern "C"
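A sketch of the subprocess lifecycle above; the binary path is illustrative only, and join may be called at most once:

#include <grpc/support/subprocess.h>

static int run_child(void) {
  const char *argv[] = {"/bin/true"}; /* illustrative path */
  gpr_subprocess *p = gpr_subprocess_create(1, argv);
  int status = gpr_subprocess_join(p); /* at most once */
  gpr_subprocess_destroy(p);
  return status;
}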

@ -59,30 +59,30 @@ typedef struct {
in *t, and return true. If there are insufficient resources, return false.
If options==NULL, default options are used.
The thread is immediately runnable, and exits when (*thd_body)() returns. */
int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
const gpr_thd_options *options);
GPR_API int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
const gpr_thd_options *options);
/* Return a gpr_thd_options struct with all fields set to defaults. */
gpr_thd_options gpr_thd_options_default(void);
GPR_API gpr_thd_options gpr_thd_options_default(void);
/* Set the thread to become detached on startup - this is the default. */
void gpr_thd_options_set_detached(gpr_thd_options *options);
GPR_API void gpr_thd_options_set_detached(gpr_thd_options *options);
/* Set the thread to become joinable - mutually exclusive with detached. */
void gpr_thd_options_set_joinable(gpr_thd_options *options);
GPR_API void gpr_thd_options_set_joinable(gpr_thd_options *options);
/* Returns non-zero if the option detached is set. */
int gpr_thd_options_is_detached(const gpr_thd_options *options);
GPR_API int gpr_thd_options_is_detached(const gpr_thd_options *options);
/* Returns non-zero if the option joinable is set. */
int gpr_thd_options_is_joinable(const gpr_thd_options *options);
GPR_API int gpr_thd_options_is_joinable(const gpr_thd_options *options);
/* Returns the identifier of the current thread. */
gpr_thd_id gpr_thd_currentid(void);
GPR_API gpr_thd_id gpr_thd_currentid(void);
/* Blocks until the specified thread properly terminates.
Calling this on a detached thread has unpredictable results. */
void gpr_thd_join(gpr_thd_id t);
GPR_API void gpr_thd_join(gpr_thd_id t);
#ifdef __cplusplus
}
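A sketch of the thread API above: detached is the default, so joinable must be requested explicitly before gpr_thd_join can be used:

#include <grpc/support/log.h>
#include <grpc/support/thd.h>

static void worker(void *arg) { (void)arg; /* do work */ }

static void thd_demo(void) {
  gpr_thd_id tid;
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);
  GPR_ASSERT(gpr_thd_new(&tid, worker, NULL, &options));
  gpr_thd_join(tid);
}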

@ -322,6 +322,7 @@
"src/core/json/json_reader.c",
"src/core/json/json_string.c",
"src/core/json/json_writer.c",
"src/core/surface/alarm.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_reader.c",
@ -373,6 +374,7 @@
"src/core/census/context.c",
"src/core/census/initialize.c",
"src/core/census/operation.c",
"src/core/census/placeholders.c",
"src/core/census/tag_set.c",
"src/core/census/tracing.c",
"third_party/zlib/crc32.h",
@ -489,6 +491,7 @@
"src/core/support/string_posix.c",
"src/core/support/string_win32.c",
"src/core/support/subprocess_posix.c",
"src/core/support/subprocess_windows.c",
"src/core/support/sync.c",
"src/core/support/sync_posix.c",
"src/core/support/sync_win32.c",

@ -3,6 +3,9 @@
[coverage:run]
plugins = Cython.Coverage
[build]
build_base=python_build
[build_ext]
inplace=1

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -91,7 +91,7 @@ static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
}
static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
int success) {
bool success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;

@ -0,0 +1,114 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/census.h>
#include <grpc/support/log.h>
/* Placeholders for the pending APIs */
census_tag_set *census_context_tag_set(census_context *context) {
(void)context;
abort();
}
int census_get_trace_record(census_trace_record *trace_record) {
(void)trace_record;
abort();
}
void census_record_values(census_context *context, census_value *values,
size_t nvalues) {
(void)context;
(void)values;
(void)nvalues;
abort();
}
void census_set_rpc_client_peer(census_context *context, const char *peer) {
(void)context;
(void)peer;
abort();
}
void census_trace_scan_end() { abort(); }
int census_trace_scan_start(int consume) {
(void)consume;
abort();
}
const census_aggregation *census_view_aggregrations(const census_view *view) {
(void)view;
abort();
}
census_view *census_view_create(uint32_t metric_id, const census_tag_set *tags,
const census_aggregation *aggregations,
size_t naggregations) {
(void)metric_id;
(void)tags;
(void)aggregations;
(void)naggregations;
abort();
}
const census_tag_set *census_view_tags(const census_view *view) {
(void)view;
abort();
}
void census_view_delete(census_view *view) {
(void)view;
abort();
}
const census_view_data *census_view_get_data(const census_view *view) {
(void)view;
abort();
}
size_t census_view_metric(const census_view *view) {
(void)view;
abort();
}
size_t census_view_naggregations(const census_view *view) {
(void)view;
abort();
}
void census_view_reset(census_view *view) {
(void)view;
abort();
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -135,7 +135,7 @@ static void on_lb_policy_state_changed_locked(
}
static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
lb_policy_connectivity_watcher *w = arg;
gpr_mu_lock(&w->chand->mu_config);
@ -161,7 +161,7 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
}
static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
@ -191,7 +191,8 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
if (lb_policy != NULL || chand->resolver == NULL /* disconnected */) {
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures);
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
}
if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
@ -249,7 +250,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
channel_data *chand = elem->channel_data;
grpc_resolver *destroy_resolver = NULL;
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, true, NULL);
GPR_ASSERT(op->set_accept_stream == NULL);
if (op->bind_pollset != NULL) {
@ -268,7 +269,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
grpc_exec_ctx_enqueue(exec_ctx, op->send_ping, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->send_ping, false, NULL);
} else {
grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
@ -310,15 +311,15 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready);
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
continue_picking_args *cpa = arg;
if (!success) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
} else if (cpa->connected_subchannel == NULL) {
/* cancelled, do nothing */
} else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->connected_subchannel, cpa->on_ready)) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 1);
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, true, NULL);
}
gpr_free(cpa);
}
@ -346,7 +347,7 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
}
}
gpr_mu_unlock(&chand->mu_config);
@ -497,7 +498,7 @@ typedef struct {
} external_connectivity_watcher;
static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
external_connectivity_watcher *w = arg;
grpc_closure *follow_up = w->on_complete;
grpc_pollset_set_del_pollset(exec_ctx, &w->chand->interested_parties,

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -79,7 +79,7 @@ typedef struct client_uchannel_channel_data {
typedef grpc_subchannel_call_holder call_data;
static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
channel_data *chand = arg;
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
chand->subchannel_connectivity,
@ -105,7 +105,7 @@ static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, true, NULL);
GPR_ASSERT(op->set_accept_stream == NULL);
GPR_ASSERT(op->bind_pollset == NULL);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -153,7 +153,7 @@ static void process_send_initial_metadata(
static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem);
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, bool success) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
gpr_slice_buffer_reset_and_unref(&calld->slices);
@ -183,7 +183,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_next_op(exec_ctx, elem, &calld->send_op);
}
static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, bool success) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);

@ -1,5 +1,5 @@
/*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -80,7 +80,7 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
return md;
}
static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
client_recv_filter_args a;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -131,7 +131,7 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
}
}
static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (success) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -43,9 +43,9 @@
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
int success);
bool success);
static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
int success);
bool success);
static void add_waiting_locked(grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
@ -166,7 +166,7 @@ retry:
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
}
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
grpc_subchannel_call_holder *holder = arg;
grpc_subchannel_call *call;
gpr_mu_lock(&holder->mu);
@ -209,10 +209,11 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(retry_ops, a), 1);
grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(retry_ops, a), true,
NULL);
}
static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, int success) {
static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, bool success) {
retry_ops_args *a = args;
size_t i;
for (i = 0; i < a->nops; i++) {
@ -240,9 +241,10 @@ static void fail_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
size_t i;
for (i = 0; i < holder->waiting_ops_count; i++) {
grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].on_complete, false,
NULL);
grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].recv_message_ready,
0);
false, NULL);
}
holder->waiting_ops_count = 0;
}
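The recurring mechanical change in these core hunks is the closure-callback signature: the success parameter flips from int to bool, and grpc_exec_ctx_enqueue gains a trailing workqueue argument (NULL at every call site here). A sketch of the updated shape, assuming this tree's internal iomgr headers:

#include <stdbool.h>

#include "src/core/iomgr/closure.h"
#include "src/core/iomgr/exec_ctx.h"

/* Sketch: callbacks now take bool success, and enqueueing passes an
   explicit offload-target workqueue (NULL means run on this exec_ctx). */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  (void)exec_ctx;
  (void)arg;
  (void)success;
}

static void schedule(grpc_exec_ctx *exec_ctx, grpc_closure *closure) {
  grpc_exec_ctx_enqueue(exec_ctx, closure, true /* success */,
                        NULL /* offload target workqueue */);
}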

@ -121,7 +121,7 @@ void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
*pp->target = NULL;
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
pp = next;
}
@ -140,7 +140,7 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@ -209,7 +209,7 @@ int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
}
static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
pick_first_lb_policy *p = arg;
size_t i;
size_t num_subchannels = p->num_subchannels;
@ -230,7 +230,7 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
}
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
pick_first_lb_policy *p = arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
@ -272,15 +272,15 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
grpc_exec_ctx_enqueue(exec_ctx,
grpc_closure_create(destroy_subchannels, p), 1);
grpc_exec_ctx_enqueue(
exec_ctx, grpc_closure_create(destroy_subchannels, p), true, NULL);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = selected;
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
@ -327,7 +327,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
@ -374,7 +374,7 @@ void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (selected) {
grpc_connected_subchannel_ping(exec_ctx, selected, closure);
} else {
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
}
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -237,7 +237,7 @@ void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
gpr_free(pp);
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
@ -263,7 +263,7 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@ -336,7 +336,7 @@ int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
}
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
@ -376,7 +376,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
@ -428,7 +428,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
}
} else {
@ -479,7 +479,7 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel_ping(exec_ctx, target, closure);
} else {
gpr_mu_unlock(&p->mu);
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
}
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -93,7 +93,7 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
@ -182,7 +182,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
if (r->resolved_config) {
grpc_client_config_ref(r->resolved_config);
}
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -98,7 +98,7 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
@ -153,7 +153,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "sockaddr");
r->published = 1;
*r->target_config = cfg;
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
r->next_completion = NULL;
}
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -420,7 +420,7 @@ static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
if (r->resolved_config != NULL) {
grpc_client_config_ref(r->resolved_config);
}
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, true, NULL);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}

@ -145,7 +145,7 @@ struct grpc_subchannel_call {
static gpr_timespec compute_connect_deadline(grpc_subchannel *c);
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
int iomgr_success);
bool iomgr_success);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define REF_REASON reason
@ -175,7 +175,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
*/
static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
bool success) {
grpc_connected_subchannel *c = arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
@ -198,7 +198,7 @@ void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
*/
static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
bool success) {
grpc_subchannel *c = arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(c->args);
@ -268,7 +268,7 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(subchannel_destroy, c),
1);
true, NULL);
}
}
@ -341,7 +341,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c) {
}
static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
bool success) {
external_state_watcher *w = arg;
grpc_closure *follow_up = w->notify;
if (w->pollset_set != NULL) {
@ -413,7 +413,7 @@ void grpc_connected_subchannel_process_transport_op(
}
static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
int iomgr_success) {
bool iomgr_success) {
state_watcher *sw = p;
grpc_subchannel *c = sw->subchannel;
gpr_mu *mu = &c->mu;
@ -594,7 +594,7 @@ static void update_reconnect_parameters(grpc_subchannel *c) {
gpr_time_add(c->next_attempt, gpr_time_from_millis(jitter, GPR_TIMESPAN));
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, int iomgr_success) {
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, bool iomgr_success) {
grpc_subchannel *c = arg;
gpr_mu_lock(&c->mu);
c->have_alarm = 0;
@ -611,7 +611,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, int iomgr_success) {
}
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
grpc_subchannel *c = arg;
if (c->connecting_result.transport != NULL) {
@ -647,7 +647,7 @@ static gpr_timespec compute_connect_deadline(grpc_subchannel *c) {
*/
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
int success) {
bool success) {
grpc_subchannel_call *c = call;
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -114,13 +114,13 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
gpr_free(req);
}
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success);
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, bool success);
static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
grpc_endpoint_read(exec_ctx, req->ep, &req->incoming, &req->on_read);
}
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
internal_request *req = user_data;
size_t i;
@ -147,7 +147,7 @@ static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
do_read(exec_ctx, req);
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
internal_request *req = arg;
if (success) {
on_written(exec_ctx, req);
@ -175,7 +175,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
start_write(exec_ctx, req);
}
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
internal_request *req = arg;
if (!req->ep) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -43,7 +43,7 @@ void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
}
void grpc_closure_list_add(grpc_closure_list *closure_list,
grpc_closure *closure, int success) {
grpc_closure *closure, bool success) {
if (closure == NULL) return;
closure->final_data = (success != 0);
if (closure_list->head == NULL) {
@ -54,7 +54,7 @@ void grpc_closure_list_add(grpc_closure_list *closure_list,
closure_list->tail = closure;
}
int grpc_closure_list_empty(grpc_closure_list closure_list) {
bool grpc_closure_list_empty(grpc_closure_list closure_list) {
return closure_list.head == NULL;
}
@ -77,7 +77,7 @@ typedef struct {
grpc_closure wrapper;
} wrapped_closure;
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
wrapped_closure *wc = arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,6 +35,7 @@
#define GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
#include <grpc/support/port_platform.h>
#include <stdbool.h>
struct grpc_closure;
typedef struct grpc_closure grpc_closure;
@ -54,7 +55,7 @@ typedef struct grpc_closure_list {
* \param success An indication of the state of the iomgr. On false, cleanup
* actions should be taken (e.g., shutdown). */
typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
int success);
bool success);
/** A closure over a grpc_iomgr_cb_func. */
struct grpc_closure {
@ -83,13 +84,13 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
/** add \a closure to the end of \a list and set \a closure's success to \a
* success */
void grpc_closure_list_add(grpc_closure_list *list, grpc_closure *closure,
int success);
bool success);
/** append all closures from \a src to \a dst and empty \a src. */
void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** return whether \a list is empty. */
int grpc_closure_list_empty(grpc_closure_list list);
bool grpc_closure_list_empty(grpc_closure_list list);
/** return the next pointer for a queued closure list */
grpc_closure *grpc_closure_next(grpc_closure *closure);
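
For orientation, a minimal sketch of the closure contract after this change; the on_done callback, done_closure, and example_init names are illustrative, not part of the patch:

static void on_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  if (!success) {
    /* iomgr is shutting down: take the cleanup path instead of continuing. */
    return;
  }
  /* ... normal completion work ... */
}

static grpc_closure done_closure;

static void example_init(void) {
  /* Bind the callback once; the closure can then be scheduled repeatedly
     via grpc_exec_ctx_enqueue or grpc_closure_list_add. */
  grpc_closure_init(&done_closure, on_done, NULL /* cb_arg */);
}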

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -37,16 +37,16 @@
#include "src/core/profiling/timers.h"
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
int did_something = 0;
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
int success = (int)(c->final_data & 1);
bool success = (bool)(c->final_data & 1);
grpc_closure *next = (grpc_closure *)(c->final_data & ~(uintptr_t)1);
did_something++;
did_something = true;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, success);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
@ -62,11 +62,15 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
}
void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
int success) {
bool success,
grpc_workqueue *offload_target_or_null) {
GPR_ASSERT(offload_target_or_null == NULL);
grpc_closure_list_add(&exec_ctx->closure_list, closure, success);
}
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
grpc_closure_list *list) {
grpc_closure_list *list,
grpc_workqueue *offload_target_or_null) {
GPR_ASSERT(offload_target_or_null == NULL);
grpc_closure_list_move(list, &exec_ctx->closure_list);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -57,22 +57,29 @@ struct grpc_exec_ctx {
grpc_closure_list closure_list;
};
/** A workqueue represents a list of work to be executed asynchronously.
Forward declared here to avoid a circular dependency with workqueue.h. */
struct grpc_workqueue;
typedef struct grpc_workqueue grpc_workqueue;
#define GRPC_EXEC_CTX_INIT \
{ GRPC_CLOSURE_LIST_INIT }
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.
* Returns 1 if work was performed, 0 otherwise. */
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
* Returns true if work was performed, false otherwise. */
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
/** Finish any pending work for a grpc_exec_ctx. Must be called before
* the instance is destroyed, or work may be lost. */
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
/** Add a closure to be executed at the next flush/finish point */
void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
int success);
bool success,
grpc_workqueue *offload_target_or_null);
/** Add a list of closures to be executed at the next flush/finish point.
* Leaves \a list empty. */
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
grpc_closure_list *list);
grpc_closure_list *list,
grpc_workqueue *offload_target_or_null);
#endif
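
A usage sketch of the widened enqueue API, under the constraint the new asserts impose (do_step and run_once are illustrative names):

static void do_step(grpc_exec_ctx *exec_ctx, void *arg, bool success) {}

static void run_once(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  /* Workqueue offload is not implemented yet: any non-NULL
     offload_target_or_null trips the GPR_ASSERT in grpc_exec_ctx_enqueue. */
  grpc_exec_ctx_enqueue(&exec_ctx, grpc_closure_create(do_step, NULL), true,
                        NULL);
  grpc_exec_ctx_finish(&exec_ctx); /* drains the list and runs do_step */
}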

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -77,7 +77,7 @@ static void closure_exec_thread_func(void *ignored) {
gpr_mu_unlock(&g_executor.mu);
break;
} else {
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
}
gpr_mu_unlock(&g_executor.mu);
grpc_exec_ctx_flush(&exec_ctx);
@ -112,7 +112,7 @@ static void maybe_spawn_locked() {
g_executor.pending_join = 1;
}
void grpc_executor_enqueue(grpc_closure *closure, int success) {
void grpc_executor_enqueue(grpc_closure *closure, bool success) {
gpr_mu_lock(&g_executor.mu);
if (g_executor.shutting_down == 0) {
grpc_closure_list_add(&g_executor.closures, closure, success);
@ -133,7 +133,7 @@ void grpc_executor_shutdown() {
* list below because we aren't accepting new work */
/* Execute pending callbacks; some may be performing cleanups */
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
if (pending_join) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -45,7 +45,7 @@ void grpc_executor_init();
/** Enqueue \a closure so that its callback eventually executes on a separate
* thread */
void grpc_executor_enqueue(grpc_closure *closure, int success);
void grpc_executor_enqueue(grpc_closure *closure, bool success);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown();
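
Call sites after the signature change look like the following sketch (bg_work and schedule_bg are illustrative):

static void bg_work(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  /* success carries the value passed at enqueue time; closures still
     pending at grpc_executor_shutdown() run with that same value. */
}

static void schedule_bg(void) {
  /* assumes grpc_executor_init() has already run */
  grpc_executor_enqueue(grpc_closure_create(bg_work, NULL), true);
}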

@ -218,7 +218,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
} else {
grpc_remove_fd_from_all_epoll_sets(fd->fd);
}
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, true, NULL);
}
int grpc_fd_wrapped_fd(grpc_fd *fd) {
@ -273,7 +273,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
grpc_exec_ctx_enqueue(exec_ctx, closure, !fd->shutdown);
grpc_exec_ctx_enqueue(exec_ctx, closure, !fd->shutdown, NULL);
maybe_wake_one_watcher_locked(fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
@ -296,7 +296,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
return 0;
} else {
/* waiting ==> queue closure */
grpc_exec_ctx_enqueue(exec_ctx, *st, !fd->shutdown);
grpc_exec_ctx_enqueue(exec_ctx, *st, !fd->shutdown, NULL);
*st = CLOSURE_NOT_READY;
return 1;
}

@ -120,7 +120,7 @@ void grpc_iocp_work(grpc_exec_ctx *exec_ctx, gpr_timespec deadline) {
info->has_pending_iocp = 1;
}
gpr_mu_unlock(&socket->state_mu);
grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
}
void grpc_iocp_init(void) {
@ -183,7 +183,7 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
info->has_pending_iocp = 0;
grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
} else {
info->closure = closure;
}

@ -141,7 +141,7 @@ static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_status) {
bool iomgr_status) {
delayed_add *da = arg;
if (!grpc_fd_is_orphaned(da->fd)) {
@ -154,7 +154,7 @@ static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
/* We don't care about this pollset anymore. */
if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
da->pollset->called_shutdown = 1;
grpc_exec_ctx_enqueue(exec_ctx, da->pollset->shutdown_done, 1);
grpc_exec_ctx_enqueue(exec_ctx, da->pollset->shutdown_done, true, NULL);
}
}
gpr_mu_unlock(&da->pollset->mu);
@ -178,7 +178,7 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
GRPC_FD_REF(fd, "delayed_add");
grpc_closure_init(&da->closure, perform_delayed_add, da);
pollset->in_flight_cbs++;
grpc_exec_ctx_enqueue(exec_ctx, &da->closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &da->closure, true, NULL);
}
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -243,7 +243,7 @@ void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
pollset->vtable->finish_shutdown(pollset);
grpc_exec_ctx_enqueue(exec_ctx, pollset->shutdown_done, 1);
grpc_exec_ctx_enqueue(exec_ctx, pollset->shutdown_done, true, NULL);
}
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@ -271,7 +271,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (!grpc_pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
goto done;
}
/* Check alarms - these are a global resource so we just ping
@ -365,7 +365,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
@ -381,7 +381,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->shutdown_done = closure;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!grpc_pollset_has_workers(pollset)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
}
if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
!grpc_pollset_has_workers(pollset)) {
@ -419,7 +419,8 @@ typedef struct grpc_unary_promote_args {
grpc_closure promotion_closure;
} grpc_unary_promote_args;
static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args, int success) {
static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
bool success) {
grpc_unary_promote_args *up_args = args;
const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
grpc_pollset *pollset = up_args->pollset;

@ -107,7 +107,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
} else {
pollset->on_shutdown = closure;
}
@ -165,7 +165,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
if (pollset->shutting_down && pollset->on_shutdown != NULL) {
grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, 1);
grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, true, NULL);
pollset->on_shutdown = NULL;
}
goto done;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -152,7 +152,7 @@ done:
/* Callback to be passed to grpc_executor to asynch-ify
* grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, int success) {
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, bool success) {
request *r = rp;
grpc_resolved_addresses *resolved =
grpc_blocking_resolve_address(r->name, r->default_port);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -135,7 +135,7 @@ done:
/* Callback to be passed to grpc_executor to asynch-ify
* grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, int success) {
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, bool success) {
request *r = rp;
grpc_resolved_addresses *resolved =
grpc_blocking_resolve_address(r->name, r->default_port);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -91,7 +91,7 @@ error:
return 0;
}
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, int success) {
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, bool success) {
int done;
async_connect *ac = acp;
if (grpc_tcp_trace) {
@ -111,7 +111,7 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, int success) {
}
}
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, int success) {
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, bool success) {
async_connect *ac = acp;
int so_error = 0;
socklen_t so_error_size;
@ -206,7 +206,7 @@ finish:
gpr_free(ac->addr_str);
gpr_free(ac);
}
grpc_exec_ctx_enqueue(exec_ctx, closure, *ep != NULL);
grpc_exec_ctx_enqueue(exec_ctx, closure, *ep != NULL, NULL);
}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
@ -243,7 +243,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
addr_len = sizeof(addr4_copy);
}
if (!prepare_socket(addr, fd)) {
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
return;
}
@ -259,14 +259,14 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
if (err >= 0) {
*ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, closure, true, NULL);
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
grpc_exec_ctx_enqueue(exec_ctx, closure, false, NULL);
goto done;
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -74,7 +74,7 @@ static void async_connect_unlock_and_cleanup(async_connect *ac) {
}
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, int occured) {
static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, bool occured) {
async_connect *ac = acp;
gpr_mu_lock(&ac->mu);
/* If the alarm didn't occur, it got cancelled. */
@ -84,7 +84,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, int occured) {
async_connect_unlock_and_cleanup(ac);
}
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, int from_iocp) {
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, bool from_iocp) {
async_connect *ac = acp;
SOCKET sock = ac->socket->socket;
grpc_endpoint **ep = ac->endpoint;
@ -215,7 +215,7 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
grpc_exec_ctx_enqueue(exec_ctx, on_done, 0);
grpc_exec_ctx_enqueue(exec_ctx, on_done, false, NULL);
}
#endif /* GPR_WINSOCK_SOCKET */

@ -100,9 +100,9 @@ typedef struct {
} grpc_tcp;
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
int success);
bool success);
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
int success);
bool success);
static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
@ -247,7 +247,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
}
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
int success) {
bool success) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
@ -273,7 +273,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->finished_edge = 0;
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
grpc_exec_ctx_enqueue(exec_ctx, &tcp->read_closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &tcp->read_closure, true, NULL);
}
}
@ -360,7 +360,7 @@ static flush_result tcp_flush(grpc_tcp *tcp) {
}
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
int success) {
bool success) {
grpc_tcp *tcp = (grpc_tcp *)arg;
flush_result status;
grpc_closure *cb;
@ -407,7 +407,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (buf->length == 0) {
GPR_TIMER_END("tcp_write", 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, 1);
grpc_exec_ctx_enqueue(exec_ctx, cb, true, NULL);
return;
}
tcp->outgoing_buffer = buf;
@ -420,7 +420,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
grpc_exec_ctx_enqueue(exec_ctx, cb, status == FLUSH_DONE);
grpc_exec_ctx_enqueue(exec_ctx, cb, status == FLUSH_DONE, NULL);
}
GPR_TIMER_END("tcp_write", 0);

@ -160,7 +160,7 @@ grpc_tcp_server *grpc_tcp_server_create(grpc_closure *shutdown_complete) {
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, true, NULL);
}
gpr_mu_destroy(&s->mu);
@ -174,7 +174,8 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(s);
}
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, int success) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
bool success) {
grpc_tcp_server *s = server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
@ -317,7 +318,7 @@ error:
}
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
grpc_tcp_listener *sp = arg;
grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
sp->fd_index};
@ -602,7 +603,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* Complete shutdown_starting work before destroying. */
grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&s->mu);
grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting);
grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
gpr_mu_unlock(&s->mu);
if (exec_ctx == NULL) {
grpc_exec_ctx_flush(&local_exec_ctx);

@ -119,7 +119,7 @@ grpc_tcp_server *grpc_tcp_server_create(grpc_closure *shutdown_complete) {
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, true, NULL);
}
/* Now that the accepts have been aborted, we can destroy the sockets.
@ -173,7 +173,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* Complete shutdown_starting work before destroying. */
grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&s->mu);
grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting);
grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
gpr_mu_unlock(&s->mu);
if (exec_ctx == NULL) {
grpc_exec_ctx_flush(&local_exec_ctx);
@ -311,7 +311,7 @@ failure:
}
/* Event manager callback when reads are ready. */
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, bool from_iocp) {
grpc_tcp_listener *sp = arg;
grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0};
SOCKET sock = sp->new_socket;

@ -138,7 +138,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, int success) {
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, bool success) {
grpc_tcp *tcp = tcpp;
grpc_closure *cb = tcp->read_cb;
grpc_winsocket *socket = tcp->socket;
@ -184,7 +184,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF buffer;
if (tcp->shutting_down) {
grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
return;
}
@ -208,7 +208,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* Did we get data immediately ? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) {
info->bytes_transfered = bytes_read;
grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, 1);
grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, true, NULL);
return;
}
@ -221,7 +221,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
info->wsa_error = wsa_error;
grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, 0);
grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, false, NULL);
return;
}
}
@ -230,7 +230,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, int success) {
static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, bool success) {
grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->write_info;
@ -273,7 +273,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
size_t len;
if (tcp->shutting_down) {
grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
return;
}
@ -301,9 +301,9 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
connection that has its send queue filled up. But if we don't, then we can
avoid doing an async write operation at all. */
if (info->wsa_error != WSAEWOULDBLOCK) {
int ok = 0;
bool ok = false;
if (status == 0) {
ok = 1;
ok = true;
GPR_ASSERT(bytes_sent == tcp->write_slices->length);
} else {
if (socket->read_info.wsa_error != WSAECONNRESET) {
@ -313,7 +313,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
}
if (allocated) gpr_free(allocated);
grpc_exec_ctx_enqueue(exec_ctx, cb, ok);
grpc_exec_ctx_enqueue(exec_ctx, cb, ok, NULL);
return;
}
@ -330,7 +330,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
TCP_UNREF(tcp, "write");
grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
return;
}
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -224,7 +224,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
shard_type *shard = &g_shards[shard_idx(timer)];
gpr_mu_lock(&shard->mu);
if (!timer->triggered) {
grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, 0);
grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, false, NULL);
timer->triggered = 1;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
@ -290,7 +290,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, success);
grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, success, NULL);
n++;
}
*new_min_deadline = compute_min_deadline(shard);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -47,9 +47,7 @@
#include "src/core/iomgr/workqueue_windows.h"
#endif
/** A workqueue represents a list of work to be executed asynchronously. */
struct grpc_workqueue;
typedef struct grpc_workqueue grpc_workqueue;
/* grpc_workqueue is forward declared in exec_ctx.h */
/** Create a work queue */
grpc_workqueue *grpc_workqueue_create(grpc_exec_ctx *exec_ctx);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -45,7 +45,7 @@
#include "src/core/iomgr/fd_posix.h"
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success);
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, bool success);
grpc_workqueue *grpc_workqueue_create(grpc_exec_ctx *exec_ctx) {
char name[32];
@ -110,7 +110,7 @@ void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
gpr_mu_unlock(&workqueue->mu);
}
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
grpc_workqueue *workqueue = arg;
if (!success) {

@ -793,7 +793,7 @@ static void md_only_test_destruct(grpc_call_credentials *creds) {
}
static void on_simulated_token_fetch_done(grpc_exec_ctx *exec_ctx,
void *user_data, int success) {
void *user_data, bool success) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
@ -812,7 +812,7 @@ static void md_only_test_get_request_metadata(
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
grpc_executor_enqueue(
grpc_closure_create(on_simulated_token_fetch_done, cb_arg), 1);
grpc_closure_create(on_simulated_token_fetch_done, cb_arg), true);
} else {
cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -86,7 +86,7 @@ static void on_compute_engine_detection_http_response(
gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset));
}
static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, int s) {
static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool s) {
grpc_pollset_destroy(p);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -61,10 +61,10 @@ typedef struct {
} grpc_security_handshake;
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
void *setup, int success);
void *setup, bool success);
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *setup,
int success);
bool success);
static void security_connector_remove_handshake(grpc_security_handshake *h) {
grpc_security_connector_handshake_list *node;
@ -198,7 +198,8 @@ static void send_handshake_bytes_to_peer(grpc_exec_ctx *exec_ctx,
}
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
void *handshake, int success) {
void *handshake,
bool success) {
grpc_security_handshake *h = handshake;
size_t consumed_slice_size = 0;
tsi_result result = TSI_OK;
@ -265,7 +266,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
/* If handshake is NULL, the handshake is done. */
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx,
void *handshake, int success) {
void *handshake, bool success) {
grpc_security_handshake *h = handshake;
/* Make sure that write is OK. */

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -126,7 +126,7 @@ static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur,
}
static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
int success) {
bool success) {
if (grpc_trace_secure_endpoint) {
size_t i;
for (i = 0; i < ep->read_buffer->count; i++) {
@ -137,11 +137,11 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
}
}
ep->read_buffer = NULL;
grpc_exec_ctx_enqueue(exec_ctx, ep->read_cb, success);
grpc_exec_ctx_enqueue(exec_ctx, ep->read_cb, success, NULL);
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
unsigned i;
uint8_t keep_looping = 0;
tsi_result result = TSI_OK;
@ -315,7 +315,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, false, NULL);
return;
}

@ -61,6 +61,14 @@ static const char *installed_roots_path =
INSTALL_PREFIX "/share/grpc/roots.pem";
#endif
/* -- Overridden default roots. -- */
static grpc_ssl_roots_override_callback ssl_roots_override_cb = NULL;
void grpc_set_ssl_roots_override_callback(grpc_ssl_roots_override_callback cb) {
ssl_roots_override_cb = cb;
}
/* -- Cipher suites. -- */
/* Defines the cipher suites that we accept by default. All these cipher suites
@ -595,23 +603,44 @@ static grpc_security_connector_vtable ssl_channel_vtable = {
static grpc_security_connector_vtable ssl_server_vtable = {
ssl_server_destroy, ssl_server_do_handshake, ssl_server_check_peer};
static gpr_slice default_pem_root_certs;
static gpr_slice compute_default_pem_root_certs_once(void) {
gpr_slice result = gpr_empty_slice();
static void init_default_pem_root_certs(void) {
/* First try to load the roots from the environment. */
char *default_root_certs_path =
gpr_getenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR);
if (default_root_certs_path == NULL) {
default_pem_root_certs = gpr_empty_slice();
} else {
default_pem_root_certs = gpr_load_file(default_root_certs_path, 0, NULL);
if (default_root_certs_path != NULL) {
result = gpr_load_file(default_root_certs_path, 0, NULL);
gpr_free(default_root_certs_path);
}
/* Try overridden roots if needed. */
grpc_ssl_roots_override_result ovrd_res = GRPC_SSL_ROOTS_OVERRIDE_FAIL;
if (GPR_SLICE_IS_EMPTY(result) && ssl_roots_override_cb != NULL) {
char *pem_root_certs = NULL;
ovrd_res = ssl_roots_override_cb(&pem_root_certs);
if (ovrd_res == GRPC_SSL_ROOTS_OVERRIDE_OK) {
GPR_ASSERT(pem_root_certs != NULL);
result = gpr_slice_new(pem_root_certs, strlen(pem_root_certs), gpr_free);
}
}
/* Fall back to installed certs if needed. */
if (GPR_SLICE_IS_EMPTY(default_pem_root_certs)) {
default_pem_root_certs = gpr_load_file(installed_roots_path, 0, NULL);
if (GPR_SLICE_IS_EMPTY(result) &&
ovrd_res != GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY) {
result = gpr_load_file(installed_roots_path, 0, NULL);
}
return result;
}
static gpr_slice default_pem_root_certs;
static void init_default_pem_root_certs(void) {
default_pem_root_certs = compute_default_pem_root_certs_once();
}
gpr_slice grpc_get_default_ssl_roots_for_testing(void) {
return compute_default_pem_root_certs_once();
}
size_t grpc_get_default_ssl_roots(const unsigned char **pem_root_certs) {
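
Net effect of this hunk: roots are now resolved in order — the GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR path, then the new override callback, then the installed roots file (skipped if the override fails permanently). A sketch of an application-supplied override follows; the callback body and PEM literal are illustrative, and gpr_strdup is assumed available from <grpc/support/string_util.h>:

static grpc_ssl_roots_override_result my_roots_override(char **pem_root_certs) {
  /* Illustrative PEM; the "..." stands in for real certificate data. The
     core wraps this buffer with gpr_slice_new(..., gpr_free), so it must come
     from the gpr allocator. Returning GRPC_SSL_ROOTS_OVERRIDE_FAIL instead
     falls through to the installed roots, while
     GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY suppresses that fallback too. */
  *pem_root_certs = gpr_strdup(
      "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n");
  return GRPC_SSL_ROOTS_OVERRIDE_OK;
}

/* during process setup: */
grpc_set_ssl_roots_override_callback(my_roots_override);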

@ -209,6 +209,9 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
/* Gets the default ssl roots. */
size_t grpc_get_default_ssl_roots(const unsigned char **pem_root_certs);
/* Exposed for TESTING ONLY! */
gpr_slice grpc_get_default_ssl_roots_for_testing(void);
/* Config for ssl servers. */
typedef struct {
unsigned char **pem_private_keys;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -153,7 +153,7 @@ static void on_md_processing_done(
}
static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
int success) {
bool success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;

@ -142,7 +142,7 @@ static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
on_accept, state);
}
static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep, int success) {
static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep, bool success) {
grpc_server_secure_state *state = statep;
if (state->destroy_callback != NULL) {
state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -0,0 +1,141 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_WINDOWS_SUBPROCESS
#include <windows.h>
#include <string.h>
#include <tchar.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/subprocess.h>
#include "src/core/support/string.h"
#include "src/core/support/string_win32.h"
struct gpr_subprocess {
PROCESS_INFORMATION pi;
int joined;
int interrupted;
};
const char *gpr_subprocess_binary_extension() { return ".exe"; }
gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
gpr_subprocess *r;
STARTUPINFO si;
PROCESS_INFORMATION pi;
char *args = gpr_strjoin_sep(argv, argc, " ", NULL);
TCHAR *args_tchar;
args_tchar = gpr_char_to_tchar(args);
gpr_free(args);
memset(&si, 0, sizeof(si));
si.cb = sizeof(si);
memset(&pi, 0, sizeof(pi));
if (!CreateProcess(NULL, args_tchar, NULL, NULL, FALSE,
CREATE_NEW_PROCESS_GROUP, NULL, NULL, &si, &pi)) {
gpr_free(args_tchar);
return NULL;
}
gpr_free(args_tchar);
r = gpr_malloc(sizeof(gpr_subprocess));
memset(r, 0, sizeof(*r));
r->pi = pi;
return r;
}
void gpr_subprocess_destroy(gpr_subprocess *p) {
if (p) {
if (!p->joined) {
gpr_subprocess_interrupt(p);
gpr_subprocess_join(p);
}
if (p->pi.hProcess) {
CloseHandle(p->pi.hProcess);
}
if (p->pi.hThread) {
CloseHandle(p->pi.hThread);
}
gpr_free(p);
}
}
int gpr_subprocess_join(gpr_subprocess *p) {
DWORD dwExitCode;
if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
if (dwExitCode == STILL_ACTIVE) {
if (WaitForSingleObject(p->pi.hProcess, INFINITE) == WAIT_OBJECT_0) {
p->joined = 1;
goto getExitCode;
}
return -1; // failed to join
} else {
goto getExitCode;
}
} else {
return -1; // failed to get exit code
}
getExitCode:
if (p->interrupted) {
return 0;
}
if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
return dwExitCode;
} else {
return -1; // failed to get exit code
}
}
void gpr_subprocess_interrupt(gpr_subprocess *p) {
DWORD dwExitCode;
if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
if (dwExitCode == STILL_ACTIVE) {
gpr_log(GPR_INFO, "sending ctrl-break");
GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, p->pi.dwProcessId);
p->joined = 1;
p->interrupted = 1;
}
}
return;
}
#endif /* GPR_WINDOWS_SUBPROCESS */
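
A usage sketch for the new helpers; run_child, child.exe, and its flag are illustrative:

static void run_child(void) {
  const char *argv[] = {"child.exe", "--do-work"};
  gpr_subprocess *p = gpr_subprocess_create(2, argv);
  if (p != NULL) {
    /* join blocks until exit: returns -1 on failure, 0 if the process was
       interrupted, otherwise the child's exit code. */
    int status = gpr_subprocess_join(p);
    gpr_log(GPR_INFO, "child exited with %d", status);
    /* destroy would interrupt and join first had we not joined already. */
    gpr_subprocess_destroy(p);
  }
}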

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -48,7 +48,7 @@ struct grpc_alarm {
static void do_nothing_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
grpc_cq_completion *c) {}
static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
grpc_alarm *alarm = arg;
grpc_cq_end_op(exec_ctx, alarm->cq, alarm->tag, success,
do_nothing_end_completion, NULL, &alarm->completion);
@ -65,7 +65,7 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
grpc_timer_init(&exec_ctx, &alarm->alarm, deadline, alarm_cb, alarm,
gpr_now(GPR_CLOCK_MONOTONIC));
grpc_cq_begin_op(cq);
grpc_cq_begin_op(cq, tag);
grpc_exec_ctx_finish(&exec_ctx);
return alarm;
}

@ -229,9 +229,9 @@ static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
grpc_status_code status,
const char *description);
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
int success);
bool success);
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
int success);
bool success);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask,
@ -351,7 +351,7 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
}
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, int success) {
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, bool success) {
size_t i;
int ii;
grpc_call *c = call;
@ -688,13 +688,13 @@ typedef struct cancel_closure {
grpc_status_code status;
} cancel_closure;
static void done_cancel(grpc_exec_ctx *exec_ctx, void *ccp, int success) {
static void done_cancel(grpc_exec_ctx *exec_ctx, void *ccp, bool success) {
cancel_closure *cc = ccp;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, cc->call, "cancel");
gpr_free(cc);
}
static void send_cancel(grpc_exec_ctx *exec_ctx, void *ccp, int success) {
static void send_cancel(grpc_exec_ctx *exec_ctx, void *ccp, bool success) {
grpc_transport_stream_op op;
cancel_closure *cc = ccp;
memset(&op, 0, sizeof(op));
@ -721,7 +721,7 @@ static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
cc->call = c;
cc->status = status;
GRPC_CALL_INTERNAL_REF(c, "cancel");
grpc_exec_ctx_enqueue(exec_ctx, &cc->closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &cc->closure, true, NULL);
return GRPC_CALL_OK;
}
@ -757,7 +757,7 @@ grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
return CALL_FROM_TOP_ELEM(elem);
}
static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
grpc_call *call = arg;
gpr_mu_lock(&call->mu);
call->have_alarm = 0;
@ -934,7 +934,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
if (bctl->is_notify_tag_closure) {
grpc_exec_ctx_enqueue(exec_ctx, bctl->notify_tag, bctl->success);
grpc_exec_ctx_enqueue(exec_ctx, bctl->notify_tag, bctl->success, NULL);
gpr_mu_lock(&call->mu);
bctl->call->used_batches =
(uint8_t)(bctl->call->used_batches &
@ -974,7 +974,7 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
}
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
int success) {
bool success) {
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
@ -993,7 +993,7 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
}
}
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, int success) {
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, bool success) {
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
grpc_call *child_call;
@ -1066,7 +1066,7 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, int success) {
}
static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
int success) {
bool success) {
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -80,7 +80,7 @@ struct grpc_channel {
/* the protobuf library will (by default) start warning at 100megs */
#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, int success);
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, bool success);
grpc_channel *grpc_channel_create_from_filters(
grpc_exec_ctx *exec_ctx, const char *target,
@ -268,7 +268,7 @@ void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
}
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
bool iomgr_success) {
grpc_channel *channel = arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
while (channel->registered_calls) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -165,11 +165,11 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
}
}
static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw, int success) {
static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw, bool success) {
partly_done(exec_ctx, pw, 1);
}
static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw, int success) {
static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw, bool success) {
partly_done(exec_ctx, pw, 0);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -80,11 +80,11 @@ static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
}
static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
bool success) {
connector_unref(exec_ctx, arg);
}
static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
connector *c = arg;
grpc_closure *notify;
grpc_endpoint *tcp = c->tcp;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -53,7 +53,7 @@ static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(arg);
}
static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
ping_result *pr = arg;
grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, success, ping_destroy, pr,
&pr->completion_storage);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -86,7 +86,7 @@ static gpr_mu g_freelist_mu;
grpc_completion_queue *g_freelist;
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cc,
int success);
bool success);
void grpc_cq_global_init(void) { gpr_mu_init(&g_freelist_mu); }
@ -169,7 +169,7 @@ void grpc_cq_internal_ref(grpc_completion_queue *cc) {
}
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
bool success) {
grpc_completion_queue *cc = arg;
GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -78,8 +78,8 @@ static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
} else if (op->recv_trailing_metadata != NULL) {
fill_metadata(elem, op->recv_trailing_metadata);
}
grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, false, NULL);
grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, false, NULL);
}
static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -128,14 +128,14 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
bool success) {
connector *c = arg;
grpc_security_connector_do_handshake(exec_ctx, &c->security_connector->base,
c->connecting_endpoint,
on_secure_handshake_done, c);
}
static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static void connected(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
connector *c = arg;
grpc_closure *notify;
grpc_endpoint *tcp = c->newly_connecting_endpoint;

@ -260,7 +260,7 @@ struct shutdown_cleanup_args {
};
static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_status_ignored) {
bool iomgr_status_ignored) {
struct shutdown_cleanup_args *a = arg;
gpr_slice_unref(a->slice);
gpr_free(a);
@ -313,7 +313,7 @@ static void request_matcher_destroy(request_matcher *rm) {
gpr_stack_lockfree_destroy(rm->requests);
}
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem, int success) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem, bool success) {
grpc_call_destroy(grpc_call_from_top_element(elem));
}
@ -328,7 +328,7 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
}
}
@ -392,7 +392,7 @@ static void orphan_channel(channel_data *chand) {
}
static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
int success) {
bool success) {
channel_data *chand = cd;
grpc_server *server = chand->server;
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
@ -407,7 +407,8 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand) {
maybe_finish_shutdown(exec_ctx, chand->server);
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
grpc_exec_ctx_enqueue(exec_ctx, &chand->finish_destroy_channel_closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &chand->finish_destroy_channel_closure, true,
NULL);
}
static void finish_start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_server *server,
@ -420,7 +421,7 @@ static void finish_start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_server *server,
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
return;
}
@ -569,7 +570,7 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
}
static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
int success) {
bool success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
gpr_timespec op_deadline;
@ -609,7 +610,7 @@ static void server_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
}
static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
int success) {
bool success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
if (success) {
@ -620,7 +621,7 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true, NULL);
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
@ -653,7 +654,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
}
static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
int iomgr_status_ignored) {
bool iomgr_status_ignored) {
channel_data *chand = cd;
grpc_server *server = chand->server;
if (chand->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
@ -985,7 +986,7 @@ void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
}
static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
int success) {
bool success) {
grpc_server *server = s;
gpr_mu_lock(&server->mu_global);
server->listeners_destroyed++;
@ -1140,7 +1141,8 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, true,
NULL);
} else {
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
@ -1229,7 +1231,7 @@ done:
}
static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
void *user_data, int success);
void *user_data, bool success);
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
gpr_slice slice = value->slice;
@ -1315,7 +1317,7 @@ static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
}
static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx, void *prc,
int success) {
bool success) {
requested_call *rc = prc;
grpc_call *call = *rc->call;
grpc_call_element *elem =

@ -82,7 +82,7 @@ static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
grpc_closure *destroy_done) {
grpc_tcp_server *tcp = tcpp;
grpc_tcp_server_unref(exec_ctx, tcp);
grpc_exec_ctx_enqueue(exec_ctx, destroy_done, 1);
grpc_exec_ctx_enqueue(exec_ctx, destroy_done, true, NULL);
}
int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {

@ -283,7 +283,7 @@ static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
len_val_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)len_val, 1);
GRPC_CHTTP2_WRITE_VARINT(key_index, 2, 0x40,
add_tiny_header_data(st, len_pfx), len_pfx);
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, 0x00,
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
add_header_data(st, gpr_slice_ref(value_slice));
}
@ -300,7 +300,7 @@ static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
len_val_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)len_val, 1);
GRPC_CHTTP2_WRITE_VARINT(key_index, 4, 0x00,
add_tiny_header_data(st, len_pfx), len_pfx);
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, 0x00,
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
add_header_data(st, gpr_slice_ref(value_slice));
}
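Context for the two emit_lithdr hunks: in HPACK (RFC 7541, section 5.2) a string literal's length is a 7-bit prefix integer whose high bit H flags Huffman coding. Hardcoding 0x00 emitted H = 0 even for values that had been Huffman-compressed, so a decoder would read the compressed octets as raw text; passing huffman_prefix through (presumably 0x80 for compressed values, 0x00 for raw) writes the correct flag:

    /* first octet of an HPACK string literal:
       +---+---+---+---+---+---+---+---+
       | H |    String Length (7+)     |
       +---+---+---+---+---+---+---+---+
       H = 1 -> the following octets are Huffman coded */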

@ -35,6 +35,7 @@
#define GRPC_INTERNAL_CORE_CHTTP2_INTERNAL_H
#include <assert.h>
#include <stdbool.h>
#include "src/core/iomgr/endpoint.h"
#include "src/core/transport/chttp2/frame.h"
@ -67,6 +68,9 @@ typedef enum {
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
/* streams waiting for outgoing flow-control window in the writing path;
* they are merged into the stalled or writable list under the transport lock. */
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
@ -488,7 +492,7 @@ void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint);
void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
void *transport_writing, int success);
void *transport_writing, bool success);
void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
@ -504,11 +508,11 @@ void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
/** Get a writable stream
returns non-zero if there was a stream available */
void grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
/** Get a writable stream
returns non-zero if there was a stream available */
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
@ -560,9 +564,12 @@ int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_stalled_by_transport(
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
void grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing, bool is_window_available);
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
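The new list splits "stalled" into two phases: a stream that exhausts the transport's outgoing window while a write is in flight is parked on the writing-stalled list, and the flush call (implemented further down) redistributes it under the transport lock once the write finishes. Sketched lifecycle, as this diff suggests:

    writing path, outgoing window exhausted
        -> GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT
    grpc_chttp2_cleanup_writing, under the transport lock:
        outgoing_window > 0  -> writable list (write retried)
        outgoing_window == 0 -> GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT
                                (waits for a WINDOW_UPDATE)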

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -313,12 +313,27 @@ int grpc_chttp2_list_pop_check_read_ops(
return r;
}
void grpc_chttp2_list_add_stalled_by_transport(
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
bool is_window_available) {
grpc_chttp2_stream *stream;
grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
while (stream_list_pop(transport, &stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
if (is_window_available) {
grpc_chttp2_list_add_writable_stream(&transport->global, &stream->global);
} else {
stream_list_add(transport, stream, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
}
}
int grpc_chttp2_list_pop_stalled_by_transport(

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -130,8 +130,8 @@ int grpc_chttp2_unlocking_check_writes(
GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
}
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
}
}
if (stream_global->send_trailing_metadata) {
@ -188,7 +188,7 @@ void grpc_chttp2_perform_writes(
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
} else {
grpc_exec_ctx_enqueue(exec_ctx, &transport_writing->done_cb, 1);
grpc_exec_ctx_enqueue(exec_ctx, &transport_writing->done_cb, true, NULL);
}
}
@ -273,8 +273,8 @@ static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
stream_writing->sent_message = 1;
}
} else if (transport_writing->outgoing_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
@ -312,8 +312,8 @@ static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
/* do nothing - already reffed */
}
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
} else {
@ -329,6 +329,10 @@ void grpc_chttp2_cleanup_writing(
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
bool is_window_available = transport_writing->outgoing_window > 0;
grpc_chttp2_list_flush_writing_stalled_by_transport(transport_writing,
is_window_available);
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {

@ -86,14 +86,14 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
/* forward declarations of various callbacks that we'll build closures around */
static void writing_action(grpc_exec_ctx *exec_ctx, void *t,
int iomgr_success_ignored);
bool iomgr_success_ignored);
/** Set a transport level setting, and push it to our peer */
static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
uint32_t value);
/** Endpoint callback to process incoming data */
static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success);
static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, bool success);
/** Start disconnection chain */
static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
@ -183,7 +183,7 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
and maybe they hold resources that need to be freed */
while (t->global.pings.next != &t->global.pings) {
grpc_chttp2_outstanding_ping *ping = t->global.pings.next;
grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, 0);
grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, false, NULL);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
@ -602,7 +602,7 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
t->parsing_active)) {
t->writing_active = 1;
REF_TRANSPORT(t, "writing");
grpc_exec_ctx_enqueue(exec_ctx, &t->writing_action, 1);
grpc_exec_ctx_enqueue(exec_ctx, &t->writing_action, true, NULL);
prevent_endpoint_shutdown(t);
}
check_read_ops(exec_ctx, &t->global);
@ -631,7 +631,7 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
}
void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
void *transport_writing_ptr, int success) {
void *transport_writing_ptr, bool success) {
grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr;
grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
grpc_chttp2_stream_global *stream_global;
@ -669,7 +669,7 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
}
static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
int iomgr_success_ignored) {
bool iomgr_success_ignored) {
grpc_chttp2_transport *t = gt;
GPR_TIMER_BEGIN("writing_action", 0);
grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
@ -759,7 +759,7 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
closure->final_data |= 1;
}
if (closure->final_data < 2) {
grpc_exec_ctx_enqueue(exec_ctx, closure, closure->final_data == 0);
grpc_exec_ctx_enqueue(exec_ctx, closure, closure->final_data == 0, NULL);
}
*pclosure = NULL;
}
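As I read the final_data arithmetic above: bit 0 is a sticky failure flag (|= 1 on any failed step) and the higher bits count outstanding steps in units of two, so final_data < 2 means no steps remain and final_data == 0 means none failed. A hypothetical illustration; the += 2 / -= 2 barrier lines are assumed from call sites not shown here:

    closure->final_data += 2;                 /* one barrier per pending step */
    ...
    closure->final_data -= 2;                 /* a step completed */
    if (!success) closure->final_data |= 1;   /* remember the failure */
    if (closure->final_data < 2) {            /* last step out schedules it */
      grpc_exec_ctx_enqueue(exec_ctx, closure, closure->final_data == 0, NULL);
    }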
@ -777,7 +777,7 @@ static int contains_non_ok_status(
return 0;
}
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, int success) {}
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, bool success) {}
static void perform_stream_op_locked(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
@ -934,7 +934,7 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
for (ping = transport_global->pings.next; ping != &transport_global->pings;
ping = ping->next) {
if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, 1);
grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, true, NULL);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
@ -951,7 +951,7 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
lock(t);
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, true, NULL);
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@ -1022,11 +1022,13 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
*stream_global->recv_message = grpc_chttp2_incoming_frame_queue_pop(
&stream_global->incoming_frames);
GPR_ASSERT(*stream_global->recv_message != NULL);
grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, 1);
grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, true,
NULL);
stream_global->recv_message_ready = NULL;
} else if (stream_global->published_trailing_metadata) {
*stream_global->recv_message = NULL;
grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, 1);
grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, true,
NULL);
stream_global->recv_message_ready = NULL;
}
}
@ -1336,7 +1338,7 @@ static void read_error_locked(grpc_exec_ctx *exec_ctx,
}
/* tcp read callback */
static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, bool success) {
size_t i;
int keep_reading = 0;
grpc_chttp2_transport *t = tp;
@ -1523,7 +1525,7 @@ static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
unlock(exec_ctx, bs->transport);
return 1;
} else if (bs->failed) {
grpc_exec_ctx_enqueue(exec_ctx, on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, on_complete, false, NULL);
unlock(exec_ctx, bs->transport);
return 0;
} else {
@ -1552,7 +1554,7 @@ void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&bs->transport->mu);
if (bs->on_next != NULL) {
*bs->next = slice;
grpc_exec_ctx_enqueue(exec_ctx, bs->on_next, 1);
grpc_exec_ctx_enqueue(exec_ctx, bs->on_next, true, NULL);
bs->on_next = NULL;
} else {
gpr_slice_buffer_add(&bs->slices, slice);
@ -1567,7 +1569,7 @@ void grpc_chttp2_incoming_byte_stream_finished(
if (from_parsing_thread) {
gpr_mu_lock(&bs->transport->mu);
}
grpc_exec_ctx_enqueue(exec_ctx, bs->on_next, 0);
grpc_exec_ctx_enqueue(exec_ctx, bs->on_next, false, NULL);
bs->on_next = NULL;
bs->failed = 1;
if (from_parsing_thread) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -78,7 +78,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
} else {
success = 0;
}
grpc_exec_ctx_enqueue(exec_ctx, w->notify, success);
grpc_exec_ctx_enqueue(exec_ctx, w->notify, success, NULL);
gpr_free(w);
}
gpr_free(tracker->name);
@ -109,7 +109,7 @@ int grpc_connectivity_state_notify_on_state_change(
if (current == NULL) {
grpc_connectivity_state_watcher *w = tracker->watchers;
if (w != NULL && w->notify == notify) {
grpc_exec_ctx_enqueue(exec_ctx, notify, 0);
grpc_exec_ctx_enqueue(exec_ctx, notify, false, NULL);
tracker->watchers = w->next;
gpr_free(w);
return 0;
@ -117,7 +117,7 @@ int grpc_connectivity_state_notify_on_state_change(
while (w != NULL) {
grpc_connectivity_state_watcher *rm_candidate = w->next;
if (rm_candidate != NULL && rm_candidate->notify == notify) {
grpc_exec_ctx_enqueue(exec_ctx, notify, 0);
grpc_exec_ctx_enqueue(exec_ctx, notify, false, NULL);
w->next = w->next->next;
gpr_free(rm_candidate);
return 0;
@ -128,7 +128,7 @@ int grpc_connectivity_state_notify_on_state_change(
} else {
if (tracker->current_state != *current) {
*current = tracker->current_state;
grpc_exec_ctx_enqueue(exec_ctx, notify, 1);
grpc_exec_ctx_enqueue(exec_ctx, notify, true, NULL);
} else {
grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
w->current = current;
@ -158,7 +158,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
while ((w = tracker->watchers) != NULL) {
*w->current = tracker->current_state;
tracker->watchers = w->next;
grpc_exec_ctx_enqueue(exec_ctx, w->notify, 1);
grpc_exec_ctx_enqueue(exec_ctx, w->notify, true, NULL);
gpr_free(w);
}
}
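For orientation, the connectivity hunks all follow one pattern: the tracker owns a linked list of watchers, a cancelled wait is flushed with success = false, and a satisfied one with success = true. A caller-side sketch (state_changed_cb and my_arg are hypothetical):

    grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
    grpc_closure on_changed;
    grpc_closure_init(&on_changed, state_changed_cb, my_arg);
    grpc_connectivity_state_notify_on_state_change(exec_ctx, tracker, &state,
                                                   &on_changed);
    /* fires at once (success = true) if the tracker's state already
       differs from `state`; otherwise parks until the next
       grpc_connectivity_state_set on the tracker. */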

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -59,7 +59,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
#endif
if (gpr_unref(&refcount->refs)) {
grpc_exec_ctx_enqueue(exec_ctx, &refcount->destroy, 1);
grpc_exec_ctx_enqueue(exec_ctx, &refcount->destroy, true, NULL);
}
}
@ -125,8 +125,8 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op) {
grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, false, NULL);
grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, false, NULL);
}
void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
@ -150,7 +150,7 @@ typedef struct {
grpc_closure closure;
} close_message_data;
static void free_message(grpc_exec_ctx *exec_ctx, void *p, int iomgr_success) {
static void free_message(grpc_exec_ctx *exec_ctx, void *p, bool iomgr_success) {
close_message_data *cmd = p;
gpr_slice_unref(cmd->message);
if (cmd->then_call != NULL) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -81,16 +81,29 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount);
/* Transport stream op: a set of operations to perform on a transport
against a single stream */
typedef struct grpc_transport_stream_op {
/** Send initial metadata to the peer, from the provided metadata batch. */
grpc_metadata_batch *send_initial_metadata;
/** Send trailing metadata to the peer, from the provided metadata batch. */
grpc_metadata_batch *send_trailing_metadata;
/** Send message data to the peer, from the provided byte stream. */
grpc_byte_stream *send_message;
/** Receive initial metadata from the stream, into provided metadata batch. */
grpc_metadata_batch *recv_initial_metadata;
/** Receive message data from the stream, into provided byte stream. */
grpc_byte_stream **recv_message;
/** Should be enqueued when one message is ready to be processed. */
grpc_closure *recv_message_ready;
/** Receive trailing metadata from the stream, into provided metadata batch.
*/
grpc_metadata_batch *recv_trailing_metadata;
/** Should be enqueued when all requested operations (excluding recv_message,
which has its own closure) in a given batch have been completed. */
grpc_closure *on_complete;
/** If != GRPC_STATUS_OK, cancel this stream */
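The newly documented fields compose into a batch: unused operations stay NULL, recv_message_ready fires once per received message, and on_complete fires once for everything else. A sketch of filling one batch (initial_md, payload, and done are hypothetical locals):

    grpc_transport_stream_op op;
    memset(&op, 0, sizeof(op));              /* all operations off by default */
    op.send_initial_metadata = &initial_md;  /* grpc_metadata_batch to send */
    op.send_message = &payload;              /* grpc_byte_stream to send */
    op.on_complete = &done;                  /* runs when both sends finish */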

@ -49,6 +49,10 @@ endlocal
@rem copy resulting nuget packages to artifacts directory
xcopy /Y /I *.nupkg ..\..\artifacts\
@rem create a zipfile with the artifacts as well
powershell -Command "Add-Type -Assembly 'System.IO.Compression.FileSystem'; [System.IO.Compression.ZipFile]::CreateFromDirectory('..\..\artifacts', 'csharp_nugets.zip');"
xcopy /Y /I csharp_nugets.zip ..\..\artifacts\
goto :EOF
:error

@ -112,8 +112,8 @@ void InitCallErrorConstants(Local<Object> exports) {
Nan::Set(exports, Nan::New("callError").ToLocalChecked(), call_error);
Local<Value> OK(Nan::New<Uint32, uint32_t>(GRPC_CALL_OK));
Nan::Set(call_error, Nan::New("OK").ToLocalChecked(), OK);
Local<Value> ERROR(Nan::New<Uint32, uint32_t>(GRPC_CALL_ERROR));
Nan::Set(call_error, Nan::New("ERROR").ToLocalChecked(), ERROR);
Local<Value> CALL_ERROR(Nan::New<Uint32, uint32_t>(GRPC_CALL_ERROR));
Nan::Set(call_error, Nan::New("ERROR").ToLocalChecked(), CALL_ERROR);
Local<Value> NOT_ON_SERVER(
Nan::New<Uint32, uint32_t>(GRPC_CALL_ERROR_NOT_ON_SERVER));
Nan::Set(call_error, Nan::New("NOT_ON_SERVER").ToLocalChecked(),
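The ERROR -> CALL_ERROR rename above is most likely a name-collision fix: on Windows builds wingdi.h (pulled in via windows.h) #defines ERROR 0, so a local Local<Value> named ERROR fails to compile there; the exported JavaScript key "ERROR" is unchanged.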

@ -76,7 +76,7 @@ function unaryCall(call, callback) {
*/
function streamingCall(call) {
call.on('data', function(value) {
var payload = {body: zeroBuffer(value.repsonse_size)};
var payload = {body: zeroBuffer(value.response_size)};
call.write({payload: payload});
});
call.on('end', function() {

Some files were not shown because too many files have changed in this diff.