Merge github.com:grpc/grpc into min_stack

pull/10860/head
Craig Tiller 7 years ago
commit 1cb30c2764
Changed files (number of changed lines in parentheses):
  1. .clang_complete (2)
  2. BUILD (2)
  3. CMakeLists.txt (31)
  4. INSTALL.md (4)
  5. Makefile (37)
  6. binding.gyp (83)
  7. build.yaml (11)
  8. composer.json (2)
  9. config.m4 (1)
  10. config.w32 (1)
  11. doc/PROTOCOL-WEB.md (12)
  12. doc/environment_variables.md (2)
  13. doc/interop-test-descriptions.md (33)
  14. examples/README.md (2)
  15. examples/cpp/helloworld/README.md (2)
  16. examples/csharp/helloworld-from-cli/README.md (2)
  17. examples/csharp/helloworld/README.md (2)
  18. examples/csharp/route_guide/README.md (2)
  19. examples/node/README.md (4)
  20. examples/node/dynamic_codegen/route_guide/README.md (2)
  21. examples/node/static_codegen/route_guide/README.md (2)
  22. examples/objective-c/auth_sample/AuthTestService.podspec (2)
  23. examples/objective-c/auth_sample/README.md (2)
  24. examples/objective-c/helloworld/HelloWorld.podspec (2)
  25. examples/objective-c/helloworld/README.md (4)
  26. examples/objective-c/route_guide/README.md (2)
  27. examples/objective-c/route_guide/RouteGuide.podspec (2)
  28. examples/php/README.md (4)
  29. examples/php/route_guide/README.md (2)
  30. examples/python/README.md (2)
  31. examples/python/helloworld/README.md (2)
  32. examples/python/multiplex/README.md (2)
  33. examples/python/route_guide/README.md (2)
  34. examples/ruby/README.md (2)
  35. examples/ruby/route_guide/README.md (2)
  36. gRPC-Core.podspec (19)
  37. gRPC-ProtoRPC.podspec (2)
  38. gRPC-RxLibrary.podspec (2)
  39. gRPC.podspec (2)
  40. grpc.gemspec (2)
  41. include/grpc++/alarm.h (2)
  42. include/grpc++/channel.h (12)
  43. include/grpc++/grpc++.h (2)
  44. include/grpc++/impl/codegen/async_stream.h (443)
  45. include/grpc++/impl/codegen/async_unary_call.h (92)
  46. include/grpc++/impl/codegen/call.h (19)
  47. include/grpc++/impl/codegen/call_hook.h (2)
  48. include/grpc++/impl/codegen/channel_interface.h (34)
  49. include/grpc++/impl/codegen/client_context.h (34)
  50. include/grpc++/impl/codegen/client_unary_call.h (4)
  51. include/grpc++/impl/codegen/completion_queue.h (56)
  52. include/grpc++/impl/codegen/completion_queue_tag.h (2)
  53. include/grpc++/impl/codegen/metadata_map.h (2)
  54. include/grpc++/impl/codegen/method_handler_impl.h (8)
  55. include/grpc++/impl/codegen/rpc_method.h (3)
  56. include/grpc++/impl/codegen/rpc_service_method.h (3)
  57. include/grpc++/impl/codegen/server_context.h (29)
  58. include/grpc++/impl/codegen/server_interface.h (40)
  59. include/grpc++/impl/codegen/service_type.h (46)
  60. include/grpc++/impl/codegen/sync_stream.h (440)
  61. include/grpc++/impl/codegen/time.h (6)
  62. include/grpc++/security/credentials.h (4)
  63. include/grpc++/server.h (3)
  64. include/grpc++/server_builder.h (6)
  65. include/grpc/impl/codegen/grpc_types.h (8)
  66. package.json (4)
  67. package.xml (2)
  68. setup.py (2)
  69. src/compiler/cpp_generator.cc (199)
  70. src/core/ext/filters/client_channel/client_channel.c (365)
  71. src/core/ext/filters/client_channel/client_channel.h (2)
  72. src/core/ext/filters/client_channel/client_channel_plugin.c (1)
  73. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (56)
  74. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c (6)
  75. src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c (3)
  76. src/core/ext/filters/deadline/deadline_filter.c (26)
  77. src/core/ext/filters/max_age/max_age_filter.c (18)
  78. src/core/ext/filters/message_size/message_size_filter.c (22)
  79. src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c (8)
  80. src/core/ext/filters/workarounds/workaround_utils.c (3)
  81. src/core/ext/transport/chttp2/transport/chttp2_transport.c (46)
  82. src/core/ext/transport/chttp2/transport/internal.h (7)
  83. src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c (13)
  84. src/core/lib/iomgr/ev_epollex_linux.c (56)
  85. src/core/lib/iomgr/ev_epollsig_linux.c (13)
  86. src/core/lib/iomgr/resolve_address_uv.c (4)
  87. src/core/lib/iomgr/tcp_client_uv.c (3)
  88. src/core/lib/iomgr/tcp_server_uv.c (1)
  89. src/core/lib/iomgr/tcp_uv.c (15)
  90. src/core/lib/iomgr/timer_generic.c (68)
  91. src/core/lib/iomgr/timer_manager.c (101)
  92. src/core/lib/security/credentials/jwt/jwt_verifier.c (45)
  93. src/core/lib/security/transport/client_auth_filter.c (100)
  94. src/core/lib/security/transport/security_connector.c (6)
  95. src/core/lib/security/transport/server_auth_filter.c (143)
  96. src/core/lib/support/arena.c (4)
  97. src/core/lib/support/atm.c (14)
  98. src/core/lib/support/avl.c (2)
  99. src/core/lib/support/log_linux.c (4)
  100. src/core/lib/support/mpscq.c (25)

Some files were not shown because too many files have changed in this diff.

@@ -1,3 +1,5 @@
 -Wall
 -Wc++-compat
+-Ithird_party/googletest/include
+-Ithird_party/googletest
 -Iinclude

@@ -335,6 +335,7 @@ grpc_cc_library(
 "src/core/lib/support/log_windows.c",
 "src/core/lib/support/mpscq.c",
 "src/core/lib/support/murmur_hash.c",
+"src/core/lib/support/stack_lockfree.c",
 "src/core/lib/support/string.c",
 "src/core/lib/support/string_posix.c",
 "src/core/lib/support/string_util_windows.c",
@@ -370,6 +371,7 @@ grpc_cc_library(
 "src/core/lib/support/mpscq.h",
 "src/core/lib/support/murmur_hash.h",
 "src/core/lib/support/spinlock.h",
+"src/core/lib/support/stack_lockfree.h",
 "src/core/lib/support/string.h",
 "src/core/lib/support/string_windows.h",
 "src/core/lib/support/thd_internal.h",

@@ -444,6 +444,7 @@ add_dependencies(buildtests_c gpr_host_port_test)
 add_dependencies(buildtests_c gpr_log_test)
 add_dependencies(buildtests_c gpr_mpscq_test)
 add_dependencies(buildtests_c gpr_spinlock_test)
+add_dependencies(buildtests_c gpr_stack_lockfree_test)
 add_dependencies(buildtests_c gpr_string_test)
 add_dependencies(buildtests_c gpr_sync_test)
 add_dependencies(buildtests_c gpr_thd_test)
@@ -789,6 +790,7 @@ add_library(gpr
 src/core/lib/support/log_windows.c
 src/core/lib/support/mpscq.c
 src/core/lib/support/murmur_hash.c
+src/core/lib/support/stack_lockfree.c
 src/core/lib/support/string.c
 src/core/lib/support/string_posix.c
 src/core/lib/support/string_util_windows.c
@@ -6016,6 +6018,35 @@ target_link_libraries(gpr_spinlock_test
 endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+add_executable(gpr_stack_lockfree_test
+  test/core/support/stack_lockfree_test.c
+)
+target_include_directories(gpr_stack_lockfree_test
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_BUILD_INCLUDE_DIR}
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CARES_PLATFORM_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+target_link_libraries(gpr_stack_lockfree_test
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  gpr_test_util
+  gpr
+)
+endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
 add_executable(gpr_string_test
   test/core/support/string_test.c
 )

@@ -77,7 +77,7 @@ For developers who are interested to contribute, here is how to compile the
 gRPC C Core library.
 ```sh
-$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
+$ git clone -b $(curl -L https://grpc.io/release) https://github.com/grpc/grpc
 $ cd grpc
 $ git submodule update --init
 $ make
@@ -95,7 +95,7 @@ on experience with the tools involved.
 Builds gRPC C and C++ with boringssl.
 - Install [CMake](https://cmake.org/download/).
-- Install [Active State Perl](http://www.activestate.com/activeperl/) (`choco install activeperl`)
+- Install [Active State Perl](https://www.activestate.com/activeperl/) (`choco install activeperl`)
 - Install [Ninja](https://ninja-build.org/) (`choco install ninja`)
 - Install [Go](https://golang.org/dl/) (`choco install golang`)
 - Install [yasm](http://yasm.tortall.net/) and add it to `PATH` (`choco install yasm`)

@@ -995,6 +995,7 @@ gpr_host_port_test: $(BINDIR)/$(CONFIG)/gpr_host_port_test
 gpr_log_test: $(BINDIR)/$(CONFIG)/gpr_log_test
 gpr_mpscq_test: $(BINDIR)/$(CONFIG)/gpr_mpscq_test
 gpr_spinlock_test: $(BINDIR)/$(CONFIG)/gpr_spinlock_test
+gpr_stack_lockfree_test: $(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test
 gpr_string_test: $(BINDIR)/$(CONFIG)/gpr_string_test
 gpr_sync_test: $(BINDIR)/$(CONFIG)/gpr_sync_test
 gpr_thd_test: $(BINDIR)/$(CONFIG)/gpr_thd_test
@@ -1376,6 +1377,7 @@ buildtests_c: privatelibs_c \
 $(BINDIR)/$(CONFIG)/gpr_log_test \
 $(BINDIR)/$(CONFIG)/gpr_mpscq_test \
 $(BINDIR)/$(CONFIG)/gpr_spinlock_test \
+$(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test \
 $(BINDIR)/$(CONFIG)/gpr_string_test \
 $(BINDIR)/$(CONFIG)/gpr_sync_test \
 $(BINDIR)/$(CONFIG)/gpr_thd_test \
@@ -1803,6 +1805,8 @@ test_c: buildtests_c
 $(Q) $(BINDIR)/$(CONFIG)/gpr_mpscq_test || ( echo test gpr_mpscq_test failed ; exit 1 )
 $(E) "[RUN] Testing gpr_spinlock_test"
 $(Q) $(BINDIR)/$(CONFIG)/gpr_spinlock_test || ( echo test gpr_spinlock_test failed ; exit 1 )
+$(E) "[RUN] Testing gpr_stack_lockfree_test"
+$(Q) $(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test || ( echo test gpr_stack_lockfree_test failed ; exit 1 )
 $(E) "[RUN] Testing gpr_string_test"
 $(Q) $(BINDIR)/$(CONFIG)/gpr_string_test || ( echo test gpr_string_test failed ; exit 1 )
 $(E) "[RUN] Testing gpr_sync_test"
@@ -2748,6 +2752,7 @@ LIBGPR_SRC = \
 src/core/lib/support/log_windows.c \
 src/core/lib/support/mpscq.c \
 src/core/lib/support/murmur_hash.c \
+src/core/lib/support/stack_lockfree.c \
 src/core/lib/support/string.c \
 src/core/lib/support/string_posix.c \
 src/core/lib/support/string_util_windows.c \
@@ -9692,6 +9697,38 @@ endif
 endif
+GPR_STACK_LOCKFREE_TEST_SRC = \
+test/core/support/stack_lockfree_test.c \
+GPR_STACK_LOCKFREE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GPR_STACK_LOCKFREE_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+# You can't build secure targets if you don't have OpenSSL.
+$(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test: openssl_dep_error
+else
+$(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test: $(GPR_STACK_LOCKFREE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(E) "[LD] Linking $@"
+$(Q) mkdir -p `dirname $@`
+$(Q) $(LD) $(LDFLAGS) $(GPR_STACK_LOCKFREE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test
+endif
+$(OBJDIR)/$(CONFIG)/test/core/support/stack_lockfree_test.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+deps_gpr_stack_lockfree_test: $(GPR_STACK_LOCKFREE_TEST_OBJS:.o=.dep)
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GPR_STACK_LOCKFREE_TEST_OBJS:.o=.dep)
+endif
+endif
 GPR_STRING_TEST_SRC = \
 test/core/support/string_test.c \

@@ -67,6 +67,14 @@
 'ldflags': [
 '-g',
 ],
+'cflags_c': [
+'-Werror',
+'-std=c99'
+],
+'cflags_cc': [
+'-Werror',
+'-std=c++11'
+],
 'include_dirs': [
 '.',
 'include'
@@ -164,6 +172,24 @@
 '<(node_root_dir)/deps/zlib',
 '<(node_root_dir)/deps/cares/include'
 ]
 }],
+['OS == "mac"', {
+'xcode_settings': {
+'MACOSX_DEPLOYMENT_TARGET': '10.9'
+},
+'OTHER_CFLAGS': [
+'-g',
+'-Wall',
+'-Wextra',
+'-Werror',
+'-Wno-long-long',
+'-Wno-unused-parameter',
+'-DOSATOMIC_USE_INLINED=1',
+],
+'OTHER_CPLUSPLUSFLAGS': [
+'-stdlib=libc++',
+'-std=c++11'
+],
+}]
 ]
 },
@@ -171,12 +197,6 @@
 ['OS=="win" or runtime=="electron"', {
 'targets': [
 {
-'cflags': [
-'-std=c++11',
-'-std=c99',
-'-Wall',
-'-Werror'
-],
 'target_name': 'boringssl',
 'product_prefix': 'lib',
 'type': 'static_library',
@@ -488,17 +508,6 @@
 'third_party/boringssl/ssl/tls_method.c',
 'third_party/boringssl/ssl/tls_record.c',
 ],
-'conditions': [
-['OS=="mac"', {
-'xcode_settings': {
-'MACOSX_DEPLOYMENT_TARGET': '10.9',
-'OTHER_CPLUSPLUSFLAGS': [
-'-stdlib=libc++',
-'-std=c++11'
-],
-}
-}],
-],
 },
 ],
 }],
@@ -536,11 +545,6 @@
 'targets': [
 # Only want to compile zlib under Windows
 {
-'cflags': [
-'-std=c99',
-'-Wall',
-'-Werror'
-],
 'target_name': 'z',
 'product_prefix': 'lib',
 'type': 'static_library',
@@ -569,11 +573,6 @@
 ],
 'targets': [
 {
-'cflags': [
-'-std=c99',
-'-Wall',
-'-Werror'
-],
 'target_name': 'gpr',
 'product_prefix': 'lib',
 'type': 'static_library',
@@ -604,6 +603,7 @@
 'src/core/lib/support/log_windows.c',
 'src/core/lib/support/mpscq.c',
 'src/core/lib/support/murmur_hash.c',
+'src/core/lib/support/stack_lockfree.c',
 'src/core/lib/support/string.c',
 'src/core/lib/support/string_posix.c',
 'src/core/lib/support/string_util_windows.c',
@@ -626,20 +626,8 @@
 'src/core/lib/support/tmpfile_windows.c',
 'src/core/lib/support/wrap_memcpy.c',
 ],
-"conditions": [
-['OS == "mac"', {
-'xcode_settings': {
-'MACOSX_DEPLOYMENT_TARGET': '10.9'
-}
-}]
-]
 },
 {
-'cflags': [
-'-std=c99',
-'-Wall',
-'-Werror'
-],
 'target_name': 'grpc',
 'product_prefix': 'lib',
 'type': 'static_library',
@@ -897,20 +885,12 @@
 'src/core/ext/filters/workarounds/workaround_utils.c',
 'src/core/plugin_registry/grpc_plugin_registry.c',
 ],
-"conditions": [
-['OS == "mac"', {
-'xcode_settings': {
-'MACOSX_DEPLOYMENT_TARGET': '10.9'
-}
-}]
-]
 },
 {
 'include_dirs': [
 "<!(node -e \"require('nan')\")"
 ],
 'cflags': [
-'-std=c++11',
 '-pthread',
 '-zdefs',
 '-Wno-error=deprecated-declarations'
@@ -921,15 +901,6 @@
 "boringssl",
 ]
 }],
-['OS=="mac"', {
-'xcode_settings': {
-'MACOSX_DEPLOYMENT_TARGET': '10.9',
-'OTHER_CFLAGS': [
-'-stdlib=libc++',
-'-std=c++11'
-]
-}
-}],
 ['OS=="win"', {
 'dependencies': [
 "z",

@@ -99,6 +99,7 @@ filegroups:
 - src/core/lib/support/mpscq.h
 - src/core/lib/support/murmur_hash.h
 - src/core/lib/support/spinlock.h
+- src/core/lib/support/stack_lockfree.h
 - src/core/lib/support/string.h
 - src/core/lib/support/string_windows.h
 - src/core/lib/support/thd_internal.h
@@ -129,6 +130,7 @@ filegroups:
 - src/core/lib/support/log_windows.c
 - src/core/lib/support/mpscq.c
 - src/core/lib/support/murmur_hash.c
+- src/core/lib/support/stack_lockfree.c
 - src/core/lib/support/string.c
 - src/core/lib/support/string_posix.c
 - src/core/lib/support/string_util_windows.c
@@ -2121,6 +2123,15 @@ targets:
 deps:
 - gpr_test_util
 - gpr
+- name: gpr_stack_lockfree_test
+  cpu_cost: 7
+  build: test
+  language: c
+  src:
+  - test/core/support/stack_lockfree_test.c
+  deps:
+  - gpr_test_util
+  - gpr
 - name: gpr_string_test
 build: test
 language: c

@@ -3,7 +3,7 @@
 "type": "library",
 "description": "gRPC library for PHP",
 "keywords": ["rpc"],
-"homepage": "http://grpc.io",
+"homepage": "https://grpc.io",
 "license": "Apache-2.0",
 "require": {
 "php": ">=5.5.0"

@@ -63,6 +63,7 @@ if test "$PHP_GRPC" != "no"; then
 src/core/lib/support/log_windows.c \
 src/core/lib/support/mpscq.c \
 src/core/lib/support/murmur_hash.c \
+src/core/lib/support/stack_lockfree.c \
 src/core/lib/support/string.c \
 src/core/lib/support/string_posix.c \
 src/core/lib/support/string_util_windows.c \

@@ -40,6 +40,7 @@ if (PHP_GRPC != "no") {
 "src\\core\\lib\\support\\log_windows.c " +
 "src\\core\\lib\\support\\mpscq.c " +
 "src\\core\\lib\\support\\murmur_hash.c " +
+"src\\core\\lib\\support\\stack_lockfree.c " +
 "src\\core\\lib\\support\\string.c " +
 "src\\core\\lib\\support\\string_posix.c " +
 "src\\core\\lib\\support\\string_util_windows.c " +

@@ -3,14 +3,14 @@
 gRPC-Web provides a JS client library that supports the same API
 as gRPC-Node to access a gRPC service. Due to browser limitation,
 the Web client library implements a different protocol than the
-[native gRPC protocol](http://www.grpc.io/docs/guides/wire.html).
+[native gRPC protocol](https://grpc.io/docs/guides/wire.html).
 This protocol is designed to make it easy for a proxy to translate
 between the protocols as this is the most likely deployment model.
 This document lists the differences between the two protocols.
 To help tracking future revisions, this document describes a delta
 with the protocol details specified in the
-[native gRPC protocol](http://www.grpc.io/docs/guides/wire.html).
+[native gRPC protocol](https://grpc.io/docs/guides/wire.html).
 # Design goals
@@ -31,7 +31,7 @@ web-specific features such as CORS, XSRF
 * become optional (in 1-2 years) when browsers are able to speak the native
 gRPC protocol via the new [whatwg fetch/streams API](https://github.com/whatwg/fetch)
-# Protocol differences vs [gRPC over HTTP2](http://www.grpc.io/docs/guides/wire.html)
+# Protocol differences vs [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html)
 Content-Type
@@ -53,14 +53,14 @@ HTTP wire protocols
 ---
-HTTP/2 related behavior (specified in [gRPC over HTTP2](http://www.grpc.io/docs/guides/wire.html))
+HTTP/2 related behavior (specified in [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html))
 1. stream-id is not supported or used
 2. go-away is not supported or used
 ---
-Message framing (vs. [http2-transport-mapping](http://www.grpc.io/docs/guides/wire.html#http2-transport-mapping))
+Message framing (vs. [http2-transport-mapping](https://grpc.io/docs/guides/wire.html#http2-transport-mapping))
 1. Response status encoded as part of the response body
 * Key-value pairs encoded as a HTTP/1 headers block (without the terminating newline), per https://tools.ietf.org/html/rfc7230#section-3.2
@@ -86,7 +86,7 @@ in the body.
 User Agent
 * Do NOT use User-Agent header (which is to be set by browsers, by default)
-* Use X-User-Agent: grpc-web-javascript/0.1 (follow the same format as specified in [gRPC over HTTP2](http://www.grpc.io/docs/guides/wire.html))
+* Use X-User-Agent: grpc-web-javascript/0.1 (follow the same format as specified in [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html))
 ---
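To make the framing details above concrete, here is a small C++ sketch of the 5-byte message prefix both protocols share (one flags byte, then a 4-byte big-endian length); in gRPC-Web the response-status block described above travels as one final frame whose flags byte has the most-significant bit set. The helper names are ours, not part of any gRPC library:

```cpp
#include <cstdint>
#include <string>

// Flag bit meanings in the 1-byte frame header.
constexpr uint8_t kCompressed = 0x01;    // payload is compressed
constexpr uint8_t kTrailerFrame = 0x80;  // gRPC-Web only: frame carries trailers

// Prepend the 5-byte gRPC frame header to a payload.
std::string FrameMessage(const std::string& payload, uint8_t flags) {
  std::string frame;
  frame.push_back(static_cast<char>(flags));
  uint32_t len = static_cast<uint32_t>(payload.size());
  // 4-byte big-endian length.
  frame.push_back(static_cast<char>((len >> 24) & 0xff));
  frame.push_back(static_cast<char>((len >> 16) & 0xff));
  frame.push_back(static_cast<char>((len >> 8) & 0xff));
  frame.push_back(static_cast<char>(len & 0xff));
  frame += payload;
  return frame;
}

// In gRPC-Web, the body ends with a trailer frame whose payload is an
// HTTP/1-style headers block, e.g. "grpc-status: 0\r\ngrpc-message: \r\n".
std::string FrameTrailers(const std::string& headers_block) {
  return FrameMessage(headers_block, kTrailerFrame);
}
```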

@@ -42,6 +42,8 @@ some configuration as environment variables that can be set.
 - bdp_estimator - traces behavior of bdp estimation logic
 - call_error - traces the possible errors contributing to final call status
 - channel - traces operations on the C core channel stack
+- client_channel - traces client channel activity, including resolver
+  and load balancing policy interaction
 - combiner - traces combiner lock state
 - compression - traces compression operations
 - connectivity_state - traces connectivity state changes to channels
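Like the other tracers in this list, the new `client_channel` tracer is switched on through the `GRPC_TRACE` environment variable before the library reads its environment; a minimal C++ sketch using POSIX `setenv`:

```cpp
#include <cstdlib>
#include <grpc/grpc.h>

int main() {
  // Must be set before grpc_init() reads the environment.
  setenv("GRPC_TRACE", "client_channel", 1 /* overwrite */);
  setenv("GRPC_VERBOSITY", "DEBUG", 1);
  grpc_init();
  // ... use gRPC; resolver and LB policy activity is now logged ...
  grpc_shutdown();
  return 0;
}
```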

@@ -183,7 +183,8 @@ the `response_compressed` boolean.
 Whether compression was actually performed is determined by the compression bit
 in the response's message flags. *Note that some languages may not have access
-to the message flags*.
+to the message flags, in which case the client will be unable to verify that
+the `response_compressed` boolean is obeyed by the server*.
 Server features:
@@ -218,10 +219,10 @@ Procedure:
 ```
 Client asserts:
 * call was successful
-* when `response_compressed` is true, the response MUST have the
-  compressed message flag set.
-* when `response_compressed` is false, the response MUST NOT have
-  the compressed message flag set.
+* if supported by the implementation, when `response_compressed` is true,
+  the response MUST have the compressed message flag set.
+* if supported by the implementation, when `response_compressed` is false,
+  the response MUST NOT have the compressed message flag set.
 * response payload body is 314159 bytes in size in both cases.
 * clients are free to assert that the response payload body contents are
   zero and comparing the entire response message against a golden response
@@ -304,8 +305,8 @@ Procedure:
 }
 }
 ```
-If the call fails with `INVALID_ARGUMENT`, the test fails. Otherwise, we
-continue.
+If the call does not fail with `INVALID_ARGUMENT`, the test fails.
+Otherwise, we continue.
 1. Client calls `StreamingInputCall` again, sending the *compressed* message
@@ -377,7 +378,13 @@ Client asserts:
 ### server_compressed_streaming
 This test verifies that the server can compress streaming messages and disable
-compression on individual messages.
+compression on individual messages, expecting the server's response to be
+compressed or not according to the `response_compressed` boolean.
+Whether compression was actually performed is determined by the compression bit
+in the response's message flags. *Note that some languages may not have access
+to the message flags, in which case the client will be unable to verify that the
+`response_compressed` boolean is obeyed by the server*.
 Server features:
 * [StreamingOutputCall][]
@@ -407,15 +414,14 @@ Procedure:
 Client asserts:
 * call was successful
 * exactly two responses
-* when `response_compressed` is false, the response's messages MUST
-  NOT have the compressed message flag set.
-* when `response_compressed` is true, the response's messages MUST
-  have the compressed message flag set.
+* if supported by the implementation, when `response_compressed` is false,
+  the response's messages MUST NOT have the compressed message flag set.
+* if supported by the implementation, when `response_compressed` is true,
+  the response's messages MUST have the compressed message flag set.
 * response payload bodies are sized (in order): 31415, 92653
 * clients are free to assert that the response payload body contents are
   zero and comparing the entire response messages against golden responses
 ### ping_pong
 This test verifies that full duplex bidi is supported.
@@ -1095,4 +1101,3 @@ Discussion:
 Ideally, this would be communicated via metadata and not in the
 request/response, but we want to use this test in code paths that don't yet
 fully communicate metadata.
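For implementations that do expose raw frames, the "compressed message flag" these assertions refer to is simply the low bit of the first byte of the 5-byte message prefix; a small C++ sketch of the check an interop client might perform (the helper name is ours):

```cpp
#include <cstddef>
#include <cstdint>

// Returns true if a wire-level message frame is marked compressed.
// `frame` points at the 5-byte gRPC message prefix:
//   byte 0: flags (bit 0 = compressed), bytes 1-4: big-endian length.
bool IsCompressedFrame(const uint8_t* frame, size_t len) {
  if (len < 5) return false;  // not a complete prefix
  return (frame[0] & 0x01) != 0;
}
```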

@@ -9,7 +9,7 @@ Examples for Go and Java gRPC live in their own repositories:
 * [Android Java](https://github.com/grpc/grpc-java/tree/master/examples/android)
 * [Go](https://github.com/grpc/grpc-go/tree/master/examples)
-For more comprehensive documentation, including an [overview](http://www.grpc.io/docs/) and tutorials that use this example code, visit [grpc.io](http://www.grpc.io/docs/).
+For more comprehensive documentation, including an [overview](https://grpc.io/docs/) and tutorials that use this example code, visit [grpc.io](https://grpc.io/docs/).
 ## Quick start

@@ -12,7 +12,7 @@ following command:
 ```sh
-$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
+$ git clone -b $(curl -L https://grpc.io/release) https://github.com/grpc/grpc
 ```
 Change your current directory to examples/cpp/helloworld

@@ -49,4 +49,4 @@ Tutorial
 You can find a more detailed tutorial about Grpc in [gRPC Basics: C#][]
 [helloworld.proto]:../../protos/helloworld.proto
-[gRPC Basics: C#]:http://www.grpc.io/docs/tutorials/basic/csharp.html
+[gRPC Basics: C#]:https://grpc.io/docs/tutorials/basic/csharp.html

@@ -64,4 +64,4 @@ You can find a more detailed tutorial in [gRPC Basics: C#][]
 [helloworld-from-cli]:../helloworld-from-cli/README.md
 [helloworld.proto]:../../protos/helloworld.proto
-[gRPC Basics: C#]:http://www.grpc.io/docs/tutorials/basic/csharp.html
+[gRPC Basics: C#]:https://grpc.io/docs/tutorials/basic/csharp.html

@@ -3,4 +3,4 @@
 The files in this folder are the samples used in [gRPC Basics: C#][],
 a detailed tutorial for using gRPC in C#.
-[gRPC Basics: C#]:http://www.grpc.io/docs/tutorials/basic/csharp.html
+[gRPC Basics: C#]:https://grpc.io/docs/tutorials/basic/csharp.html

@@ -12,7 +12,7 @@ INSTALL
 ```sh
 $ # Get the gRPC repository
 $ export REPO_ROOT=grpc # REPO root can be any directory of your choice
-$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc $REPO_ROOT
+$ git clone -b $(curl -L https://grpc.io/release) https://github.com/grpc/grpc $REPO_ROOT
 $ cd $REPO_ROOT
 $ cd examples/node
@@ -47,4 +47,4 @@ TUTORIAL
 You can find a more detailed tutorial in [gRPC Basics: Node.js][]
 [Install gRPC Node]:../../src/node
-[gRPC Basics: Node.js]:http://www.grpc.io/docs/tutorials/basic/node.html
+[gRPC Basics: Node.js]:https://grpc.io/docs/tutorials/basic/node.html

@@ -2,4 +2,4 @@
 The files in this folder are the samples used in [gRPC Basics: Node.js][], a detailed tutorial for using gRPC in Node.js.
-[gRPC Basics: Node.js]:http://www.grpc.io/docs/tutorials/basic/node.html
+[gRPC Basics: Node.js]:https://grpc.io/docs/tutorials/basic/node.html

@@ -2,4 +2,4 @@
 The files in this folder are the samples used in [gRPC Basics: Node.js][], a detailed tutorial for using gRPC in Node.js.
-[gRPC Basics: Node.js]:http://www.grpc.io/docs/tutorials/basic/node.html
+[gRPC Basics: Node.js]:https://grpc.io/docs/tutorials/basic/node.html

@@ -3,7 +3,7 @@ Pod::Spec.new do |s|
 s.version = "0.0.1"
 s.license = "Apache License, Version 2.0"
 s.authors = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
-s.homepage = "http://www.grpc.io/"
+s.homepage = "https://grpc.io/"
 s.summary = "AuthTestService example"
 s.source = { :git => 'https://github.com/grpc/grpc.git' }

@@ -1,3 +1,3 @@
 # OAuth2 on gRPC: Objective-C
-This is the supporting code for the tutorial "[OAuth2 on gRPC: Objective-C](http://www.grpc.io/docs/tutorials/auth/oauth2-objective-c.html)."
+This is the supporting code for the tutorial "[OAuth2 on gRPC: Objective-C](https://grpc.io/docs/tutorials/auth/oauth2-objective-c.html)."

@@ -3,7 +3,7 @@ Pod::Spec.new do |s|
 s.version = "0.0.1"
 s.license = "Apache License, Version 2.0"
 s.authors = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
-s.homepage = "http://www.grpc.io/"
+s.homepage = "https://grpc.io/"
 s.summary = "HelloWorld example"
 s.source = { :git => 'https://github.com/grpc/grpc.git' }

@@ -16,7 +16,7 @@ this repository to your local machine by running the following commands:
 ```sh
-$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
+$ git clone -b $(curl -L https://grpc.io/release) https://github.com/grpc/grpc
 $ cd grpc
 $ git submodule update --init
 ```
@@ -55,4 +55,4 @@ responds with a `HLWHelloResponse`, which contains a string that is then output
 ## Tutorial
-You can find a more detailed tutorial in [gRPC Basics: Objective-C](http://www.grpc.io/docs/tutorials/basic/objective-c.html).
+You can find a more detailed tutorial in [gRPC Basics: Objective-C](https://grpc.io/docs/tutorials/basic/objective-c.html).

@@ -1,4 +1,4 @@
 # gRPC Basics: Objective-C
-This is the supporting code for the tutorial "[gRPC Basics: Objective-C](http://www.grpc.io/docs/tutorials/basic/objective-c.html)."
+This is the supporting code for the tutorial "[gRPC Basics: Objective-C](https://grpc.io/docs/tutorials/basic/objective-c.html)."

@@ -3,7 +3,7 @@ Pod::Spec.new do |s|
 s.version = "0.0.1"
 s.license = "Apache License, Version 2.0"
 s.authors = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
-s.homepage = "http://www.grpc.io/"
+s.homepage = "https://grpc.io/"
 s.summary = "RouteGuide example"
 s.source = { :git => 'https://github.com/grpc/grpc.git' }

@@ -17,7 +17,7 @@ INSTALL
 - Clone this repository
 ```sh
-$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
+$ git clone -b $(curl -L https://grpc.io/release) https://github.com/grpc/grpc
 ```
 - Install composer
@@ -60,4 +60,4 @@ TUTORIAL
 You can find a more detailed tutorial in [gRPC Basics: PHP][]
 [Node]:https://github.com/grpc/grpc/tree/master/examples/node
-[gRPC Basics: PHP]:http://www.grpc.io/docs/tutorials/basic/php.html
+[gRPC Basics: PHP]:https://grpc.io/docs/tutorials/basic/php.html

@@ -3,4 +3,4 @@
 The files in this folder are the samples used in [gRPC Basics: PHP][],
 a detailed tutorial for using gRPC in PHP.
-[gRPC Basics: PHP]:http://www.grpc.io/docs/tutorials/basic/php.html
+[gRPC Basics: PHP]:https://grpc.io/docs/tutorials/basic/php.html

@@ -1 +1 @@
-[This code's documentation lives on the grpc.io site.](http://www.grpc.io/docs/quickstart/python.html)
+[This code's documentation lives on the grpc.io site.](https://grpc.io/docs/quickstart/python.html)

@@ -1 +1 @@
-[This code's documentation lives on the grpc.io site.](http://www.grpc.io/docs/quickstart/python.html)
+[This code's documentation lives on the grpc.io site.](https://grpc.io/docs/quickstart/python.html)

@@ -1,3 +1,3 @@
 An example showing two stubs sharing a channel and two servicers sharing a server.
-More complete documentation lives at [grpc.io](http://www.grpc.io/docs/tutorials/basic/python.html).
+More complete documentation lives at [grpc.io](https://grpc.io/docs/tutorials/basic/python.html).

@@ -1 +1 @@
-[This code's documentation lives on the grpc.io site.](http://www.grpc.io/docs/tutorials/basic/python.html)
+[This code's documentation lives on the grpc.io site.](https://grpc.io/docs/tutorials/basic/python.html)

@@ -60,4 +60,4 @@ You can find a more detailed tutorial in [gRPC Basics: Ruby][]
 [helloworld.proto]:../protos/helloworld.proto
 [RVM]:https://www.rvm.io/
 [Install gRPC ruby]:../../src/ruby#installation
-[gRPC Basics: Ruby]:http://www.grpc.io/docs/tutorials/basic/ruby.html
+[gRPC Basics: Ruby]:https://grpc.io/docs/tutorials/basic/ruby.html

@@ -3,4 +3,4 @@
 The files in this folder are the samples used in [gRPC Basics: Ruby][],
 a detailed tutorial for using gRPC in Ruby.
-[gRPC Basics: Ruby]:http://www.grpc.io/docs/tutorials/basic/ruby.html
+[gRPC Basics: Ruby]:https://grpc.io/docs/tutorials/basic/ruby.html

@@ -25,15 +25,13 @@ Pod::Spec.new do |s|
 version = '1.5.0-dev'
 s.version = version
 s.summary = 'Core cross-platform gRPC library, written in C'
-s.homepage = 'http://www.grpc.io'
+s.homepage = 'https://grpc.io'
 s.license = 'Apache License, Version 2.0'
 s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }
 s.source = {
 :git => 'https://github.com/grpc/grpc.git',
 :tag => "v#{version}",
-# TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules.
-:submodules => true,
 }
 s.ios.deployment_target = '7.0'
@@ -179,6 +177,7 @@ Pod::Spec.new do |s|
 ss.libraries = 'z'
 ss.dependency "#{s.name}/Interface", version
 ss.dependency 'BoringSSL', '~> 8.0'
+ss.dependency 'nanopb', '~> 0.3'
 # To save you from scrolling, this is the last part of the podspec.
 ss.source_files = 'src/core/lib/profiling/timers.h',
@@ -193,6 +192,7 @@ Pod::Spec.new do |s|
 'src/core/lib/support/mpscq.h',
 'src/core/lib/support/murmur_hash.h',
 'src/core/lib/support/spinlock.h',
+'src/core/lib/support/stack_lockfree.h',
 'src/core/lib/support/string.h',
 'src/core/lib/support/string_windows.h',
 'src/core/lib/support/thd_internal.h',
@@ -222,6 +222,7 @@ Pod::Spec.new do |s|
 'src/core/lib/support/log_windows.c',
 'src/core/lib/support/mpscq.c',
 'src/core/lib/support/murmur_hash.c',
+'src/core/lib/support/stack_lockfree.c',
 'src/core/lib/support/string.c',
 'src/core/lib/support/string_posix.c',
 'src/core/lib/support/string_util_windows.c',
@@ -428,10 +429,6 @@ Pod::Spec.new do |s|
 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
 'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
-'third_party/nanopb/pb.h',
-'third_party/nanopb/pb_common.h',
-'third_party/nanopb/pb_decode.h',
-'third_party/nanopb/pb_encode.h',
 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h',
 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h',
@@ -674,9 +671,6 @@ Pod::Spec.new do |s|
 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c',
 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c',
 'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
-'third_party/nanopb/pb_common.c',
-'third_party/nanopb/pb_decode.c',
-'third_party/nanopb/pb_encode.c',
 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c',
 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c',
 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c',
@@ -721,6 +715,7 @@ Pod::Spec.new do |s|
 'src/core/lib/support/mpscq.h',
 'src/core/lib/support/murmur_hash.h',
 'src/core/lib/support/spinlock.h',
+'src/core/lib/support/stack_lockfree.h',
 'src/core/lib/support/string.h',
 'src/core/lib/support/string_windows.h',
 'src/core/lib/support/thd_internal.h',
@@ -911,10 +906,6 @@ Pod::Spec.new do |s|
 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
 'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
-'third_party/nanopb/pb.h',
-'third_party/nanopb/pb_common.h',
-'third_party/nanopb/pb_decode.h',
-'third_party/nanopb/pb_encode.h',
 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h',
 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h',

@@ -24,7 +24,7 @@ Pod::Spec.new do |s|
 version = '1.5.0-dev'
 s.version = version
 s.summary = 'RPC library for Protocol Buffers, based on gRPC'
-s.homepage = 'http://www.grpc.io'
+s.homepage = 'https://grpc.io'
 s.license = 'Apache License, Version 2.0'
 s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }

@@ -24,7 +24,7 @@ Pod::Spec.new do |s|
 version = '1.5.0-dev'
 s.version = version
 s.summary = 'Reactive Extensions library for iOS/OSX.'
-s.homepage = 'http://www.grpc.io'
+s.homepage = 'https://grpc.io'
 s.license = 'Apache License, Version 2.0'
 s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }

@@ -23,7 +23,7 @@ Pod::Spec.new do |s|
 version = '1.5.0-dev'
 s.version = version
 s.summary = 'gRPC client library for iOS/OSX'
-s.homepage = 'http://www.grpc.io'
+s.homepage = 'https://grpc.io'
 s.license = 'Apache License, Version 2.0'
 s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }

@@ -92,6 +92,7 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/support/mpscq.h )
 s.files += %w( src/core/lib/support/murmur_hash.h )
 s.files += %w( src/core/lib/support/spinlock.h )
+s.files += %w( src/core/lib/support/stack_lockfree.h )
 s.files += %w( src/core/lib/support/string.h )
 s.files += %w( src/core/lib/support/string_windows.h )
 s.files += %w( src/core/lib/support/thd_internal.h )
@@ -121,6 +122,7 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/support/log_windows.c )
 s.files += %w( src/core/lib/support/mpscq.c )
 s.files += %w( src/core/lib/support/murmur_hash.c )
+s.files += %w( src/core/lib/support/stack_lockfree.c )
 s.files += %w( src/core/lib/support/string.c )
 s.files += %w( src/core/lib/support/string_posix.c )
 s.files += %w( src/core/lib/support/string_util_windows.c )

@@ -77,7 +77,7 @@ class Alarm : private GrpcLibraryCodegen {
 void Cancel() { grpc_alarm_cancel(alarm_); }
 private:
-class AlarmEntry : public CompletionQueueTag {
+class AlarmEntry : public internal::CompletionQueueTag {
 public:
 AlarmEntry(void* tag) : tag_(tag) {}
 bool FinalizeResult(void** tag, bool* status) override {
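Moving `CompletionQueueTag` under `internal::` leaves the public `Alarm` surface alone. A usage sketch, assuming the constructor-based `Alarm(cq, deadline, tag)` API that grpc++ headers of this era expose:

```cpp
#include <grpc++/alarm.h>
#include <grpc++/completion_queue.h>
#include <grpc/support/time.h>

void WakeMeLater(grpc::CompletionQueue* cq) {
  void* expected_tag = reinterpret_cast<void*>(0x1);
  // Schedule `expected_tag` to surface on `cq` one second from now.
  grpc::Alarm alarm(cq,
                    gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                 gpr_time_from_seconds(1, GPR_TIMESPAN)),
                    expected_tag);
  void* tag;
  bool ok;
  cq->Next(&tag, &ok);  // blocks until the alarm fires (ok == true)
  // alarm.Cancel() would instead surface the tag promptly with ok == false.
}
```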

@@ -32,7 +32,7 @@
 struct grpc_channel;
 namespace grpc {
 /// Channels represent a connection to an endpoint. Created by \a CreateChannel.
 class Channel final : public ChannelInterface,
-                      public CallHook,
+                      public internal::CallHook,
                       public std::enable_shared_from_this<Channel>,
                       private GrpcLibraryCodegen {
 public:
@@ -52,7 +52,7 @@ class Channel final : public ChannelInterface,
 private:
 template <class InputMessage, class OutputMessage>
 friend Status BlockingUnaryCall(ChannelInterface* channel,
-                                const RpcMethod& method,
+                                const internal::RpcMethod& method,
                                 ClientContext* context,
                                 const InputMessage& request,
                                 OutputMessage* result);
@@ -60,9 +60,11 @@ class Channel final : public ChannelInterface,
 const grpc::string& host, grpc_channel* c_channel);
 Channel(const grpc::string& host, grpc_channel* c_channel);
-Call CreateCall(const RpcMethod& method, ClientContext* context,
-                CompletionQueue* cq) override;
-void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override;
+internal::Call CreateCall(const internal::RpcMethod& method,
+                          ClientContext* context,
+                          CompletionQueue* cq) override;
+void PerformOpsOnCall(internal::CallOpSetInterface* ops,
+                      internal::Call* call) override;
 void* RegisterMethod(const char* method) override;
 void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
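These signature changes are internal plumbing; applications still reach `Channel` through `grpc::CreateChannel` and a generated stub. A minimal sketch (the `helloworld.Greeter` service and its generated header come from the standard example, not from this diff):

```cpp
#include <grpc++/grpc++.h>
#include "helloworld.grpc.pb.h"  // generated from the standard helloworld.proto

int main() {
  // Channel creation is unchanged by the internal:: refactoring.
  std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
      "localhost:50051", grpc::InsecureChannelCredentials());
  std::unique_ptr<helloworld::Greeter::Stub> stub =
      helloworld::Greeter::NewStub(channel);

  grpc::ClientContext context;
  helloworld::HelloRequest request;
  request.set_name("world");
  helloworld::HelloReply reply;
  // BlockingUnaryCall and internal::RpcMethod are invoked under the hood.
  grpc::Status status = stub->SayHello(&context, request, &reply);
  return status.ok() ? 0 : 1;
}
```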

@@ -21,7 +21,7 @@
 /// The gRPC C++ API mainly consists of the following classes:
 /// <br>
 /// - grpc::Channel, which represents the connection to an endpoint. See [the
-///   gRPC Concepts page](http://www.grpc.io/docs/guides/concepts.html) for more
+///   gRPC Concepts page](https://grpc.io/docs/guides/concepts.html) for more
 ///   details. Channels are created by the factory function grpc::CreateChannel.
 ///
 /// - grpc::CompletionQueue, the producer-consumer queue used for all

@@ -30,6 +30,7 @@ namespace grpc {
class CompletionQueue;
namespace internal {
/// Common interface for all client side asynchronous streaming.
class ClientAsyncStreamingInterface {
public:
@@ -146,9 +147,41 @@ class AsyncWriterInterface {
}
};
} // namespace internal
template <class R>
class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
public AsyncReaderInterface<R> {};
class ClientAsyncReaderInterface
: public internal::ClientAsyncStreamingInterface,
public internal::AsyncReaderInterface<R> {};
/// Common interface for client side asynchronous writing.
template <class W>
class ClientAsyncWriterInterface
: public internal::ClientAsyncStreamingInterface,
public internal::AsyncWriterInterface<W> {
public:
/// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read
///
/// \param[in] tag The tag identifying the operation.
virtual void WritesDone(void* tag) = 0;
};
/// Async client-side interface for bi-directional streaming,
/// where the client-to-server message stream has messages of type \a W,
/// and the server-to-client message stream has messages of type \a R.
template <class W, class R>
class ClientAsyncReaderWriterInterface
: public internal::ClientAsyncStreamingInterface,
public internal::AsyncWriterInterface<W>,
public internal::AsyncReaderInterface<R> {
public:
/// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read
///
/// \param[in] tag The tag identifying the operation.
virtual void WritesDone(void* tag) = 0;
};
/// Async client-side API for doing server-streaming RPCs,
/// where the incoming message stream coming from the server has
@@ -156,21 +189,24 @@ class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
template <class R>
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
/// Create a stream and write the first request out.
/// \a tag will be notified on \a cq when the call has been started and
/// \a request has been written out.
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
template <class W>
static ClientAsyncReader* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, const W& request,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader)))
ClientAsyncReader(call, context, request, tag);
}
struct internal {
/// Create a stream and write the first request out.
/// \a tag will be notified on \a cq when the call has been started and
/// \a request has been written out.
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
template <class W>
static ClientAsyncReader* Create(::grpc::ChannelInterface* channel,
CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request,
void* tag) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader)))
ClientAsyncReader(call, context, request, tag);
}
};
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
@@ -218,8 +254,8 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
private:
template <class W>
ClientAsyncReader(Call call, ClientContext* context, const W& request,
void* tag)
ClientAsyncReader(::grpc::internal::Call call, ClientContext* context,
const W& request, void* tag)
: context_(context), call_(call) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_,
@@ -231,24 +267,19 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
::grpc::internal::Call call_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
init_ops_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
};
/// Common interface for client side asynchronous writing.
template <class W>
class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
public AsyncWriterInterface<W> {
public:
/// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read
///
/// \param[in] tag The tag identifying the operation.
virtual void WritesDone(void* tag) = 0;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
};
/// Async API on the client side for doing client-streaming RPCs,
@@ -257,24 +288,27 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
/// Create a stream and write the first request out.
/// \a tag will be notified on \a cq when the call has been started (i.e.
/// intitial metadata sent) and \a request has been written out.
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
/// \a response will be filled in with the single expected response
/// message from the server upon a successful call to the \a Finish
/// method of this instance.
template <class R>
static ClientAsyncWriter* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, R* response,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncWriter)))
ClientAsyncWriter(call, context, response, tag);
}
struct internal {
/// Create a stream and write the first request out.
/// \a tag will be notified on \a cq when the call has been started (i.e.
/// intitial metadata sent) and \a request has been written out.
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
/// \a response will be filled in with the single expected response
/// message from the server upon a successful call to the \a Finish
/// method of this instance.
template <class R>
static ClientAsyncWriter* Create(::grpc::ChannelInterface* channel,
CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response,
void* tag) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncWriter)))
ClientAsyncWriter(call, context, response, tag);
}
};
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
@@ -338,7 +372,8 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
private:
template <class R>
ClientAsyncWriter(Call call, ClientContext* context, R* response, void* tag)
ClientAsyncWriter(::grpc::internal::Call call, ClientContext* context,
R* response, void* tag)
: context_(context), call_(call) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
@@ -356,30 +391,19 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
::grpc::internal::Call call_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
write_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage,
CallOpClientRecvStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpGenericRecvMessage,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
};
/// Async client-side interface for bi-directional streaming,
/// where the client-to-server message stream has messages of type \a W,
/// and the server-to-client message stream has messages of type \a R.
template <class W, class R>
class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
public AsyncWriterInterface<W>,
public AsyncReaderInterface<R> {
public:
/// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read
///
/// \param[in] tag The tag identifying the operation.
virtual void WritesDone(void* tag) = 0;
};
/// Async client-side interface for bi-directional streaming,
/// where the outgoing message stream going to the server
/// has messages of type \a W, and the incoming message stream coming
@@ -388,21 +412,23 @@ template <class W, class R>
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
public:
/// Create a stream and write the first request out.
/// \a tag will be notified on \a cq when the call has been started (i.e.
/// intitial metadata sent).
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
static ClientAsyncReaderWriter* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context, void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReaderWriter)))
ClientAsyncReaderWriter(call, context, tag);
}
struct internal {
/// Create a stream and write the first request out.
/// \a tag will be notified on \a cq when the call has been started (i.e.
/// intitial metadata sent).
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
static ClientAsyncReaderWriter* Create(
::grpc::ChannelInterface* channel, CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method, ClientContext* context,
void* tag) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReaderWriter)))
ClientAsyncReaderWriter(call, context, tag);
}
};
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
@@ -471,7 +497,8 @@ class ClientAsyncReaderWriter final
}
private:
ClientAsyncReaderWriter(Call call, ClientContext* context, void* tag)
ClientAsyncReaderWriter(::grpc::internal::Call call, ClientContext* context,
void* tag)
: context_(context), call_(call) {
if (context_->initial_metadata_corked_) {
// if corked bit is set in context, we buffer up the initial metadata to
@@ -487,17 +514,25 @@ class ClientAsyncReaderWriter final
}
ClientContext* context_;
Call call_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
::grpc::internal::Call call_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
write_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
};
template <class W, class R>
class ServerAsyncReaderInterface : public ServerAsyncStreamingInterface,
public AsyncReaderInterface<R> {
class ServerAsyncReaderInterface
: public internal::ServerAsyncStreamingInterface,
public internal::AsyncReaderInterface<R> {
public:
/// Indicate that the stream is to be finished with a certain status code
/// and also send out \a msg response to the client.
@@ -541,6 +576,89 @@ class ServerAsyncReaderInterface : public ServerAsyncStreamingInterface,
virtual void FinishWithError(const Status& status, void* tag) = 0;
};
template <class W>
class ServerAsyncWriterInterface
: public internal::ServerAsyncStreamingInterface,
public internal::AsyncWriterInterface<W> {
public:
/// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate
/// signals to the client to end the call.
/// Should not be used concurrently with other operations.
///
/// It is appropriate to call this method when either:
/// * all messages from the client have been received (either known
/// implictly, or explicitly because a previous \a
/// AsyncReaderInterface::Read operation with a non-ok
/// result (e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false'.
/// * it is desired to end the call early with some non-OK status code.
///
/// This operation will end when the server has finished sending out initial
/// metadata (if not sent already), response message, and status, or if
/// some failure occurred when trying to do so.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
virtual void Finish(const Status& status, void* tag) = 0;
/// Request the writing of \a msg and coalesce it with trailing metadata which
/// contains \a status, using WriteOptions options with
/// identifying tag \a tag.
///
/// WriteAndFinish is equivalent of performing WriteLast and Finish
/// in a single step.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] status The Status that server returns to client.
/// \param[in] tag The tag identifying the operation.
virtual void WriteAndFinish(const W& msg, WriteOptions options,
const Status& status, void* tag) = 0;
};
/// Server-side interface for asynchronous bi-directional streaming.
template <class W, class R>
class ServerAsyncReaderWriterInterface
: public internal::ServerAsyncStreamingInterface,
public internal::AsyncWriterInterface<W>,
public internal::AsyncReaderInterface<R> {
public:
/// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate
/// signals to the client to end the call.
/// Should not be used concurrently with other operations.
///
/// It is appropriate to call this method when either:
/// * all messages from the client have been received (either known
/// implictly, or explicitly because a previous \a
/// AsyncReaderInterface::Read operation
/// with a non-ok result (e.g., cq->Next(&read_tag, &ok) filled in 'ok'
/// with 'false'.
/// * it is desired to end the call early with some non-OK status code.
///
/// This operation will end when the server has finished sending out initial
/// metadata (if not sent already), response message, and status, or if some
/// failure occurred when trying to do so.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
virtual void Finish(const Status& status, void* tag) = 0;
/// Request the writing of \a msg and coalesce it with trailing metadata which
/// contains \a status, using WriteOptions options with
/// identifying tag \a tag.
///
/// WriteAndFinish is equivalent of performing WriteLast and Finish in a
/// single step.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] status The Status that server returns to client.
/// \param[in] tag The tag identifying the operation.
virtual void WriteAndFinish(const W& msg, WriteOptions options,
const Status& status, void* tag) = 0;
};
/// Async server-side API for doing client-streaming RPCs,
/// where the incoming message stream from the client has messages of type \a R,
/// and the single response message sent from the server is type \a W.
@@ -624,56 +742,19 @@ class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
}
private:
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
};
template <class W>
class ServerAsyncWriterInterface : public ServerAsyncStreamingInterface,
public AsyncWriterInterface<W> {
public:
/// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate
/// signals to the client to end the call.
/// Should not be used concurrently with other operations.
///
/// It is appropriate to call this method when either:
/// * all messages from the client have been received (either known
/// implictly, or explicitly because a previous \a
/// AsyncReaderInterface::Read operation with a non-ok
/// result (e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false'.
/// * it is desired to end the call early with some non-OK status code.
///
/// This operation will end when the server has finished sending out initial
/// metadata (if not sent already), response message, and status, or if
/// some failure occurred when trying to do so.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
virtual void Finish(const Status& status, void* tag) = 0;
/// Request the writing of \a msg and coalesce it with trailing metadata which
/// contains \a status, using WriteOptions options with
/// identifying tag \a tag.
///
/// WriteAndFinish is equivalent of performing WriteLast and Finish
/// in a single step.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] status The Status that server returns to client.
/// \param[in] tag The tag identifying the operation.
virtual void WriteAndFinish(const W& msg, WriteOptions options,
const Status& status, void* tag) = 0;
};
/// Async server-side API for doing server streaming RPCs,
/// where the outgoing message stream from the server has messages of type \a W.
template <class W>
@@ -755,7 +836,7 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
}
private:
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
template <class T>
void EnsureInitialMetadataSent(T* ops) {
@@ -769,55 +850,17 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
}
}
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
};
/// Server-side interface for asynchronous bi-directional streaming.
template <class W, class R>
class ServerAsyncReaderWriterInterface : public ServerAsyncStreamingInterface,
public AsyncWriterInterface<W>,
public AsyncReaderInterface<R> {
public:
/// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate
/// signals to the client to end the call.
/// Should not be used concurrently with other operations.
///
/// It is appropriate to call this method when either:
///   * all messages from the client have been received (either known
///     implicitly, or explicitly because a previous \a
///     AsyncReaderInterface::Read operation
///     with a non-ok result (e.g., cq->Next(&read_tag, &ok) filled in 'ok'
///     with 'false').
///   * it is desired to end the call early with some non-OK status code.
///
/// This operation will end when the server has finished sending out initial
/// metadata (if not sent already), response message, and status, or if some
/// failure occurred when trying to do so.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
virtual void Finish(const Status& status, void* tag) = 0;
/// Request the writing of \a msg and coalesce it with trailing metadata which
/// contains \a status, using WriteOptions options with
/// identifying tag \a tag.
///
/// WriteAndFinish is equivalent to performing WriteLast and Finish in a
/// single step.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] status The Status that server returns to client.
/// \param[in] tag The tag identifying the operation.
virtual void WriteAndFinish(const W& msg, WriteOptions options,
const Status& status, void* tag) = 0;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
};
/// Async server-side API for doing bidirectional streaming RPCs,
@ -912,7 +955,7 @@ class ServerAsyncReaderWriter final
private:
friend class ::grpc::Server;
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
template <class T>
void EnsureInitialMetadataSent(T* ops) {
@ -926,14 +969,18 @@ class ServerAsyncReaderWriter final
}
}
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
};
} // namespace grpc

@ -75,17 +75,18 @@ class ClientAsyncResponseReader final
/// initial metadata sent) and \a request has been written out.
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
template <class W>
static ClientAsyncResponseReader* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context,
const W& request) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader)))
ClientAsyncResponseReader(call, context, request);
}
struct internal {
template <class W>
static ClientAsyncResponseReader* Create(
::grpc::ChannelInterface* channel, CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method, ClientContext* context,
const W& request) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader)))
ClientAsyncResponseReader(call, context, request);
}
};
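// Illustrative sketch: Create() placement-news the reader into the call's
// own arena (grpc_call_arena_alloc), so the returned object lives and dies
// with the call and must not be delete'd. A generated stub would use it
// roughly as follows (rpcmethod_Echo_ and EchoReply are hypothetical
// generated names):
//
//   auto* rpc = ClientAsyncResponseReader<EchoReply>::internal::Create(
//       channel_.get(), cq, rpcmethod_Echo_, &context, request);
//   rpc->Finish(&reply, &status, tag);  // completion is delivered on cq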
/// TODO(vjpai): Delete the below constructor
/// PLEASE DO NOT USE THIS CONSTRUCTOR IN NEW CODE
@ -94,9 +95,10 @@ class ClientAsyncResponseReader final
/// created this struct rather than properly using a stub.
/// This code will not remain a valid public constructor for long.
template <class W>
ClientAsyncResponseReader(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
const W& request)
ClientAsyncResponseReader(::grpc::ChannelInterface* channel,
CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request)
: context_(context),
call_(channel->CreateCall(method, context, cq)),
collection_(std::make_shared<Ops>()) {
@ -123,18 +125,18 @@ class ClientAsyncResponseReader final
void ReadInitialMetadata(void* tag) {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
Ops& o = ops_;
Ops* o = &ops_;
// TODO(vjpai): Remove the collection_ specialization as soon
// as the public constructor is deleted
if (collection_) {
o = *collection_;
o = collection_.get();
collection_->meta_buf.SetCollection(collection_);
}
o.meta_buf.set_output_tag(tag);
o.meta_buf.RecvInitialMetadata(context_);
call_.PerformOps(&o.meta_buf);
o->meta_buf.set_output_tag(tag);
o->meta_buf.RecvInitialMetadata(context_);
call_.PerformOps(&o->meta_buf);
}
/// See \a ClientAsyncResponseReaderInterface::Finish for semantics.
@ -143,31 +145,32 @@ class ClientAsyncResponseReader final
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata sent from the server.
void Finish(R* msg, Status* status, void* tag) {
Ops& o = ops_;
Ops* o = &ops_;
// TODO(vjpai): Remove the collection_ specialization as soon
// as the public constructor is deleted
if (collection_) {
o = *collection_;
o = collection_.get();
collection_->finish_buf.SetCollection(collection_);
}
o.finish_buf.set_output_tag(tag);
o->finish_buf.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
o.finish_buf.RecvInitialMetadata(context_);
o->finish_buf.RecvInitialMetadata(context_);
}
o.finish_buf.RecvMessage(msg);
o.finish_buf.AllowNoMessage();
o.finish_buf.ClientRecvStatus(context_, status);
call_.PerformOps(&o.finish_buf);
o->finish_buf.RecvMessage(msg);
o->finish_buf.AllowNoMessage();
o->finish_buf.ClientRecvStatus(context_, status);
call_.PerformOps(&o->finish_buf);
}
private:
ClientContext* const context_;
Call call_;
::grpc::internal::Call call_;
template <class W>
ClientAsyncResponseReader(Call call, ClientContext* context, const W& request)
ClientAsyncResponseReader(::grpc::internal::Call call, ClientContext* context,
const W& request)
: context_(context), call_(call) {
ops_.init_buf.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
@ -183,13 +186,17 @@ class ClientAsyncResponseReader final
// TODO(vjpai): Remove the reference to CallOpSetCollectionInterface
// as soon as the related workaround (public constructor) is deleted
struct Ops : public CallOpSetCollectionInterface {
SneakyCallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
struct Ops : public ::grpc::internal::CallOpSetCollectionInterface {
::grpc::internal::SneakyCallOpSet<
::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
init_buf;
CallOpSet<CallOpRecvInitialMetadata> meta_buf;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>,
CallOpClientRecvStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_buf;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>,
::grpc::internal::CallOpClientRecvStatus>
finish_buf;
} ops_;
@ -201,7 +208,8 @@ class ClientAsyncResponseReader final
/// Async server-side API for handling unary calls, where the single
/// response message sent to the client is of type \a W.
template <class W>
class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
class ServerAsyncResponseWriter final
: public internal::ServerAsyncStreamingInterface {
public:
explicit ServerAsyncResponseWriter(ServerContext* ctx)
: call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
@ -289,13 +297,15 @@ class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
}
private:
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_buf_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_buf_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
finish_buf_;
};

@ -44,11 +44,13 @@ struct grpc_byte_buffer;
namespace grpc {
class ByteBuffer;
class Call;
class CallHook;
class CompletionQueue;
extern CoreCodegenInterface* g_core_codegen_interface;
namespace internal {
class Call;
class CallHook;
const char kBinaryErrorDetailsKey[] = "grpc-status-details-bin";
// TODO(yangg) if the map is changed before we send, the pointers will be a
@ -76,6 +78,7 @@ inline grpc_metadata* FillMetadataArray(
}
return metadata_array;
}
} // namespace internal
/// Per-message write options.
class WriteOptions {
@ -191,6 +194,7 @@ class WriteOptions {
bool last_message_;
};
namespace internal {
/// Default argument for CallOpSet. I is unused by the class, but can be
/// used for generating multiple names for the same thing.
template <int I>
@ -547,7 +551,10 @@ class CallOpClientRecvStatus {
/// TODO(vjpai): Remove the existence of CallOpSetCollectionInterface
/// and references to it. This code is deprecated-on-arrival and is
/// only added for users that bypassed the code-generator.
class CallOpSetCollectionInterface {};
class CallOpSetCollectionInterface {
public:
virtual ~CallOpSetCollectionInterface() {}
};
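// Note: derived Ops collections are held and released through
// shared_ptr<CallOpSetCollectionInterface> (see collection_ in CallOpSet);
// the virtual destructor makes destroying a derived object through the base
// interface well-defined, e.g. (SomeDerivedOps is hypothetical):
//
//   CallOpSetCollectionInterface* c = new SomeDerivedOps;
//   delete c;  // runs ~SomeDerivedOps only because the destructor is virtual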
/// An abstract collection of call ops, used to generate the
/// grpc_call_op structure to pass down to the lower layers,
@ -613,9 +620,11 @@ class CallOpSet : public CallOpSetInterface,
// TODO(vjpai): Remove the reference to collection_ once the idea of
// bypassing the code generator is forbidden. It is already deprecated
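// Capture the call handle first: if collection_ holds the last reference,
// the reset below can destroy this op set, so the call_ member must not be
// touched afterwards.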
grpc_call* call = call_;
collection_.reset();
g_core_codegen_interface->grpc_call_unref(call_);
g_core_codegen_interface->grpc_call_unref(call);
return true;
}
@ -673,7 +682,7 @@ class Call final {
grpc_call* call_;
int max_receive_message_size_;
};
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CALL_H

@ -21,6 +21,7 @@
namespace grpc {
namespace internal {
class CallOpSetInterface;
class Call;
@ -31,6 +32,7 @@ class CallHook {
virtual ~CallHook() {}
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
};
} // namespace internal
} // namespace grpc

@ -24,10 +24,8 @@
#include <grpc/impl/codegen/connectivity_state.h>
namespace grpc {
class Call;
class ChannelInterface;
class ClientContext;
class RpcMethod;
class CallOpSetInterface;
class CompletionQueue;
template <class R>
@ -45,6 +43,16 @@ class ClientAsyncReaderWriter;
template <class R>
class ClientAsyncResponseReader;
namespace internal {
class Call;
class CallOpSetInterface;
class RpcMethod;
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result);
} // namespace internal
/// Codegen interface for \a grpc::Channel.
class ChannelInterface {
public:
@ -96,15 +104,16 @@ class ChannelInterface {
template <class R>
friend class ::grpc::ClientAsyncResponseReader;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend class ::grpc::RpcMethod;
virtual Call CreateCall(const RpcMethod& method, ClientContext* context,
CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
friend Status(::grpc::internal::BlockingUnaryCall)(
ChannelInterface* channel, const internal::RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result);
friend class ::grpc::internal::RpcMethod;
virtual internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context,
CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
virtual void* RegisterMethod(const char* method) = 0;
virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
@ -112,7 +121,6 @@ class ChannelInterface {
virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) = 0;
};
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CHANNEL_INTERFACE_H

@ -60,7 +60,18 @@ class Channel;
class ChannelInterface;
class CompletionQueue;
class CallCredentials;
class ClientContext;
namespace internal {
class RpcMethod;
class CallOpClientRecvStatus;
class CallOpRecvInitialMetadata;
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result);
} // namespace internal
template <class R>
class ClientReader;
template <class W>
@ -146,7 +157,7 @@ class InteropClientContextInspector;
///
/// Context settings are only relevant to the call they are invoked with, that
/// is to say, they aren't sticky. Some of these settings, such as the
/// compression options, can be made persistant at channel construction time
/// compression options, can be made persistent at channel construction time
/// (see \a grpc::CreateCustomChannel).
///
/// \warning ClientContext instances should \em not be reused across rpcs.
@ -229,7 +240,7 @@ class ClientContext {
/// EXPERIMENTAL: Set this request to be cacheable.
/// If set, grpc is free to use the HTTP GET verb for sending the request,
/// with the possibility of receiving a cached respone.
/// with the possibility of receiving a cached response.
void set_cacheable(bool cacheable) { cacheable_ = cacheable; }
/// EXPERIMENTAL: Trigger wait-for-ready or not on this request.
@ -275,7 +286,7 @@ class ClientContext {
/// client’s identity, role, or whether it is authorized to make a particular
/// call.
///
/// \see http://www.grpc.io/docs/guides/auth.html
/// \see https://grpc.io/docs/guides/auth.html
void set_credentials(const std::shared_ptr<CallCredentials>& creds) {
creds_ = creds;
}
@ -345,8 +356,8 @@ class ClientContext {
ClientContext& operator=(const ClientContext&);
friend class ::grpc::testing::InteropClientContextInspector;
friend class CallOpClientRecvStatus;
friend class CallOpRecvInitialMetadata;
friend class ::grpc::internal::CallOpClientRecvStatus;
friend class ::grpc::internal::CallOpRecvInitialMetadata;
friend class Channel;
template <class R>
friend class ::grpc::ClientReader;
@ -363,11 +374,10 @@ class ClientContext {
template <class R>
friend class ::grpc::ClientAsyncResponseReader;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend Status(::grpc::internal::BlockingUnaryCall)(
ChannelInterface* channel, const internal::RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result);
grpc_call* call() const { return call_; }
void set_call(grpc_call* call, const std::shared_ptr<Channel>& channel);
@ -399,8 +409,8 @@ class ClientContext {
mutable std::shared_ptr<const AuthContext> auth_context_;
struct census_context* census_context_;
std::multimap<grpc::string, grpc::string> send_initial_metadata_;
MetadataMap recv_initial_metadata_;
MetadataMap trailing_metadata_;
internal::MetadataMap recv_initial_metadata_;
internal::MetadataMap trailing_metadata_;
grpc_call* propagate_from_call_;
PropagationOptions propagation_options_;

@ -30,8 +30,9 @@ namespace grpc {
class Channel;
class ClientContext;
class CompletionQueue;
class RpcMethod;
namespace internal {
class RpcMethod;
/// Wrapper that performs a blocking unary call
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
@ -67,6 +68,7 @@ Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
return status;
}
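// Illustrative sketch: generated stubs route synchronous unary methods
// through the helper above, roughly as follows (rpcmethod_Echo_, EchoRequest,
// and EchoReply are hypothetical generated names):
//
//   grpc::Status Stub::Echo(grpc::ClientContext* context,
//                           const EchoRequest& request, EchoReply* response) {
//     return ::grpc::internal::BlockingUnaryCall(
//         channel_.get(), rpcmethod_Echo_, context, request, response);
//   }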
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CLIENT_UNARY_CALL_H

@ -56,7 +56,19 @@ class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriterBody;
}
} // namespace internal
class Channel;
class ChannelInterface;
class ClientContext;
class CompletionQueue;
class Server;
class ServerBuilder;
class ServerContext;
namespace internal {
class CompletionQueueTag;
class RpcMethod;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@ -66,16 +78,13 @@ class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
class UnknownMethodHandler;
class Channel;
class ChannelInterface;
class ClientContext;
class CompletionQueueTag;
class CompletionQueue;
class RpcMethod;
class Server;
class ServerBuilder;
class ServerContext;
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler;
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result);
} // namespace internal
extern CoreCodegenInterface* g_core_codegen_interface;
@ -196,28 +205,27 @@ class CompletionQueue : private GrpcLibraryCodegen {
template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::internal::TemplatedBidiStreamingHandler;
friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::Server;
friend class ::grpc::ServerContext;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend Status(::grpc::internal::BlockingUnaryCall)(
ChannelInterface* channel, const internal::RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result);
NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
/// Wraps \a grpc_completion_queue_pluck.
/// \warning Must not be mixed with calls to \a Next.
bool Pluck(CompletionQueueTag* tag) {
bool Pluck(internal::CompletionQueueTag* tag) {
auto deadline =
g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
@ -238,7 +246,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// implementation to simply call the other TryPluck function with a zero
/// timeout, i.e.:
/// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
void TryPluck(CompletionQueueTag* tag) {
void TryPluck(internal::CompletionQueueTag* tag) {
auto deadline = g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
@ -254,7 +262,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
///
/// This expects tag->FinalizeResult (if called) to return 'false', i.e., it
/// expects that the tag is internal, not something that is returned to the
/// user.
void TryPluck(CompletionQueueTag* tag, gpr_timespec deadline) {
void TryPluck(internal::CompletionQueueTag* tag, gpr_timespec deadline) {
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {

@ -21,6 +21,7 @@
namespace grpc {
namespace internal {
/// An interface allowing implementors to process and filter event tags.
class CompletionQueueTag {
public:
@ -31,6 +32,7 @@ class CompletionQueueTag {
/// queue
virtual bool FinalizeResult(void** tag, bool* status) = 0;
};
} // namespace internal
} // namespace grpc

@ -23,6 +23,7 @@
namespace grpc {
namespace internal {
class MetadataMap {
public:
MetadataMap() { memset(&arr_, 0, sizeof(arr_)); }
@ -50,6 +51,7 @@ class MetadataMap {
grpc_metadata_array arr_;
std::multimap<grpc::string_ref, grpc::string_ref> map_;
};
} // namespace internal
} // namespace grpc

@ -25,6 +25,7 @@
namespace grpc {
namespace internal {
/// A wrapper class of an application provided rpc method handler.
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler : public MethodHandler {
@ -141,6 +142,9 @@ class ServerStreamingHandler : public MethodHandler {
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
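// If the handler's final Write() deferred its pluck (has_pending_ops_),
// drain it now that the trailing-status batch has been started; this avoids
// the hang described in https://github.com/grpc/grpc/issues/11546.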
if (param.server_context->has_pending_ops_) {
param.call->cq()->Pluck(&param.server_context->pending_ops_);
}
param.call->cq()->Pluck(&ops);
}
@ -185,6 +189,9 @@ class TemplatedBidiStreamingHandler : public MethodHandler {
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
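// If the handler's final Write() deferred its pluck (has_pending_ops_),
// drain it now that the trailing-status batch has been started; this avoids
// the hang described in https://github.com/grpc/grpc/issues/11546.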
if (param.server_context->has_pending_ops_) {
param.call->cq()->Pluck(&param.server_context->pending_ops_);
}
param.call->cq()->Pluck(&ops);
}
@ -259,6 +266,7 @@ class UnknownMethodHandler : public MethodHandler {
}
};
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H

@ -24,7 +24,7 @@
#include <grpc++/impl/codegen/channel_interface.h>
namespace grpc {
namespace internal {
/// Descriptor of an RPC method
class RpcMethod {
public:
@ -55,6 +55,7 @@ class RpcMethod {
void* const channel_tag_;
};
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_RPC_METHOD_H

@ -35,8 +35,8 @@ struct grpc_byte_buffer;
namespace grpc {
class ServerContext;
class StreamContextInterface;
namespace internal {
/// Base class for running an RPC handler.
class MethodHandler {
public:
@ -71,6 +71,7 @@ class RpcServiceMethod : public RpcMethod {
void* server_tag_;
std::unique_ptr<MethodHandler> handler_;
};
} // namespace internal
} // namespace grpc

@ -25,6 +25,7 @@
#include <grpc/impl/codegen/compression_types.h>
#include <grpc++/impl/codegen/call.h>
#include <grpc++/impl/codegen/completion_queue_tag.h>
#include <grpc++/impl/codegen/config.h>
#include <grpc++/impl/codegen/create_auth_context.h>
@ -54,7 +55,6 @@ class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriterBody;
}
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@ -64,9 +64,11 @@ class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
class UnknownMethodHandler;
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler;
class Call;
class CallOpBuffer;
} // namespace internal
class CompletionQueue;
class Server;
class ServerInterface;
@ -246,14 +248,14 @@ class ServerContext {
template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::internal::TemplatedBidiStreamingHandler;
friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::ClientContext;
/// Prevent copying.
@ -262,9 +264,9 @@ class ServerContext {
class CompletionOp;
void BeginCompletionOp(Call* call);
void BeginCompletionOp(internal::Call* call);
/// Return the tag queued by BeginCompletionOp()
CompletionQueueTag* GetCompletionOpTag();
internal::CompletionQueueTag* GetCompletionOpTag();
ServerContext(gpr_timespec deadline, grpc_metadata_array* arr);
@ -281,13 +283,18 @@ class ServerContext {
CompletionQueue* cq_;
bool sent_initial_metadata_;
mutable std::shared_ptr<const AuthContext> auth_context_;
MetadataMap client_metadata_;
internal::MetadataMap client_metadata_;
std::multimap<grpc::string, grpc::string> initial_metadata_;
std::multimap<grpc::string, grpc::string> trailing_metadata_;
bool compression_level_set_;
grpc_compression_level compression_level_;
grpc_compression_algorithm compression_algorithm_;
internal::CallOpSet<internal::CallOpSendInitialMetadata,
internal::CallOpSendMessage>
pending_ops_;
bool has_pending_ops_;
};
} // namespace grpc

@ -29,20 +29,21 @@ namespace grpc {
class AsyncGenericService;
class GenericServerContext;
class RpcService;
class ServerAsyncStreamingInterface;
class ServerCompletionQueue;
class ServerContext;
class ServerCredentials;
class Service;
class ThreadPoolInterface;
extern CoreCodegenInterface* g_core_codegen_interface;
/// Models a gRPC server.
///
/// Servers are configured and started via \a grpc::ServerBuilder.
class ServerInterface : public CallHook {
namespace internal {
class ServerAsyncStreamingInterface;
} // namespace internal
class ServerInterface : public internal::CallHook {
public:
virtual ~ServerInterface() {}
@ -77,7 +78,7 @@ class ServerInterface : public CallHook {
virtual void Wait() = 0;
protected:
friend class Service;
friend class ::grpc::Service;
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.
@ -115,12 +116,13 @@ class ServerInterface : public CallHook {
virtual grpc_server* server() = 0;
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
class BaseAsyncRequest : public CompletionQueueTag {
class BaseAsyncRequest : public internal::CompletionQueueTag {
public:
BaseAsyncRequest(ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag,
bool delete_on_finalize);
virtual ~BaseAsyncRequest();
@ -130,7 +132,7 @@ class ServerInterface : public CallHook {
protected:
ServerInterface* const server_;
ServerContext* const context_;
ServerAsyncStreamingInterface* const stream_;
internal::ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_;
void* const tag_;
const bool delete_on_finalize_;
@ -140,7 +142,7 @@ class ServerInterface : public CallHook {
class RegisteredAsyncRequest : public BaseAsyncRequest {
public:
RegisteredAsyncRequest(ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag);
// uses BaseAsyncRequest::FinalizeResult
@ -154,7 +156,7 @@ class ServerInterface : public CallHook {
public:
NoPayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag)
: RegisteredAsyncRequest(server, context, stream, call_cq, tag) {
@ -169,7 +171,7 @@ class ServerInterface : public CallHook {
public:
PayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* request)
@ -195,7 +197,7 @@ class ServerInterface : public CallHook {
class GenericAsyncRequest : public BaseAsyncRequest {
public:
GenericAsyncRequest(ServerInterface* server, GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
bool delete_on_finalize);
@ -207,8 +209,9 @@ class ServerInterface : public CallHook {
};
template <class Message>
void RequestAsyncCall(RpcServiceMethod* method, ServerContext* context,
ServerAsyncStreamingInterface* stream,
void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* message) {
@ -218,8 +221,9 @@ class ServerInterface : public CallHook {
message);
}
void RequestAsyncCall(RpcServiceMethod* method, ServerContext* context,
ServerAsyncStreamingInterface* stream,
void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
GPR_CODEGEN_ASSERT(method);
@ -228,7 +232,7 @@ class ServerInterface : public CallHook {
}
void RequestAsyncGenericCall(GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {

@ -28,13 +28,14 @@
namespace grpc {
class Call;
class CompletionQueue;
class Server;
class ServerInterface;
class ServerCompletionQueue;
class ServerContext;
namespace internal {
class Call;
class ServerAsyncStreamingInterface {
public:
virtual ~ServerAsyncStreamingInterface() {}
@ -48,9 +49,10 @@ class ServerAsyncStreamingInterface {
virtual void SendInitialMetadata(void* tag) = 0;
private:
friend class ServerInterface;
friend class ::grpc::ServerInterface;
virtual void BindCall(Call* call) = 0;
};
} // namespace internal
/// Descriptor of an RPC service and its various RPC methods
class Service {
@ -88,40 +90,38 @@ class Service {
protected:
template <class Message>
void RequestAsyncUnary(int index, ServerContext* context, Message* request,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag, request);
}
void RequestAsyncClientStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
void RequestAsyncClientStreaming(
int index, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag);
}
template <class Message>
void RequestAsyncServerStreaming(int index, ServerContext* context,
Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
void RequestAsyncServerStreaming(
int index, ServerContext* context, Message* request,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag, request);
}
void RequestAsyncBidiStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
void RequestAsyncBidiStreaming(
int index, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag);
}
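// Illustrative sketch: generated AsyncService classes forward to the helpers
// above, and a server then drives calls off the completion queue, roughly
// (RequestEcho, EchoRequest, and EchoReply are hypothetical generated names):
//
//   EchoService::AsyncService service;
//   grpc::ServerContext ctx;
//   EchoRequest req;
//   grpc::ServerAsyncResponseWriter<EchoReply> responder(&ctx);
//   service.RequestEcho(&ctx, &req, &responder, cq, cq, (void*)1);
//   void* tag;
//   bool ok;
//   while (cq->Next(&tag, &ok)) { /* dispatch on tag */ }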
void AddMethod(RpcServiceMethod* method) { methods_.emplace_back(method); }
void AddMethod(internal::RpcServiceMethod* method) {
methods_.emplace_back(method);
}
void MarkMethodAsync(int index) {
GPR_CODEGEN_ASSERT(
@ -139,7 +139,7 @@ class Service {
methods_[index].reset();
}
void MarkMethodStreamed(int index, MethodHandler* streamed_method) {
void MarkMethodStreamed(int index, internal::MethodHandler* streamed_method) {
GPR_CODEGEN_ASSERT(methods_[index] && methods_[index]->handler() &&
"Cannot mark an async or generic method Streamed");
methods_[index]->SetHandler(streamed_method);
@ -148,14 +148,14 @@ class Service {
// case of BIDI_STREAMING that has 1 read and 1 write, in that order,
// and split server-side streaming is BIDI_STREAMING with 1 read and
// any number of writes, in that order.
methods_[index]->SetMethodType(::grpc::RpcMethod::BIDI_STREAMING);
methods_[index]->SetMethodType(internal::RpcMethod::BIDI_STREAMING);
}
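// Illustrative sketch: codegen's WithStreamedUnaryMethod_<Name> mixins call
// this, roughly as follows (EchoRequest/EchoReply are hypothetical generated
// names, and the handler argument is elided):
//
//   MarkMethodStreamed(/*index=*/0,
//       new internal::StreamedUnaryHandler<EchoRequest, EchoReply>(...));
//
// after which the method is dispatched as BIDI_STREAMING with exactly one
// read and one write, per the comment above.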
private:
friend class Server;
friend class ServerInterface;
ServerInterface* server_;
std::vector<std::unique_ptr<RpcServiceMethod>> methods_;
std::vector<std::unique_ptr<internal::RpcServiceMethod>> methods_;
};
} // namespace grpc

@ -30,6 +30,7 @@
namespace grpc {
namespace internal {
/// Common interface for all synchronous client side streaming.
class ClientStreamingInterface {
public:
@ -62,20 +63,6 @@ class ClientStreamingInterface {
virtual Status Finish() = 0;
};
/// Common interface for all synchronous server side streaming.
class ServerStreamingInterface {
public:
virtual ~ServerStreamingInterface() {}
/// Block to send initial metadata to client.
/// This call is optional, but if it is used, it cannot be used concurrently
/// with or after the \a Finish method.
///
/// The initial metadata that will be sent to the client will be
/// taken from the \a ServerContext associated with the call.
virtual void SendInitialMetadata() = 0;
};
/// An interface that yields a sequence of messages of type \a R.
template <class R>
class ReaderInterface {
@ -141,16 +128,55 @@ class WriterInterface {
}
};
} // namespace internal
/// Client-side interface for streaming reads of message of type \a R.
template <class R>
class ClientReaderInterface : public ClientStreamingInterface,
public ReaderInterface<R> {
class ClientReaderInterface : public internal::ClientStreamingInterface,
public internal::ReaderInterface<R> {
public:
/// Block to wait for initial metadata from server. The received metadata
/// can only be accessed after this call returns. Should only be called before
/// the first read. Calling this method is optional, and if it is not called
/// the metadata will be available in ClientContext after the first read.
virtual void WaitForInitialMetadata() = 0;
};
/// Client-side interface for streaming writes of message type \a W.
template <class W>
class ClientWriterInterface : public internal::ClientStreamingInterface,
public internal::WriterInterface<W> {
public:
/// Half close writing from the client (signal that the stream of messages
/// coming from the client is complete).
/// Blocks until currently-pending writes are completed.
/// Thread safe with respect to \a ReaderInterface::Read operations only
///
/// \return Whether the writes were successful.
virtual bool WritesDone() = 0;
};
/// Client-side interface for bi-directional streaming with
/// client-to-server stream messages of type \a W and
/// server-to-client stream messages of type \a R.
template <class W, class R>
class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
public internal::WriterInterface<W>,
public internal::ReaderInterface<R> {
public:
/// Block to wait for initial metadata from server. The received metadata
/// can only be accessed after this call returns. Should only be called before
/// the first read. Calling this method is optional, and if it is not called
/// the metadata will be available in ClientContext after the first read.
virtual void WaitForInitialMetadata() = 0;
/// Half close writing from the client (signal that the stream of messages
/// coming from the client is complete).
/// Blocks until currently-pending writes are completed.
/// Thread-safe with respect to \a ReaderInterface::Read
///
/// \return Whether the writes were successful.
virtual bool WritesDone() = 0;
};
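// ---------------------------------------------------------------------------
// Illustrative sketch: typical blocking use of the bi-directional interface
// above. EchoService, Msg, and stub->Chat() are hypothetical generated names.
#include <grpc++/grpc++.h>

grpc::Status ChatOnce(EchoService::Stub* stub) {
  grpc::ClientContext context;
  std::unique_ptr<grpc::ClientReaderWriter<Msg, Msg>> stream =
      stub->Chat(&context);
  Msg out;
  stream->Write(out);    // blocks; sends initial metadata first if uncorked
  stream->WritesDone();  // half-close: no further client messages
  Msg in;
  while (stream->Read(&in)) {
    // Drain server messages until the server closes the stream.
  }
  return stream->Finish();  // collects the status and trailing metadata
}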
/// Synchronous (blocking) client-side API for doing server-streaming RPCs,
@ -159,28 +185,14 @@ class ClientReaderInterface : public ClientStreamingInterface,
template <class R>
class ClientReader final : public ClientReaderInterface<R> {
public:
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial
/// metadata used to send to the server when starting the call.
template <class W>
ClientReader(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const W& request)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(ops.SendMessage(request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
struct internal {
template <class W>
static ClientReader* Create(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request) {
return new ClientReader(channel, method, context, request);
}
};
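// Note: the blocking constructor below is private; generated stubs construct
// readers through internal::Create, roughly (rpcmethod_ListFeatures_ and
// Feature are hypothetical generated names):
//
//   return std::unique_ptr<grpc::ClientReader<Feature>>(
//       grpc::ClientReader<Feature>::internal::Create(
//           channel_.get(), rpcmethod_ListFeatures_, context, request));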
/// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
/// semantics.
@ -192,7 +204,8 @@ class ClientReader final : public ClientReaderInterface<R> {
void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); /// status ignored
@ -209,7 +222,9 @@ class ClientReader final : public ClientReaderInterface<R> {
/// already received (if initial metadata is received, it can be then
/// accessed through the \a ClientContext associated with this call).
bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
@ -224,7 +239,7 @@ class ClientReader final : public ClientReaderInterface<R> {
/// The \a ClientContext associated with this call is updated with
/// possible metadata received from the server.
Status Finish() override {
CallOpSet<CallOpClientRecvStatus> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops;
Status status;
ops.ClientRecvStatus(context_, &status);
call_.PerformOps(&ops);
@ -235,53 +250,48 @@ class ClientReader final : public ClientReaderInterface<R> {
private:
ClientContext* context_;
CompletionQueue cq_;
Call call_;
};
::grpc::internal::Call call_;
/// Client-side interface for streaming writes of message type \a W.
template <class W>
class ClientWriterInterface : public ClientStreamingInterface,
public WriterInterface<W> {
public:
/// Half close writing from the client (signal that the stream of messages
/// coming from the client is complete).
/// Blocks until currently-pending writes are completed.
/// Thread safe with respect to \a ReaderInterface::Read operations only
///
/// \return Whether the writes were successful.
virtual bool WritesDone() = 0;
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial
/// metadata used to send to the server when starting the call.
template <class W>
ClientReader(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(ops.SendMessage(request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
};
/// Synchronous (blocking) client-side API for doing client-streaming RPCs,
/// where the outgoing message stream coming from the client has messages of
/// type \a W.
template <class W>
class ClientWriter : public ClientWriterInterface<W> {
class ClientWriter final : public ClientWriterInterface<W> {
public:
/// Block to create a stream (i.e. send request headers and other initial
/// metadata to the server). Note that \a context will be used to fill
/// in custom initial metadata. \a response will be filled in with the
/// single expected response message from the server upon a successful
/// call to the \a Finish method of this instance.
template <class R>
ClientWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, R* response)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
if (!context_->initial_metadata_corked_) {
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
struct internal {
template <class R>
static ClientWriter* Create(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response) {
return new ClientWriter(channel, method, context, response);
}
}
};
/// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
/// semantics.
@ -292,7 +302,8 @@ class ClientWriter : public ClientWriterInterface<W> {
void WaitForInitialMetadata() {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored
@ -304,10 +315,11 @@ class ClientWriter : public ClientWriterInterface<W> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call).
using WriterInterface<W>::Write;
using ::grpc::internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops;
if (options.is_last_message()) {
@ -328,7 +340,7 @@ class ClientWriter : public ClientWriterInterface<W> {
}
bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
@ -353,61 +365,55 @@ class ClientWriter : public ClientWriterInterface<W> {
private:
ClientContext* context_;
CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage,
CallOpClientRecvStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpGenericRecvMessage,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
CompletionQueue cq_;
Call call_;
};
/// Client-side interface for bi-directional streaming with
/// client-to-server stream messages of type \a W and
/// server-to-client stream messages of type \a R.
template <class W, class R>
class ClientReaderWriterInterface : public ClientStreamingInterface,
public WriterInterface<W>,
public ReaderInterface<R> {
public:
/// Block to wait for initial metadata from server. The received metadata
/// can only be accessed after this call returns. Should only be called before
/// the first read. Calling this method is optional, and if it is not called
/// the metadata will be available in ClientContext after the first read.
virtual void WaitForInitialMetadata() = 0;
/// Half close writing from the client (signal that the stream of messages
/// coming from the client is complete).
/// Blocks until currently-pending writes are completed.
/// Thread-safe with respect to \a ReaderInterface::Read
///
/// \return Whether the writes were successful.
virtual bool WritesDone() = 0;
};
::grpc::internal::Call call_;
/// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
/// where the outgoing message stream coming from the client has messages of
/// type \a W, and the incoming messages stream coming from the server has
/// messages of type \a R.
template <class W, class R>
class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
public:
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
ClientReaderWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context)
/// Block to create a stream (i.e. send request headers and other initial
/// metadata to the server). Note that \a context will be used to fill
/// in custom initial metadata. \a response will be filled in with the
/// single expected response message from the server upon a successful
/// call to the \a Finish method of this instance.
template <class R>
ClientWriter(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
if (!context_->initial_metadata_corked_) {
CallOpSet<CallOpSendInitialMetadata> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
};
/// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
/// where the outgoing message stream coming from the client has messages of
/// type \a W, and the incoming messages stream coming from the server has
/// messages of type \a R.
template <class W, class R>
class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
public:
struct internal {
static ClientReaderWriter* Create(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context) {
return new ClientReaderWriter(channel, method, context);
}
};
/// Block waiting to read initial metadata from the server.
/// This call is optional, but if it is used, it cannot be used concurrently
@ -418,7 +424,8 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored
@ -434,7 +441,9 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// Also receives initial metadata if not already received (updates the \a
/// ClientContext associated with this call in that case).
bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
@ -448,10 +457,11 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call to fill in values).
using WriterInterface<W>::Write;
using ::grpc::internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops;
if (options.is_last_message()) {
@ -472,7 +482,7 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
}
bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
@ -484,7 +494,9 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// - the \a ClientContext associated with this call is updated with
/// possible trailing metadata sent from the server.
Status Finish() override {
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
@ -498,13 +510,61 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
private:
ClientContext* context_;
CompletionQueue cq_;
Call call_;
::grpc::internal::Call call_;
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
ClientReaderWriter(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
if (!context_->initial_metadata_corked_) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
};
namespace internal {
/// Common interface for all synchronous server side streaming.
class ServerStreamingInterface {
public:
virtual ~ServerStreamingInterface() {}
/// Block to send initial metadata to client.
/// This call is optional, but if it is used, it cannot be used concurrently
/// with or after the \a Finish method.
///
/// The initial metadata that will be sent to the client will be
/// taken from the \a ServerContext associated with the call.
virtual void SendInitialMetadata() = 0;
};
} // namespace internal
/// Server-side interface for streaming reads of message of type \a R.
template <class R>
class ServerReaderInterface : public ServerStreamingInterface,
public ReaderInterface<R> {};
class ServerReaderInterface : public internal::ServerStreamingInterface,
public internal::ReaderInterface<R> {};
/// Server-side interface for streaming writes of message of type \a W.
template <class W>
class ServerWriterInterface : public internal::ServerStreamingInterface,
public internal::WriterInterface<W> {};
/// Server-side interface for bi-directional streaming.
template <class W, class R>
class ServerReaderWriterInterface : public internal::ServerStreamingInterface,
public internal::WriterInterface<W>,
public internal::ReaderInterface<R> {};
/// Synchronous (blocking) server-side API for doing client-streaming RPCs,
/// where the incoming message stream coming from the client has messages of
@ -512,15 +572,13 @@ class ServerReaderInterface : public ServerStreamingInterface,
template <class R>
class ServerReader final : public ServerReaderInterface<R> {
public:
ServerReader(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics. Note that initial metadata will be affected by the
/// \a ServerContext associated with this call.
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
internal::CallOpSet<internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@ -537,21 +595,22 @@ class ServerReader final : public ServerReaderInterface<R> {
}
bool Read(R* msg) override {
CallOpSet<CallOpRecvMessage<R>> ops;
internal::CallOpSet<internal::CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops) && ops.got_message;
}
private:
Call* const call_;
internal::Call* const call_;
ServerContext* const ctx_;
};
/// Server-side interface for streaming writes of message of type \a W.
template <class W>
class ServerWriterInterface : public ServerStreamingInterface,
public WriterInterface<W> {};
template <class ServiceType, class RequestType, class ResponseType>
friend class internal::ClientStreamingHandler;
ServerReader(internal::Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
};
/// Synchronous (blocking) server-side API for doing server-streaming
/// RPCs, where the outgoing message stream coming from the
@ -559,8 +618,6 @@ class ServerWriterInterface : public ServerStreamingInterface,
template <class W>
class ServerWriter final : public ServerWriterInterface<W> {
public:
ServerWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics.
/// Note that initial metadata will be affected by the
@ -568,7 +625,7 @@ class ServerWriter final : public ServerWriterInterface<W> {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
internal::CallOpSet<internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@ -584,37 +641,45 @@ class ServerWriter final : public ServerWriterInterface<W> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call to fill in values).
using WriterInterface<W>::Write;
using internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
if (options.is_last_message()) {
options.set_buffer_hint();
}
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
if (!ops.SendMessage(msg, options).ok()) {
if (!ctx_->pending_ops_.SendMessage(msg, options).ok()) {
return false;
}
if (!ctx_->sent_initial_metadata_) {
ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
ctx_->pending_ops_.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
ops.set_compression_level(ctx_->compression_level());
ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
}
ctx_->sent_initial_metadata_ = true;
}
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops);
call_->PerformOps(&ctx_->pending_ops_);
// If this is the last message, we defer the pluck until AFTER we start
// the trailing metadata op. This prevents hangs. See
// https://github.com/grpc/grpc/issues/11546
if (options.is_last_message()) {
ctx_->has_pending_ops_ = true;
return true;
}
ctx_->has_pending_ops_ = false;
return call_->cq()->Pluck(&ctx_->pending_ops_);
}
private:
Call* const call_;
internal::Call* const call_;
ServerContext* const ctx_;
};
/// Server-side interface for bi-directional streaming.
template <class W, class R>
class ServerReaderWriterInterface : public ServerStreamingInterface,
public WriterInterface<W>,
public ReaderInterface<R> {};
template <class ServiceType, class RequestType, class ResponseType>
friend class internal::ServerStreamingHandler;
ServerWriter(internal::Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
};
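For orientation, a minimal sketch (not part of this patch) of a sync server-streaming handler driving ServerWriter; "Item" stands in for a protoc-generated message type. Setting set_last_message() on the final Write is what exercises the deferred-pluck path added above.

#include <vector>
#include <grpc++/grpc++.h>

grpc::Status ListItems(grpc::ServerContext* /*ctx*/,
                       const std::vector<Item>& items,
                       grpc::ServerWriter<Item>* writer) {
  for (size_t i = 0; i < items.size(); ++i) {
    grpc::WriteOptions options;
    // On the last message, Write() returns without plucking; completion is
    // deferred until the trailing-metadata op starts (see issue #11546).
    if (i + 1 == items.size()) options.set_last_message();
    if (!writer->Write(items[i], options)) break;  // false => stream broken
  }
  return grpc::Status::OK;
}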
/// Actual implementation of bi-directional streaming
namespace internal {
@ -654,26 +719,34 @@ class ServerReaderWriterBody final {
if (options.is_last_message()) {
options.set_buffer_hint();
}
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
if (!ops.SendMessage(msg, options).ok()) {
if (!ctx_->pending_ops_.SendMessage(msg, options).ok()) {
return false;
}
if (!ctx_->sent_initial_metadata_) {
ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
ctx_->pending_ops_.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
ops.set_compression_level(ctx_->compression_level());
ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
}
ctx_->sent_initial_metadata_ = true;
}
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops);
call_->PerformOps(&ctx_->pending_ops_);
// If this is the last message, we defer the pluck until AFTER we start
// the trailing metadata op. This prevents hangs. See
// https://github.com/grpc/grpc/issues/11546
if (options.is_last_message()) {
ctx_->has_pending_ops_ = true;
return true;
}
ctx_->has_pending_ops_ = false;
return call_->cq()->Pluck(&ctx_->pending_ops_);
}
private:
Call* const call_;
ServerContext* const ctx_;
};
} // namespace internal
/// Synchronous (blocking) server-side API for a bidirectional
@ -683,8 +756,6 @@ class ServerReaderWriterBody final {
template <class W, class R>
class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
public:
ServerReaderWriter(Call* call, ServerContext* ctx) : body_(call, ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics. Note that initial metadata will be affected by the
/// \a ServerContext associated with this call.
@ -701,13 +772,18 @@ class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the \a
/// ServerContext associated with this call).
using WriterInterface<W>::Write;
using internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
return body_.Write(msg, options);
}
private:
internal::ServerReaderWriterBody<W, R> body_;
friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>,
false>;
ServerReaderWriter(internal::Call* call, ServerContext* ctx)
: body_(call, ctx) {}
};
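A minimal usage sketch of the sync bidi API (hypothetical "Msg" message type): Read() returns false once the client half-closes, Write() returns false once the stream is broken.

#include <grpc++/grpc++.h>

grpc::Status Echo(grpc::ServerContext* /*ctx*/,
                  grpc::ServerReaderWriter<Msg, Msg>* stream) {
  Msg msg;
  while (stream->Read(&msg)) {       // false => client finished writing
    if (!stream->Write(msg)) break;  // false => stream broken
  }
  return grpc::Status::OK;
}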
/// A class to represent a flow-controlled unary call. This is something
@ -722,9 +798,6 @@ template <class RequestType, class ResponseType>
class ServerUnaryStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> {
public:
ServerUnaryStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
/// Block to send initial metadata to client.
/// Implicit input parameter:
/// - the \a ServerContext associated with this call will be used for
@ -761,7 +834,7 @@ class ServerUnaryStreamer final
/// \param options The WriteOptions affecting the write operation.
///
/// \return \a true on success, \a false when the stream has been closed.
using WriterInterface<ResponseType>::Write;
using internal::WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response, WriteOptions options) override {
if (write_done_ || !read_done_) {
return false;
@ -774,6 +847,11 @@ class ServerUnaryStreamer final
internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
bool read_done_;
bool write_done_;
friend class internal::TemplatedBidiStreamingHandler<
ServerUnaryStreamer<RequestType, ResponseType>, true>;
ServerUnaryStreamer(internal::Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
};
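A sketch of the streamed-unary contract enforced by read_done_/write_done_ above: exactly one Read(), then exactly one Write(). "Req"/"Resp" are placeholder message types; registration happens via the WithStreamedUnaryMethod_* shims in the generator changes below.

grpc::Status StreamedSayHello(grpc::ServerContext* /*ctx*/,
                              grpc::ServerUnaryStreamer<Req, Resp>* streamer) {
  Req req;
  if (!streamer->Read(&req)) return grpc::Status::CANCELLED;
  Resp resp;  // ... fill in from req ...
  if (!streamer->Write(resp)) return grpc::Status::CANCELLED;
  return grpc::Status::OK;  // a second Read()/Write() would return false
}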
/// A class to represent a flow-controlled server-side streaming call.
@ -785,9 +863,6 @@ template <class RequestType, class ResponseType>
class ServerSplitStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> {
public:
ServerSplitStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false) {}
/// Block to send initial metadata to client.
/// Implicit input parameter:
/// - the \a ServerContext associated with this call will be used for
@ -824,7 +899,7 @@ class ServerSplitStreamer final
/// \param options The WriteOptions affecting the write operation.
///
/// \return \a true on success, \a false when the stream has been closed.
using WriterInterface<ResponseType>::Write;
using internal::WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response, WriteOptions options) override {
return read_done_ && body_.Write(response, options);
}
@ -832,6 +907,11 @@ class ServerSplitStreamer final
private:
internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
bool read_done_;
friend class internal::TemplatedBidiStreamingHandler<
ServerSplitStreamer<RequestType, ResponseType>, false>;
ServerSplitStreamer(internal::Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false) {}
};
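The split-streaming variant, sketched the same way: one Read() for the request, then any number of Writes, matching the read_done_ guard above (placeholder types again).

grpc::Status StreamedList(grpc::ServerContext* /*ctx*/,
                          grpc::ServerSplitStreamer<Req, Resp>* streamer) {
  Req req;
  if (!streamer->Read(&req)) return grpc::Status::CANCELLED;
  for (int i = 0; i < 3; ++i) {  // e.g. three responses
    Resp resp;
    if (!streamer->Write(resp)) break;  // false => stream broken
  }
  return grpc::Status::OK;
}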
} // namespace grpc

@ -19,6 +19,8 @@
#ifndef GRPCXX_IMPL_CODEGEN_TIME_H
#define GRPCXX_IMPL_CODEGEN_TIME_H
#include <chrono>
#include <grpc++/impl/codegen/config.h>
#include <grpc/impl/codegen/grpc_types.h>
@ -59,10 +61,6 @@ class TimePoint<gpr_timespec> {
} // namespace grpc
#include <chrono>
#include <grpc/impl/codegen/grpc_types.h>
namespace grpc {
// from and to should be absolute time.
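These TimePoint conversions are what let callers hand std::chrono deadlines straight to the API; a small sketch (the 500ms budget is arbitrary):

#include <chrono>
#include <grpc++/grpc++.h>

void SetShortDeadline(grpc::ClientContext* ctx) {
  // Converted to an absolute gpr_timespec through TimePoint<>.
  ctx->set_deadline(std::chrono::system_clock::now() +
                    std::chrono::milliseconds(500));
}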

@ -41,7 +41,7 @@ class SecureCallCredentials;
/// It can make various assertions, e.g., about the client’s identity or role,
/// for all the calls on that channel.
///
/// \see http://www.grpc.io/docs/guides/auth.html
/// \see https://grpc.io/docs/guides/auth.html
class ChannelCredentials : private GrpcLibraryCodegen {
public:
ChannelCredentials();
@ -67,7 +67,7 @@ class ChannelCredentials : private GrpcLibraryCodegen {
/// A call credentials object encapsulates the state needed by a client to
/// authenticate with a server for a given call on a channel.
///
/// \see http://www.grpc.io/docs/guides/auth.html
/// \see https://grpc.io/docs/guides/auth.html
class CallCredentials : private GrpcLibraryCodegen {
public:
CallCredentials();
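A sketch of how the two credential kinds compose on a channel (the target and token are placeholders):

#include <memory>
#include <grpc++/grpc++.h>

std::shared_ptr<grpc::Channel> MakeAuthedChannel() {
  auto channel_creds = grpc::SslCredentials(grpc::SslCredentialsOptions());
  auto call_creds = grpc::AccessTokenCredentials("my-oauth2-token");
  // Channel-level identity plus per-call credentials, applied to every call.
  auto creds = grpc::CompositeChannelCredentials(channel_creds, call_creds);
  return grpc::CreateChannel("myservice.example.com:443", creds);
}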

@ -172,7 +172,8 @@ class Server final : public ServerInterface, private GrpcLibraryCodegen {
/// \param num_cqs How many completion queues does \a cqs hold.
void Start(ServerCompletionQueue** cqs, size_t num_cqs) override;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override;
void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) override;
void ShutdownInternal(gpr_timespec deadline) override;

@ -40,7 +40,6 @@ namespace grpc {
class AsyncGenericService;
class ResourceQuota;
class CompletionQueue;
class RpcService;
class Server;
class ServerCompletionQueue;
class ServerCredentials;
@ -196,10 +195,7 @@ class ServerBuilder {
struct SyncServerSettings {
SyncServerSettings()
: num_cqs(GPR_MAX(1, gpr_cpu_num_cores())),
min_pollers(1),
max_pollers(2),
cq_timeout_msec(10000) {}
: num_cqs(1), min_pollers(1), max_pollers(2), cq_timeout_msec(10000) {}
/// Number of server completion queues to create to listen to incoming RPCs.
int num_cqs;
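Servers that want the previous one-CQ-per-core behavior back can set it explicitly; a sketch assuming the existing SetSyncServerOption API:

#include <grpc++/server_builder.h>

void RestorePerCoreCqs(grpc::ServerBuilder* builder, int num_cores) {
  builder->SetSyncServerOption(grpc::ServerBuilder::NUM_CQS, num_cores);
}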

@ -287,6 +287,14 @@ typedef struct {
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"
/** String defining the optimization target for a channel.
Can be: "latency" - attempt to minimize latency at the cost of throughput
"blend" - try to balance latency and throughput
"throughput" - attempt to maximize throughput at the expense of
latency
Defaults to "blend". In the current implementation "blend" is equivalent to
"latency". */
#define GRPC_ARG_OPTIMIZATION_TARGET "grpc.optimization_target"
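A sketch of opting a channel into the new knob from C++ (the target address is a placeholder):

#include <grpc++/grpc++.h>

std::shared_ptr<grpc::Channel> MakeThroughputChannel() {
  grpc::ChannelArguments args;
  args.SetString(GRPC_ARG_OPTIMIZATION_TARGET, "throughput");
  return grpc::CreateCustomChannel("localhost:50051",
                                   grpc::InsecureChannelCredentials(), args);
}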
/** \} */
/** Result of a grpc call. If the caller satisfies the prerequisites of a

@ -3,7 +3,7 @@
"version": "1.5.0-dev",
"author": "Google Inc.",
"description": "gRPC Library for Node",
"homepage": "http://www.grpc.io/",
"homepage": "https://grpc.io/",
"repository": {
"type": "git",
"url": "https://github.com/grpc/grpc.git"
@ -56,7 +56,7 @@
},
"binary": {
"module_name": "grpc_node",
"module_path": "src/node/extension_binary",
"module_path": "src/node/extension_binary/{node_abi}-{platform}-{arch}",
"host": "https://storage.googleapis.com/",
"remote_path": "grpc-precompiled-binaries/node/{name}/v{version}",
"package_name": "{node_abi}-{platform}-{arch}.tar.gz"

@ -106,6 +106,7 @@
<file baseinstalldir="/" name="src/core/lib/support/mpscq.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/murmur_hash.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/spinlock.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/stack_lockfree.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/string.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/string_windows.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/thd_internal.h" role="src" />
@ -135,6 +136,7 @@
<file baseinstalldir="/" name="src/core/lib/support/log_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/mpscq.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/murmur_hash.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/stack_lockfree.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/string.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/string_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/string_util_windows.c" role="src" />

@ -281,7 +281,7 @@ setuptools.setup(
description='HTTP/2-based RPC framework',
author='The gRPC Authors',
author_email='grpc-io@googlegroups.com',
url='http://www.grpc.io',
url='https://grpc.io',
license=LICENSE,
long_description=open(README).read(),
ext_modules=CYTHON_EXTENSION_MODULES,

@ -140,7 +140,6 @@ grpc::string GetHeaderIncludes(grpc_generator::File *file,
printer->Print(vars, "namespace grpc {\n");
printer->Print(vars, "class CompletionQueue;\n");
printer->Print(vars, "class Channel;\n");
printer->Print(vars, "class RpcService;\n");
printer->Print(vars, "class ServerCompletionQueue;\n");
printer->Print(vars, "class ServerContext;\n");
printer->Print(vars, "} // namespace grpc\n\n");
@ -187,19 +186,21 @@ void PrintHeaderClientMethodInterfaces(
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientWriterInterface< $Request$>>"
"std::unique_ptr< ::grpc::ClientWriterInterface< "
"$Request$>>"
" $Method$("
"::grpc::ClientContext* context, $Response$* response) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< ::grpc::ClientWriterInterface< $Request$>>"
"($Method$Raw(context, response));\n");
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientWriterInterface< $Request$>>"
"($Method$Raw(context, response));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
"std::unique_ptr< ::grpc::ClientAsyncWriterInterface< "
"$Request$>>"
" Async$Method$(::grpc::ClientContext* context, $Response$* "
"response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
@ -213,19 +214,21 @@ void PrintHeaderClientMethodInterfaces(
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientReaderInterface< $Response$>>"
"std::unique_ptr< ::grpc::ClientReaderInterface< "
"$Response$>>"
" $Method$(::grpc::ClientContext* context, const $Request$& request)"
" {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< ::grpc::ClientReaderInterface< $Response$>>"
"($Method$Raw(context, request));\n");
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientReaderInterface< $Response$>>"
"($Method$Raw(context, request));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncReaderInterface< $Response$>> "
"std::unique_ptr< ::grpc::ClientAsyncReaderInterface< "
"$Response$>> "
"Async$Method$("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
@ -242,36 +245,37 @@ void PrintHeaderClientMethodInterfaces(
"$Request$, $Response$>> "
"$Method$(::grpc::ClientContext* context) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< "
"::grpc::ClientReaderWriterInterface< $Request$, $Response$>>("
"$Method$Raw(context));\n");
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientReaderWriterInterface< "
"$Request$, $Response$>>("
"$Method$Raw(context));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(
*vars,
"std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
"std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< "
"$Request$, $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>>("
"Async$Method$Raw(context, cq, tag));\n");
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< "
"$Request$, $Response$>>("
"Async$Method$Raw(context, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
}
} else {
if (method->NoStreaming()) {
printer->Print(
*vars,
"virtual ::grpc::ClientAsyncResponseReaderInterface< $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) = 0;\n");
printer->Print(*vars,
"virtual "
"::grpc::ClientAsyncResponseReaderInterface< "
"$Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) = 0;\n");
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
@ -286,7 +290,8 @@ void PrintHeaderClientMethodInterfaces(
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
"virtual ::grpc::ClientReaderInterface< $Response$>* $Method$Raw("
"virtual ::grpc::ClientReaderInterface< $Response$>* "
"$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) = 0;\n");
printer->Print(
*vars,
@ -451,7 +456,8 @@ void PrintHeaderClientMethodData(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
printer->Print(*vars, "const ::grpc::RpcMethod rpcmethod_$Method$_;\n");
printer->Print(*vars,
"const ::grpc::internal::RpcMethod rpcmethod_$Method$_;\n");
}
void PrintHeaderServerMethodSync(grpc_generator::Printer *printer,
@ -623,7 +629,7 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::StreamedUnaryHandler< $Request$, "
" new ::grpc::internal::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
@ -671,15 +677,16 @@ void PrintHeaderServerMethodSplitStreaming(
"{}\n");
printer->Print(" public:\n");
printer->Indent();
printer->Print(*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::SplitServerStreamingHandler< $Request$, "
"$Response$>(std::bind"
"(&WithSplitStreamingMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
"}\n");
printer->Print(
*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::SplitServerStreamingHandler< $Request$, "
"$Response$>(std::bind"
"(&WithSplitStreamingMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
"~WithSplitStreamingMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
@ -819,7 +826,8 @@ void PrintHeaderService(grpc_generator::Printer *printer,
" {\n public:\n");
printer->Indent();
printer->Print(
"Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
"Stub(const std::shared_ptr< ::grpc::ChannelInterface>& "
"channel);\n");
for (int i = 0; i < service->method_count(); ++i) {
PrintHeaderClientMethod(printer, service->method(i).get(), vars, true);
}
@ -1082,11 +1090,12 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::Status $ns$$Service$::Stub::$Method$("
"::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) {\n");
printer->Print(*vars,
" return ::grpc::BlockingUnaryCall(channel_.get(), "
"rpcmethod_$Method$_, "
"context, request, response);\n"
"}\n\n");
printer->Print(
*vars,
" return ::grpc::internal::BlockingUnaryCall(channel_.get(), "
"rpcmethod_$Method$_, "
"context, request, response);\n"
"}\n\n");
printer->Print(
*vars,
"::grpc::ClientAsyncResponseReader< $Response$>* "
@ -1095,7 +1104,8 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::CompletionQueue* cq) {\n");
printer->Print(*vars,
" return "
"::grpc::ClientAsyncResponseReader< $Response$>::Create("
"::grpc::ClientAsyncResponseReader< $Response$>::"
"internal::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request);\n"
@ -1105,19 +1115,21 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientWriter< $Request$>* "
"$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, $Response$* response) {\n");
printer->Print(*vars,
" return new ::grpc::ClientWriter< $Request$>("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, response);\n"
"}\n\n");
printer->Print(
*vars,
" return ::grpc::ClientWriter< $Request$>::internal::Create("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, response);\n"
"}\n\n");
printer->Print(*vars,
"::grpc::ClientAsyncWriter< $Request$>* "
"$ns$$Service$::Stub::Async$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncWriter< $Request$>::Create("
" return ::grpc::ClientAsyncWriter< $Request$>::"
"internal::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, response, tag);\n"
@ -1128,19 +1140,21 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientReader< $Response$>* "
"$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) {\n");
printer->Print(*vars,
" return new ::grpc::ClientReader< $Response$>("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, request);\n"
"}\n\n");
printer->Print(
*vars,
" return ::grpc::ClientReader< $Response$>::internal::Create("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, request);\n"
"}\n\n");
printer->Print(*vars,
"::grpc::ClientAsyncReader< $Response$>* "
"$ns$$Service$::Stub::Async$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncReader< $Response$>::Create("
" return ::grpc::ClientAsyncReader< $Response$>::"
"internal::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, tag);\n"
@ -1151,8 +1165,8 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::$Method$Raw(::grpc::ClientContext* context) {\n");
printer->Print(*vars,
" return new ::grpc::ClientReaderWriter< "
"$Request$, $Response$>("
" return ::grpc::ClientReaderWriter< "
"$Request$, $Response$>::internal::Create("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context);\n"
@ -1162,14 +1176,14 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(
*vars,
" return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, tag);\n"
"}\n\n");
printer->Print(*vars,
" return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::"
"internal::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, tag);\n"
"}\n\n");
}
}
@ -1279,7 +1293,7 @@ void PrintSourceService(grpc_generator::Printer *printer,
printer->Print(*vars,
", rpcmethod_$Method$_("
"$prefix$$Service$_method_names[$Idx$], "
"::grpc::RpcMethod::$StreamingType$, "
"::grpc::internal::RpcMethod::$StreamingType$, "
"channel"
")\n");
}
@ -1302,38 +1316,38 @@ void PrintSourceService(grpc_generator::Printer *printer,
if (method->NoStreaming()) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::NORMAL_RPC,\n"
" new ::grpc::RpcMethodHandler< $ns$$Service$::Service, "
" ::grpc::internal::RpcMethod::NORMAL_RPC,\n"
" new ::grpc::internal::RpcMethodHandler< $ns$$Service$::Service, "
"$Request$, "
"$Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ClientOnlyStreaming(method.get())) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::ClientStreamingHandler< "
" ::grpc::internal::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::internal::ClientStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ServerOnlyStreaming(method.get())) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::ServerStreamingHandler< "
" ::grpc::internal::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::internal::ServerStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::BidiStreamingHandler< "
" ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
}
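Concretely, for a hypothetical Greeter service with a unary SayHello, the NORMAL_RPC branch above now emits roughly:

AddMethod(new ::grpc::internal::RpcServiceMethod(
    Greeter_method_names[0],
    ::grpc::internal::RpcMethod::NORMAL_RPC,
    new ::grpc::internal::RpcMethodHandler<Greeter::Service, HelloRequest,
                                           HelloReply>(
        std::mem_fn(&Greeter::Service::SayHello), this)));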
@ -1501,7 +1515,8 @@ void PrintMockClientMethods(grpc_generator::Printer *printer,
printer->Print(
*vars,
"MOCK_METHOD3(Async$Method$Raw, "
"::grpc::ClientAsyncReaderWriterInterface<$Request$, $Response$>*"
"::grpc::ClientAsyncReaderWriterInterface<$Request$, "
"$Response$>*"
"(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, "
"void* tag));\n");
}

@ -52,6 +52,8 @@
/* Client channel implementation */
grpc_tracer_flag grpc_client_channel_trace = GRPC_TRACER_INITIALIZER(false);
/*************************************************************************
* METHOD-CONFIG TABLE
*/
@ -241,6 +243,10 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_REF(error));
}
}
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
grpc_connectivity_state_name(state));
}
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
reason);
}
@ -251,6 +257,10 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
w->lb_policy, grpc_connectivity_state_name(w->state));
}
if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
@ -263,7 +273,6 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
}
}
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
gpr_free(w);
}
@ -273,7 +282,6 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
grpc_combiner_scheduler(chand->combiner));
@ -283,6 +291,18 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
}
GPR_ASSERT(!chand->started_resolving);
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next_locked(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
}
typedef struct {
char *server_name;
grpc_server_retry_throttle_data *retry_throttle_data;
@ -345,8 +365,13 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
channel_data *chand = arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-NULL.
char *lb_policy_name = NULL;
bool lb_policy_name_changed = false;
grpc_lb_policy *new_lb_policy = NULL;
char *service_config_json = NULL;
grpc_server_retry_throttle_data *retry_throttle_data = NULL;
@ -394,10 +419,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
// taking a lock on chand->info_mu, because this function is the
// only thing that modifies its value, and it can only be invoked
// once at any given time.
const bool lb_policy_type_changed =
lb_policy_name_changed =
chand->info_lb_policy_name == NULL ||
strcmp(chand->info_lb_policy_name, lb_policy_name) != 0;
if (chand->lb_policy != NULL && !lb_policy_type_changed) {
if (chand->lb_policy != NULL && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
} else {
@ -445,6 +470,13 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
chand, lb_policy_name, lb_policy_name_changed ? " (changed)" : "",
service_config_json);
}
// Now swap out fields in chand. Note that the new values may still
// be NULL if (e.g.) the resolver failed to return results or the
// results did not contain the necessary data.
@ -479,6 +511,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
if (new_lb_policy != NULL || error != GRPC_ERROR_NONE ||
chand->resolver == NULL) {
if (chand->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
chand->lb_policy);
}
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
chand->interested_parties);
@ -489,7 +525,13 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
// Now that we've swapped out the relevant fields of chand, check for
// error or shutdown.
if (error != GRPC_ERROR_NONE || chand->resolver == NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
}
if (chand->resolver != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
}
grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
@ -510,6 +552,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_error *state_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (new_lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
}
GRPC_ERROR_UNREF(state_error);
state = grpc_lb_policy_check_connectivity_locked(exec_ctx, new_lb_policy,
&state_error);
@ -772,7 +817,9 @@ typedef struct client_channel_call_data {
gpr_atm subchannel_call_or_error;
gpr_arena *arena;
bool pick_pending;
grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
grpc_connected_subchannel *connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity *pollent;
@ -837,8 +884,15 @@ static void waiting_for_pick_batches_add_locked(
}
static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
call_data *calld,
grpc_call_element *elem,
grpc_error *error) {
call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
elem->channel_data, calld, calld->waiting_for_pick_batches_count,
grpc_error_string(error));
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
@ -848,14 +902,21 @@ static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
}
static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
call_data *calld) {
grpc_call_element *elem) {
call_data *calld = elem->call_data;
if (calld->waiting_for_pick_batches_count == 0) return;
call_or_error coe = get_call_or_error(calld);
if (coe.error != GRPC_ERROR_NONE) {
waiting_for_pick_batches_fail_locked(exec_ctx, calld,
waiting_for_pick_batches_fail_locked(exec_ctx, elem,
GRPC_ERROR_REF(coe.error));
return;
}
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
" pending batches to subchannel_call=%p",
elem->channel_data, calld, calld->waiting_for_pick_batches_count,
coe.subchannel_call);
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
calld->waiting_for_pick_batches[i]);
@ -869,6 +930,10 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
}
if (chand->retry_throttle_data != NULL) {
calld->retry_throttle_data =
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
@ -895,7 +960,9 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
}
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
call_data *calld, grpc_error *error) {
grpc_call_element *elem,
grpc_error *error) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call = NULL;
const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent,
@ -906,13 +973,18 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
.context = calld->subchannel_call_context};
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
elem->channel_data, calld, subchannel_call,
grpc_error_string(new_error));
}
GPR_ASSERT(set_call_or_error(
calld, (call_or_error){.subchannel_call = subchannel_call}));
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
waiting_for_pick_batches_fail_locked(exec_ctx, calld, new_error);
waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
} else {
waiting_for_pick_batches_resume_locked(exec_ctx, calld);
waiting_for_pick_batches_resume_locked(exec_ctx, elem);
}
GRPC_ERROR_UNREF(error);
}
@ -922,8 +994,6 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
grpc_error *error) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GPR_ASSERT(calld->pick_pending);
calld->pick_pending = false;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
call_or_error coe = get_call_or_error(calld);
@ -935,8 +1005,13 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
"Call dropped by load balancing policy")
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failed to create subchannel: error=%s", chand,
calld, grpc_error_string(failure));
}
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
waiting_for_pick_batches_fail_locked(exec_ctx, calld, failure);
waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
} else if (coe.error != GRPC_ERROR_NONE) {
/* already cancelled before subchannel became ready */
grpc_error *child_errors[] = {error, coe.error};
@ -950,10 +1025,15 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_DEADLINE_EXCEEDED);
}
waiting_for_pick_batches_fail_locked(exec_ctx, calld, cancellation_error);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelled before subchannel became ready: %s",
chand, calld, grpc_error_string(cancellation_error));
}
waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
} else {
/* Create call on subchannel. */
create_subchannel_call_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
GRPC_ERROR_UNREF(error);
@ -983,41 +1063,77 @@ typedef struct {
grpc_closure closure;
} pick_after_resolver_result_args;
static void continue_picking_after_resolver_result_locked(
grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
pick_after_resolver_result_args *args = arg;
if (args->cancelled) {
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "call cancelled before resolver result");
}
} else {
if (pick_subchannel_locked(exec_ctx, args->elem)) {
subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
channel_data *chand = args->elem->channel_data;
call_data *calld = args->elem->call_data;
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
chand, calld);
}
subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
} else {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
}
if (pick_subchannel_locked(exec_ctx, args->elem)) {
subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
}
}
}
gpr_free(args);
}
static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error) {
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
args, grpc_combiner_scheduler(chand->combiner));
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
&args->closure, GRPC_ERROR_NONE);
}
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (chand->lb_policy != NULL) {
grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
&calld->connected_subchannel,
GRPC_ERROR_REF(error));
}
// If we don't yet have a resolver result, then a closure for
// continue_picking_after_resolver_result_locked() will have been added to
// pick_after_resolver_result_done_locked() will have been added to
// chand->waiting_for_resolver_result_closures, and it may not be invoked
// until after this call has been destroyed. We mark the operation as
// cancelled, so that when continue_picking_after_resolver_result_locked()
// cancelled, so that when pick_after_resolver_result_done_locked()
// is called, it will be a no-op. We also immediately invoke
// subchannel_ready_locked() to propagate the error back to the caller.
for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
closure != NULL; closure = closure->next_data.next) {
pick_after_resolver_result_args *args = closure->cb_arg;
if (!args->cancelled && args->elem == elem) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: "
"cancelling pick waiting for resolver result",
chand, calld);
}
args->cancelled = true;
subchannel_ready_locked(exec_ctx, elem,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@ -1027,24 +1143,21 @@ static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GRPC_ERROR_UNREF(error);
}
// State for pick callback that holds a reference to the LB policy
// from which the pick was requested.
typedef struct {
grpc_lb_policy *lb_policy;
grpc_call_element *elem;
grpc_closure closure;
} pick_callback_args;
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy after invoking subchannel_ready_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
pick_callback_args *args = arg;
GPR_ASSERT(args != NULL);
GPR_ASSERT(args->lb_policy != NULL);
subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
GRPC_LB_POLICY_UNREF(exec_ctx, args->lb_policy, "pick_subchannel");
gpr_free(args);
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
@ -1055,23 +1168,44 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
const grpc_lb_policy_pick_args *inputs) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
pick_callback_args *pick_args = gpr_zalloc(sizeof(*pick_args));
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
}
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
pick_args->lb_policy = chand->lb_policy;
pick_args->elem = elem;
GRPC_CLOSURE_INIT(&pick_args->closure, pick_callback_done_locked, pick_args,
calld->lb_policy = chand->lb_policy;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, chand->lb_policy, inputs, &calld->connected_subchannel,
calld->subchannel_call_context, NULL, &pick_args->closure);
calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "pick_subchannel");
gpr_free(pick_args);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
}
return pick_done;
}
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
GPR_ASSERT(calld->lb_policy != NULL);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy);
}
grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
&calld->connected_subchannel, error);
}
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
@ -1107,20 +1241,9 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
} else if (chand->resolver != NULL) {
if (!chand->started_resolving) {
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next_locked(exec_ctx, chand->resolver,
&chand->resolver_result,
&chand->on_resolver_result_changed);
start_resolving_locked(exec_ctx, chand);
}
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure,
continue_picking_after_resolver_result_locked, args,
grpc_combiner_scheduler(chand->combiner));
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
&args->closure, GRPC_ERROR_NONE);
pick_after_resolver_result_start_locked(exec_ctx, elem);
} else {
subchannel_ready_locked(
exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
@ -1133,63 +1256,77 @@ static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
grpc_transport_stream_op_batch *op = arg;
grpc_call_element *elem = op->handler_private.extra_arg;
grpc_transport_stream_op_batch *batch = arg;
grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
/* need to recheck that another thread hasn't set the call */
call_or_error coe = get_call_or_error(calld);
if (coe.error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(coe.error));
}
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(coe.error));
exec_ctx, batch, GRPC_ERROR_REF(coe.error));
goto done;
}
if (coe.subchannel_call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, op);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
calld, coe.subchannel_call);
}
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
goto done;
}
// Add to waiting-for-pick list. If we succeed in getting a
// subchannel call below, we'll handle this batch (along with any
// other waiting batches) in waiting_for_pick_batches_resume_locked().
waiting_for_pick_batches_add_locked(calld, op);
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_stream) {
grpc_error *error = op->payload->cancel_stream.cancel_error;
waiting_for_pick_batches_add_locked(calld, batch);
// If this is a cancellation, cancel the pending pick (if any) and
// fail any pending batches.
if (batch->cancel_stream) {
grpc_error *error = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(error));
}
/* Stash a copy of cancel_error in our call data, so that we can use
it for subsequent operations. This ensures that if the call is
cancelled before any ops are passed down (e.g., if the deadline
cancelled before any batches are passed down (e.g., if the deadline
is in the past when the call starts), we can return the right
error to the caller when the first op does get passed down. */
error to the caller when the first batch does get passed down. */
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
if (calld->pick_pending) {
cancel_pick_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
if (calld->lb_policy != NULL) {
pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} else {
pick_after_resolver_result_cancel_locked(exec_ctx, elem,
GRPC_ERROR_REF(error));
}
waiting_for_pick_batches_fail_locked(exec_ctx, calld,
GRPC_ERROR_REF(error));
waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
goto done;
}
/* if we don't have a subchannel, try to get one */
if (!calld->pick_pending && calld->connected_subchannel == NULL &&
op->send_initial_metadata) {
calld->initial_metadata_payload = op->payload;
calld->pick_pending = true;
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->connected_subchannel == NULL);
calld->initial_metadata_payload = batch->payload;
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
that IO of the lb_policy and resolver could be done under it. */
if (pick_subchannel_locked(exec_ctx, elem)) {
// Pick was returned synchronously.
calld->pick_pending = false;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
if (calld->connected_subchannel == NULL) {
grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
set_call_or_error(calld,
(call_or_error){.error = GRPC_ERROR_REF(error)});
waiting_for_pick_batches_fail_locked(exec_ctx, calld, error);
waiting_for_pick_batches_fail_locked(exec_ctx, elem, error);
} else {
// Create subchannel call.
create_subchannel_call_locked(exec_ctx, calld, GRPC_ERROR_NONE);
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
} else {
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
@ -1232,47 +1369,59 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
If it has, we proceed on the fast path. */
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
GRPC_TRACER_ON(grpc_trace_channel)) {
grpc_call_log_op(GPR_INFO, elem, batch);
}
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
op);
batch);
}
// Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status.
if (op->recv_trailing_metadata) {
GPR_ASSERT(op->on_complete != NULL);
calld->original_on_complete = op->on_complete;
if (batch->recv_trailing_metadata) {
GPR_ASSERT(batch->on_complete != NULL);
calld->original_on_complete = batch->on_complete;
GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
grpc_schedule_on_exec_ctx);
op->on_complete = &calld->on_complete;
batch->on_complete = &calld->on_complete;
}
/* try to (atomically) get the call */
call_or_error coe = get_call_or_error(calld);
GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
if (coe.error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(coe.error));
}
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(coe.error));
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
/* early out */
return;
exec_ctx, batch, GRPC_ERROR_REF(coe.error));
goto done;
}
if (coe.subchannel_call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, op);
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
/* early out */
return;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
calld, coe.subchannel_call);
}
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
goto done;
}
/* we failed; lock and figure out what to do */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
}
GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
op->handler_private.extra_arg = elem;
batch->handler_private.extra_arg = elem;
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_INIT(&op->handler_private.closure,
start_transport_stream_op_batch_locked, op,
exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
start_transport_stream_op_batch_locked, batch,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
done:
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
@ -1317,7 +1466,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(!calld->pick_pending);
GPR_ASSERT(calld->lb_policy == NULL);
GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
if (calld->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
@ -1366,11 +1515,7 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
chand->exit_idle_when_lb_policy_arrives = true;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
chand->started_resolving = true;
grpc_resolver_next_locked(exec_ctx, chand->resolver,
&chand->resolver_result,
&chand->on_resolver_result_changed);
start_resolving_locked(exec_ctx, chand);
}
}
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");

@ -23,6 +23,8 @@
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_stack.h"
extern grpc_tracer_flag grpc_client_channel_trace;
// Channel arg key for server URI string.
#define GRPC_ARG_SERVER_URI "grpc.server_uri"

@ -78,6 +78,7 @@ void grpc_client_channel_init(void) {
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
(void *)&grpc_client_channel_filter);
grpc_http_connect_register_handshaker_factory();
grpc_register_tracer("client_channel", &grpc_client_channel_trace);
#ifndef NDEBUG
grpc_register_tracer("resolver_refcount", &grpc_trace_resolver_refcount);
#endif
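(Once registered, the new tracer should be switchable at runtime like the existing ones, e.g. GRPC_TRACE=client_channel together with GRPC_VERBOSITY=DEBUG so the GPR_DEBUG lines above actually print.)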

@ -158,6 +158,7 @@ static void rr_subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
sd->user_data = NULL;
}
}
gpr_free(subchannel_list->subchannels);
@ -195,11 +196,28 @@ static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
static void rr_subchannel_list_shutdown(grpc_exec_ctx *exec_ctx,
rr_subchannel_list *subchannel_list,
const char *reason) {
if (subchannel_list->shutting_down) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Subchannel list %p already shutting down",
(void *)subchannel_list);
}
return;
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Shutting down subchannel_list %p",
(void *)subchannel_list);
}
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
subchannel_data *sd = &subchannel_list->subchannels[i];
if (sd->subchannel != NULL) { // if the subchannel isn't shut down, unsubscribe.
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"Unsubscribing from subchannel %p as part of shutting down "
"subchannel_list %p",
(void *)sd->subchannel, (void *)subchannel_list);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL,
NULL,
&sd->connectivity_changed_closure);
@ -228,13 +246,14 @@ static size_t get_next_ready_subchannel_index_locked(
const size_t index = (i + p->last_ready_subchannel_index + 1) %
p->subchannel_list->num_subchannels;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
"state=%d",
(void *)p,
(void *)p->subchannel_list->subchannels[index].subchannel,
(void *)p->subchannel_list, (unsigned long)index,
p->subchannel_list->subchannels[index].curr_connectivity_state);
gpr_log(
GPR_DEBUG,
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
"state=%s",
(void *)p, (void *)p->subchannel_list->subchannels[index].subchannel,
(void *)p->subchannel_list, (unsigned long)index,
grpc_connectivity_state_name(
p->subchannel_list->subchannels[index].curr_connectivity_state));
}
if (p->subchannel_list->subchannels[index].curr_connectivity_state ==
GRPC_CHANNEL_READY) {
@ -511,16 +530,27 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p: "
"prev_state=%s new_state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
(void *)p, (void *)sd->subchannel, (void *)sd->subchannel_list,
grpc_connectivity_state_name(sd->prev_connectivity_state),
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
grpc_error_string(error));
}
// If the policy is shutting down, unref and return.
if (p->shutdown) {
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, "pol_shutdown");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pol_shutdown");
return;
}
if (sd->subchannel_list->shutting_down) {
if (sd->subchannel_list->shutting_down && error == GRPC_ERROR_CANCELLED) {
// The subchannel list associated with sd has been discarded; this callback
// corresponds to the unsubscription.
GPR_ASSERT(error == GRPC_ERROR_CANCELLED);
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, "sl_shutdown");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown");
return;
@ -536,13 +566,6 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
// state (which was set by the connectivity state watcher) to
// curr_connectivity_state, which is what we use inside of the combiner.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR %p] connectivity changed for subchannel %p: "
"prev_state=%d new_state=%d",
(void *)p, (void *)sd->subchannel, sd->prev_connectivity_state,
sd->curr_connectivity_state);
}
// Update state counters and determine new overall state.
update_state_counters_locked(sd);
sd->prev_connectivity_state = sd->curr_connectivity_state;
@ -556,6 +579,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
sd->user_data = NULL;
}
if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
/* the policy is shutting down. Flush all the pending picks... */
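
Editorial aside: the selection loop above probes subchannels starting one
past the last ready index and wraps modulo the list length. A minimal,
self-contained sketch of that arithmetic (hypothetical names, not the gRPC
API):

#include <stddef.h>

/* Next index to probe in a round-robin scan that starts just past
   last_ready and wraps around; i is the zero-based step number. */
static size_t rr_probe_index(size_t i, size_t last_ready, size_t num) {
  return (i + last_ready + 1) % num;
}

/* With num = 3 and last_ready = 1, steps i = 0, 1, 2 probe 2, 0, 1. */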

@ -132,7 +132,7 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
dns_resolver *r = arg;
dns_resolver *r = (dns_resolver *)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
@ -146,7 +146,7 @@ static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
dns_resolver *r = arg;
dns_resolver *r = (dns_resolver *)arg;
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
@ -241,7 +241,7 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
char *path = args->uri->path;
if (path[0] == '/') ++path;
// Create resolver.
dns_resolver *r = gpr_zalloc(sizeof(dns_resolver));
dns_resolver *r = (dns_resolver *)gpr_zalloc(sizeof(dns_resolver));
grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
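
The resolver hunks above add explicit casts on void* values (callback args
and gpr_zalloc results). A plausible motivation, offered here as an
editorial aside rather than something the diff states, is keeping these
sources compilable as C++, where the implicit void*-to-T* conversion that C
allows is a hard error:

#include <stdlib.h>

typedef struct dns_resolver_like { int resolving; } dns_resolver_like;

void cast_example(void) {
  void *mem = malloc(sizeof(dns_resolver_like));
  /* dns_resolver_like *r = mem;    valid C, rejected by C++ compilers */
  dns_resolver_like *r = (dns_resolver_like *)mem; /* valid in both */
  r->resolving = 0;
  free(r);
}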

@ -177,7 +177,8 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
/* Instantiate resolver. */
sockaddr_resolver *r = gpr_zalloc(sizeof(sockaddr_resolver));
sockaddr_resolver *r =
(sockaddr_resolver *)gpr_zalloc(sizeof(sockaddr_resolver));
r->addresses = addresses;
r->channel_args = grpc_channel_args_copy(args->args);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);

@ -37,8 +37,8 @@
// Timer callback.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
grpc_call_element* elem = (grpc_call_element*)arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
if (error != GRPC_ERROR_CANCELLED) {
grpc_call_element_signal_error(
exec_ctx, elem,
@ -57,7 +57,7 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
return;
}
grpc_deadline_state* deadline_state = elem->call_data;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
grpc_deadline_timer_state cur_state;
grpc_closure* closure = NULL;
retry:
@ -112,7 +112,7 @@ static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
// Callback run when the call is complete.
static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_deadline_state* deadline_state = arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback.
GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete,
@ -145,7 +145,7 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack;
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
@ -169,13 +169,13 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
grpc_deadline_state* deadline_state = elem->call_data;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
}
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
start_timer_if_needed(exec_ctx, elem, new_deadline);
}
@ -183,7 +183,7 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
grpc_deadline_state* deadline_state = elem->call_data;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
if (op->cancel_stream) {
cancel_timer_if_needed(exec_ctx, deadline_state);
} else {
@ -256,8 +256,8 @@ static void client_start_transport_stream_op_batch(
// Callback for receiving initial metadata on the server.
static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
server_call_data* calld = elem->call_data;
grpc_call_element* elem = (grpc_call_element*)arg;
server_call_data* calld = (server_call_data*)elem->call_data;
// Get deadline from metadata and start the timer if needed.
start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
// Invoke the next callback.
@ -269,7 +269,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
static void server_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
server_call_data* calld = elem->call_data;
server_call_data* calld = (server_call_data*)elem->call_data;
if (op->cancel_stream) {
cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
} else {
@ -341,8 +341,8 @@ static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
void* arg) {
return grpc_deadline_checking_enabled(
grpc_channel_stack_builder_get_channel_arguments(builder))
? grpc_channel_stack_builder_prepend_filter(builder, arg, NULL,
NULL)
? grpc_channel_stack_builder_prepend_filter(
builder, (const grpc_channel_filter*)arg, NULL, NULL)
: true;
}
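
All of the deadline-filter hunks apply the same cast to elem->call_data. As
background: each channel filter owns an opaque per-call region that the
channel stack hands back as void*, and the filter casts it to its own
layout. A schematic of that pattern with hypothetical names (the real types
live in channel_stack.h):

/* Schematic only; not grpc-core's actual definitions. */
typedef struct sketch_call_element { void *call_data; } sketch_call_element;

typedef struct sketch_deadline_state { int timer_pending; } sketch_deadline_state;

static void sketch_cancel(sketch_call_element *elem) {
  /* Each filter reinterprets the opaque region as its own struct. */
  sketch_deadline_state *state = (sketch_deadline_state *)elem->call_data;
  state->timer_pending = 0;
}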

@ -108,7 +108,7 @@ static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
static void start_max_idle_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
channel_data* chand = arg;
channel_data* chand = (channel_data*)arg;
/* Decrease call_count. If there are no active calls at this time,
max_idle_timer will start here. If the number of active calls is not 0,
max_idle_timer will start after all the active calls end. */
@ -119,7 +119,7 @@ static void start_max_idle_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
channel_data* chand = arg;
channel_data* chand = (channel_data*)arg;
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
@ -140,7 +140,7 @@ static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
void* arg,
grpc_error* error) {
channel_data* chand = arg;
channel_data* chand = (channel_data*)arg;
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_grace_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
@ -156,7 +156,7 @@ static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
static void close_max_idle_channel(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
channel_data* chand = arg;
channel_data* chand = (channel_data*)arg;
if (error == GRPC_ERROR_NONE) {
/* Prevent the max idle timer from being set again */
gpr_atm_no_barrier_fetch_add(&chand->call_count, 1);
@ -176,7 +176,7 @@ static void close_max_idle_channel(grpc_exec_ctx* exec_ctx, void* arg,
static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
channel_data* chand = arg;
channel_data* chand = (channel_data*)arg;
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_timer_pending = false;
gpr_mu_unlock(&chand->max_age_timer_mu);
@ -200,7 +200,7 @@ static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
static void force_close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
channel_data* chand = arg;
channel_data* chand = (channel_data*)arg;
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_grace_timer_pending = false;
gpr_mu_unlock(&chand->max_age_timer_mu);
@ -220,7 +220,7 @@ static void force_close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
channel_data* chand = arg;
channel_data* chand = (channel_data*)arg;
if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_transport_op* op = grpc_make_transport_op(NULL);
op->on_connectivity_state_change = &chand->channel_connectivity_changed,
@ -264,7 +264,7 @@ static int add_random_max_connection_age_jitter(int value) {
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
channel_data* chand = (channel_data*)elem->channel_data;
increase_call_count(exec_ctx, chand);
return GRPC_ERROR_NONE;
}
@ -281,7 +281,7 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem,
grpc_channel_element_args* args) {
channel_data* chand = elem->channel_data;
channel_data* chand = (channel_data*)elem->channel_data;
gpr_mu_init(&chand->max_age_timer_mu);
chand->max_age_timer_pending = false;
chand->max_age_grace_timer_pending = false;

@ -60,7 +60,8 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
if (max_response_message_bytes == -1) return NULL;
}
}
message_size_limits* value = gpr_malloc(sizeof(message_size_limits));
message_size_limits* value =
(message_size_limits*)gpr_malloc(sizeof(message_size_limits));
value->max_send_size = max_request_message_bytes;
value->max_recv_size = max_response_message_bytes;
return value;
@ -88,8 +89,8 @@ typedef struct channel_data {
// receive message size.
static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
grpc_error* error) {
grpc_call_element* elem = user_data;
call_data* calld = elem->call_data;
grpc_call_element* elem = (grpc_call_element*)user_data;
call_data* calld = (call_data*)elem->call_data;
if (*calld->recv_message != NULL && calld->limits.max_recv_size >= 0 &&
(*calld->recv_message)->length > (size_t)calld->limits.max_recv_size) {
char* message_string;
@ -117,7 +118,7 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
static void start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
call_data* calld = elem->call_data;
call_data* calld = (call_data*)elem->call_data;
// Check max send message size.
if (op->send_message && calld->limits.max_send_size >= 0 &&
op->payload->send_message.send_message->length >
@ -149,8 +150,8 @@ static void start_transport_stream_op_batch(
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
calld->next_recv_message_ready = NULL;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
@ -160,8 +161,9 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
// size to the receive limit.
calld->limits = chand->limits;
if (chand->method_limit_table != NULL) {
message_size_limits* limits = grpc_method_config_table_get(
exec_ctx, chand->method_limit_table, args->path);
message_size_limits* limits =
(message_size_limits*)grpc_method_config_table_get(
exec_ctx, chand->method_limit_table, args->path);
if (limits != NULL) {
if (limits->max_send_size >= 0 &&
(limits->max_send_size < calld->limits.max_send_size ||
@ -220,7 +222,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem,
grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
channel_data* chand = elem->channel_data;
channel_data* chand = (channel_data*)elem->channel_data;
chand->limits = get_message_size_limits(args->channel_args);
// Get method config table from channel args.
const grpc_arg* channel_arg =
@ -243,7 +245,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
// Destructor for channel_data.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem) {
channel_data* chand = elem->channel_data;
channel_data* chand = (channel_data*)elem->channel_data;
grpc_slice_hash_table_unref(exec_ctx, chand->method_limit_table);
}
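
The init_call_elem hunk (truncated above) merges per-method limits into the
channel-level defaults. The visible condition suggests the tighter limit
wins, with -1 meaning "unlimited"; a small sketch of that merge under those
assumptions:

/* -1 means no limit; otherwise the smaller configured limit applies. */
static int merge_size_limit(int channel_limit, int method_limit) {
  if (method_limit >= 0 &&
      (method_limit < channel_limit || channel_limit < 0)) {
    return method_limit;
  }
  return channel_limit;
}

/* merge_size_limit(-1, 4096) == 4096; merge_size_limit(1024, 4096) == 1024. */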

@ -52,8 +52,8 @@ static bool get_user_agent_mdelem(const grpc_metadata_batch* batch,
// Callback invoked when we receive an initial metadata.
static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
void* user_data, grpc_error* error) {
grpc_call_element* elem = user_data;
call_data* calld = elem->call_data;
grpc_call_element* elem = (grpc_call_element*)user_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_ERROR_NONE == error) {
grpc_mdelem md;
@ -75,7 +75,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
static void start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
call_data* calld = elem->call_data;
call_data* calld = (call_data*)elem->call_data;
// Inject callback for receiving initial metadata
if (op->recv_initial_metadata) {
@ -103,7 +103,7 @@ static void start_transport_stream_op_batch(
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
call_data* calld = elem->call_data;
call_data* calld = (call_data*)elem->call_data;
calld->next_recv_initial_metadata_ready = NULL;
calld->workaround_active = false;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,

@ -33,7 +33,8 @@ grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md) {
if (NULL != user_agent_md) {
return user_agent_md;
}
user_agent_md = gpr_malloc(sizeof(grpc_workaround_user_agent_md));
user_agent_md = (grpc_workaround_user_agent_md *)gpr_malloc(
sizeof(grpc_workaround_user_agent_md));
for (int i = 0; i < GRPC_MAX_WORKAROUND_ID; i++) {
if (ua_parser[i]) {
user_agent_md->workaround_active[i] = ua_parser[i](md);

@ -34,6 +34,7 @@
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
@ -280,8 +281,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
@ -399,6 +398,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
t->keepalive_permit_without_calls = g_default_keepalive_permit_without_calls;
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
@ -487,6 +488,23 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->keepalive_permit_without_calls =
(uint32_t)grpc_channel_arg_get_integer(
&channel_args->args[i], (grpc_integer_options){0, 0, 1});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_OPTIMIZATION_TARGET)) {
if (channel_args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "%s should be a string",
GRPC_ARG_OPTIMIZATION_TARGET);
} else if (0 == strcmp(channel_args->args[i].value.string, "blend")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
} else if (0 == strcmp(channel_args->args[i].value.string, "latency")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
} else if (0 ==
strcmp(channel_args->args[i].value.string, "throughput")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT;
} else {
gpr_log(GPR_ERROR, "%s value '%s' unknown, assuming 'blend'",
GRPC_ARG_OPTIMIZATION_TARGET,
channel_args->args[i].value.string);
}
} else {
static const struct {
const char *channel_arg_name;
@ -540,6 +558,11 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
t->opt_target == GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT
? grpc_executor_scheduler
: grpc_schedule_on_exec_ctx);
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.is_delayed_ping_timer_set = false;
@ -1464,6 +1487,21 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
if (!t->is_client) {
if (op->send_initial_metadata) {
gpr_timespec deadline =
op->payload->send_initial_metadata.send_initial_metadata->deadline;
GPR_ASSERT(0 ==
gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
}
if (op->send_trailing_metadata) {
gpr_timespec deadline =
op->payload->send_trailing_metadata.send_trailing_metadata->deadline;
GPR_ASSERT(0 ==
gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
}
}
if (GRPC_TRACER_ON(grpc_http_trace)) {
char *str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str);
@ -2129,8 +2167,8 @@ static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void update_frame(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
double bw_dbl, double bdp_dbl) {
int32_t bdp = GPR_CLAMP((int32_t)bdp_dbl, 128, INT32_MAX);
int32_t target = GPR_MAX((int32_t)bw_dbl / 1000, bdp);
int32_t bdp = (int32_t)GPR_CLAMP(bdp_dbl, 128.0, INT32_MAX);
int32_t target = (int32_t)GPR_MAX(bw_dbl / 1000, bdp);
// frame size is bounded [2^14,2^24-1]
int32_t frame_size = GPR_CLAMP(target, 16384, 16777215);
int64_t delta = (int64_t)frame_size -
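
The update_frame change above reorders the clamp and the cast: clamping
bdp_dbl while it is still a double means an out-of-range value is never
converted to int32_t, which would be undefined behavior in C. A minimal
illustration of the safe ordering (editorial sketch, local CLAMP macro):

#include <stdint.h>

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : (x) > (hi) ? (hi) : (x))

int32_t safe_bdp(double bdp_dbl) {
  /* Clamp in double first; only then narrow to int32_t. */
  return (int32_t)CLAMP(bdp_dbl, 128.0, (double)INT32_MAX);
}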

@ -66,6 +66,11 @@ typedef enum {
GRPC_CHTTP2_PING_TYPE_COUNT /* must be last */
} grpc_chttp2_ping_type;
typedef enum {
GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY,
GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT,
} grpc_chttp2_optimization_target;
typedef enum {
GRPC_CHTTP2_PCL_INITIATE = 0,
GRPC_CHTTP2_PCL_NEXT,
@ -229,6 +234,8 @@ struct grpc_chttp2_transport {
/** should we probe bdp? */
bool enable_bdp_probe;
grpc_chttp2_optimization_target opt_target;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
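
For reference, the new optimization target is selected through a string
channel arg; a minimal client-side sketch, assuming the
GRPC_ARG_OPTIMIZATION_TARGET key that this change introduces in
grpc_types.h and the C-core channel API of this era:

#include <grpc/grpc.h>

grpc_channel *make_throughput_channel(const char *target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = (char *)GRPC_ARG_OPTIMIZATION_TARGET;
  arg.value.string = (char *)"throughput"; /* or "latency" / "blend" */
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, NULL);
}

Note that, per the parsing hunk above, "blend" currently falls back to the
latency behavior; only "throughput" selects the executor-based write path.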

@ -57,9 +57,6 @@
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
/* Uncomment the following to enable extra checks on poll_object operations */
/* #define PO_DEBUG */
/* The maximum number of polling threads per polling island. By default no
limit */
static int g_max_pollers_per_pi = INT_MAX;
@ -92,7 +89,7 @@ typedef enum {
} poll_obj_type;
typedef struct poll_obj {
#ifdef PO_DEBUG
#ifndef NDEBUG
poll_obj_type obj_type;
#endif
gpr_mu mu;
@ -893,7 +890,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
* would be holding a lock to it anyway. */
gpr_mu_lock(&new_fd->po.mu);
new_fd->po.pi = NULL;
#ifdef PO_DEBUG
#ifndef NDEBUG
new_fd->po.obj_type = POLL_OBJ_FD;
#endif
@ -1171,7 +1168,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->po.mu);
*mu = &pollset->po.mu;
pollset->po.pi = NULL;
#ifdef PO_DEBUG
#ifndef NDEBUG
pollset->po.obj_type = POLL_OBJ_POLLSET;
#endif
@ -1625,7 +1622,7 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
poll_obj_type item_type) {
GPR_TIMER_BEGIN("add_poll_object", 0);
#ifdef PO_DEBUG
#ifndef NDEBUG
GPR_ASSERT(item->obj_type == item_type);
GPR_ASSERT(bag->obj_type == bag_type);
#endif
@ -1784,7 +1781,7 @@ static grpc_pollset_set *pollset_set_create(void) {
grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
gpr_mu_init(&pss->po.mu);
pss->po.pi = NULL;
#ifdef PO_DEBUG
#ifndef NDEBUG
pss->po.obj_type = POLL_OBJ_POLLSET_SET;
#endif
return pss;

@ -103,6 +103,32 @@ typedef struct pollable {
grpc_pollset_worker *root_worker;
} pollable;
static const char *polling_obj_type_string(polling_obj_type t) {
switch (t) {
case PO_POLLING_GROUP:
return "polling_group";
case PO_POLLSET_SET:
return "pollset_set";
case PO_POLLSET:
return "pollset";
case PO_FD:
return "fd";
case PO_EMPTY_POLLABLE:
return "empty_pollable";
case PO_COUNT:
return "<invalid:count>";
}
return "<invalid>";
}
static char *pollable_desc(pollable *p) {
char *out;
gpr_asprintf(&out, "type=%s group=%p epfd=%d wakeup=%d",
polling_obj_type_string(p->po.type), p->po.group, p->epfd,
p->wakeup.read_fd);
return out;
}
static pollable g_empty_pollable;
static void pollable_init(pollable *p, polling_obj_type type);
@ -472,7 +498,7 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
GPR_ASSERT(epfd != -1);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "add fd %p to pollable %p", fd, p);
gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
}
gpr_mu_lock(&fd->orphaned_mu);
@ -537,10 +563,18 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
if (worker->pollable != &pollset->pollable) {
gpr_mu_lock(&worker->pollable->po.mu);
}
if (worker->initialized_cv) {
if (worker->initialized_cv && worker != pollset->root_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)",
pollset, worker, &pollset->pollable, worker->pollable);
}
worker->kicked = true;
gpr_cv_signal(&worker->cv);
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)",
pollset, worker, &pollset->pollable, worker->pollable);
}
append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup),
"pollset_shutdown");
}
@ -770,7 +804,9 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
int timeout = poll_deadline_to_millis_timeout(deadline, now);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p poll %p for %dms", pollset, p, timeout);
char *desc = pollable_desc(p);
gpr_log(GPR_DEBUG, "PS:%p poll %p[%s] for %dms", pollset, p, desc, timeout);
gpr_free(desc);
}
if (timeout != 0) {
@ -985,10 +1021,11 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
static const char *err_desc = "pollset_add_fd";
grpc_error *error = GRPC_ERROR_NONE;
if (pollset->current_pollable == &g_empty_pollable) {
if (GRPC_TRACER_ON(grpc_polling_trace))
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from empty to fd", pollset,
fd);
}
/* empty pollable --> single fd pollable */
pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &fd->pollable;
@ -997,16 +1034,23 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
if (!fd_locked) gpr_mu_unlock(&fd->pollable.po.mu);
REF_BY(fd, 2, "pollset_pollable");
} else if (pollset->current_pollable == &pollset->pollable) {
if (GRPC_TRACER_ON(grpc_polling_trace))
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd);
}
append_error(&error, pollable_add_fd(pollset->current_pollable, fd),
err_desc);
} else if (pollset->current_pollable != &fd->pollable) {
grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable;
if (GRPC_TRACER_ON(grpc_polling_trace))
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from fd %p to multipoller",
pollset, fd, had_fd);
}
/* Introduce a spurious completion.
If we do not, then it may be that the fd-specific epoll set consumed
a completion without being polled, leading to a missed edge going up. */
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure);
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure);
pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &pollset->pollable;
if (append_error(&error, pollable_materialize(&pollset->pollable),
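
In do_kick_all above, only workers waiting on a condition variable (and not
the root worker) are kicked via gpr_cv_signal; the root worker, which may be
blocked in epoll_wait rather than on a cv, is woken through its pollable's
wakeup fd. The classic mechanism behind such a wakeup fd is the self-pipe
trick; a generic sketch (not gRPC's grpc_wakeup_fd implementation):

#include <poll.h>
#include <unistd.h>

/* Wake a thread blocked in poll()/epoll_wait() by making one of the
   fds it watches readable. wakeup_fds[0] is added to the poll set. */
static int wakeup_fds[2];

static void wakeup_init(void) { (void)pipe(wakeup_fds); }

static void kick(void) {
  char byte = 0;
  (void)write(wakeup_fds[1], &byte, 1); /* poller sees POLLIN and returns */
}

static void consume_kick(void) {
  char byte;
  (void)read(wakeup_fds[0], &byte, 1);
}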

@ -54,9 +54,6 @@
gpr_log(GPR_INFO, __VA_ARGS__); \
}
/* Uncomment the following to enable extra checks on poll_object operations */
/* #define PO_DEBUG */
static int grpc_wakeup_signal = -1;
static bool is_grpc_wakeup_signal_initialized = false;
@ -85,7 +82,7 @@ typedef enum {
} poll_obj_type;
typedef struct poll_obj {
#ifdef PO_DEBUG
#ifndef NDEBUG
poll_obj_type obj_type;
#endif
gpr_mu mu;
@ -821,7 +818,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
* would be holding a lock to it anyway. */
gpr_mu_lock(&new_fd->po.mu);
new_fd->po.pi = NULL;
#ifdef PO_DEBUG
#ifndef NDEBUG
new_fd->po.obj_type = POLL_OBJ_FD;
#endif
@ -1079,7 +1076,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->po.mu);
*mu = &pollset->po.mu;
pollset->po.pi = NULL;
#ifdef PO_DEBUG
#ifndef NDEBUG
pollset->po.obj_type = POLL_OBJ_POLLSET;
#endif
@ -1416,7 +1413,7 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
poll_obj_type item_type) {
GPR_TIMER_BEGIN("add_poll_object", 0);
#ifdef PO_DEBUG
#ifndef NDEBUG
GPR_ASSERT(item->obj_type == item_type);
GPR_ASSERT(bag->obj_type == bag_type);
#endif
@ -1575,7 +1572,7 @@ static grpc_pollset_set *pollset_set_create(void) {
grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
gpr_mu_init(&pss->po.mu);
pss->po.pi = NULL;
#ifdef PO_DEBUG
#ifndef NDEBUG
pss->po.obj_type = POLL_OBJ_POLLSET_SET;
#endif
return pss;

@ -54,7 +54,7 @@ static int retry_named_port_failure(int status, request *r,
int retry_status;
uv_getaddrinfo_t *req = gpr_malloc(sizeof(uv_getaddrinfo_t));
req->data = r;
r->port = svc[i][1];
r->port = gpr_strdup(svc[i][1]);
retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb,
r->host, r->port, r->hints);
if (retry_status < 0 || getaddrinfo_cb == NULL) {
@ -127,6 +127,8 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(r->hints);
gpr_free(r->host);
gpr_free(r->port);
gpr_free(r);
uv_freeaddrinfo(res);
}
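
The retry path now duplicates the service-table string before storing it in
r->port, and the completion callback frees r->host and r->port alongside
the request. A compressed sketch of the hazard the copy fixes, using plain
strdup/free in place of the gpr_ equivalents:

#include <stdlib.h>
#include <string.h>

void ownership_example(void) {
  const char *svc_port = "443";    /* static table entry, not heap memory */
  char *port = (char *)svc_port;   /* before: alias only */
  /* free(port);                      freeing a static string: undefined */
  port = strdup(svc_port);         /* after: take ownership of a copy */
  free(port);                      /* now safe */
}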

@ -48,6 +48,7 @@ typedef struct grpc_uv_tcp_connect {
static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx,
grpc_uv_tcp_connect *connect) {
grpc_resource_quota_unref_internal(exec_ctx, connect->resource_quota);
gpr_free(connect->addr_name);
gpr_free(connect);
}
@ -105,6 +106,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
}
done = (--connect->refs == 0);
if (done) {
grpc_exec_ctx_flush(&exec_ctx);
uv_tcp_connect_cleanup(&exec_ctx, connect);
}
GRPC_CLOSURE_SCHED(&exec_ctx, closure, error);
@ -140,6 +142,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
connect->refs = 1;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",

@ -234,6 +234,7 @@ static void on_connect(uv_stream_t *server, int status) {
sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
acceptor);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(peer_name_string);
}
}

@ -65,7 +65,10 @@ typedef struct {
} grpc_tcp;
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp->handle);
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
@ -115,13 +118,17 @@ static void uv_close_callback(uv_handle_t *handle) {
grpc_exec_ctx_finish(&exec_ctx);
}
static grpc_slice alloc_read_slice(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
return grpc_resource_user_slice_malloc(exec_ctx, resource_user,
GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
}
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp *tcp = handle->data;
(void)suggested_size;
tcp->read_slice = grpc_resource_user_slice_malloc(
&exec_ctx, tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
grpc_exec_ctx_finish(&exec_ctx);
@ -148,6 +155,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// Successful read
sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
grpc_slice_buffer_add(tcp->read_slices, sub);
tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user);
error = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t i;
@ -334,6 +342,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,
char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
@ -350,6 +359,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
tcp->peer_string = gpr_strdup(peer_string);
tcp->shutting_down = false;
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
@ -357,6 +367,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
uv_unref((uv_handle_t *)handle);
#endif
grpc_exec_ctx_finish(&exec_ctx);
return &tcp->base;
}

@ -44,41 +44,63 @@
grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false);
grpc_tracer_flag grpc_timer_check_trace = GRPC_TRACER_INITIALIZER(false);
/* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
* deadlines earlier than 'queue_deadline_cap' are maintained in the heap and
* others are maintained in the list (unordered). This helps to keep the number
* of elements in the heap low.
*
* The 'queue_deadline_cap' gets recomputed periodically based on the timer
* stats maintained in 'stats', and the relevant timers are then moved from
* the 'list' to the 'heap'.
*/
typedef struct {
gpr_mu mu;
grpc_time_averaged_stats stats;
/* All and only timers with deadlines <= this will be in the heap. */
gpr_atm queue_deadline_cap;
/* The deadline of the next timer due in this shard */
gpr_atm min_deadline;
/* Index in the g_shard_queue */
/* Index of this timer_shard in the g_shard_queue */
uint32_t shard_queue_index;
/* This holds all timers with deadlines < queue_deadline_cap. Timers in this
list have the top bit of their deadline set to 0. */
grpc_timer_heap heap;
/* This holds timers whose deadline is >= queue_deadline_cap. */
grpc_timer list;
} shard_type;
} timer_shard;
/* Array of timer shards. Whenever a timer (grpc_timer *) is added, its address
* is hashed to select the timer shard to add the timer to */
static timer_shard g_shards[NUM_SHARDS];
/* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e
* the deadline of the next timer in each shard).
* Access to this is protected by g_shared_mutables.mu */
static timer_shard *g_shard_queue[NUM_SHARDS];
/* Thread local variable that stores the deadline of the next timer the thread
* has last-seen. This is an optimization to prevent the thread from checking
* shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
* an expensive operation) */
GPR_TLS_DECL(g_last_seen_min_timer);
struct shared_mutables {
/* The deadline of the next timer due across all timer shards */
gpr_atm min_timer;
/* Allow only one run_some_expired_timers at once */
gpr_spinlock checker_mu;
bool initialized;
/* Protects g_shard_queue */
/* Protects g_shard_queue (and the shared_mutables struct itself) */
gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
static struct shared_mutables g_shared_mutables = {
.checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
};
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_shared_mutables.mu */
static shard_type *g_shard_queue[NUM_SHARDS];
static gpr_timespec g_start_time;
GPR_TLS_DECL(g_last_seen_min_timer);
static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
if (a > GPR_ATM_MAX - b) {
return GPR_ATM_MAX;
@ -122,7 +144,7 @@ static gpr_timespec atm_to_timespec(gpr_atm x) {
return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0));
}
static gpr_atm compute_min_deadline(shard_type *shard) {
static gpr_atm compute_min_deadline(timer_shard *shard) {
return grpc_timer_heap_is_empty(&shard->heap)
? saturating_add(shard->queue_deadline_cap, 1)
: grpc_timer_heap_top(&shard->heap)->deadline;
@ -142,7 +164,7 @@ void grpc_timer_list_init(gpr_timespec now) {
grpc_register_tracer("timer_check", &grpc_timer_check_trace);
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
timer_shard *shard = &g_shards[i];
gpr_mu_init(&shard->mu);
grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
0.5);
@ -161,7 +183,7 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
exec_ctx, GPR_ATM_MAX, NULL,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
timer_shard *shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
grpc_timer_heap_destroy(&shard->heap);
}
@ -187,7 +209,7 @@ static void list_remove(grpc_timer *timer) {
}
static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
shard_type *temp;
timer_shard *temp;
temp = g_shard_queue[first_shard_queue_index];
g_shard_queue[first_shard_queue_index] =
g_shard_queue[first_shard_queue_index + 1];
@ -198,7 +220,7 @@ static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
first_shard_queue_index + 1;
}
static void note_deadline_change(shard_type *shard) {
static void note_deadline_change(timer_shard *shard) {
while (shard->shard_queue_index > 0 &&
shard->min_deadline <
g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
@ -215,7 +237,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_closure *closure,
gpr_timespec now) {
int is_first_timer = 0;
shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
GPR_ASSERT(deadline.clock_type == g_clock_type);
GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
@ -303,7 +325,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
return;
}
shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
if (GRPC_TRACER_ON(grpc_timer_trace)) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
@ -321,12 +343,12 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
gpr_mu_unlock(&shard->mu);
}
/* This is called when the queue is empty and "now" has reached the
queue_deadline_cap. We compute a new queue deadline and then scan the map
for timers that fall at or under it. Returns true if the queue is no
longer empty.
/* Rebalances the timer shard by computing a new 'queue_deadline_cap' and moving
all relevant timers in shard->list (i.e. timers with deadlines earlier than
'queue_deadline_cap') into shard->heap.
Returns 'true' if shard->heap has at least one element.
REQUIRES: shard->mu locked */
static int refill_queue(shard_type *shard, gpr_atm now) {
static int refill_heap(timer_shard *shard, gpr_atm now) {
/* Compute the new queue window width and bound by the limits: */
double computed_deadline_delta =
grpc_time_averaged_stats_update_average(&shard->stats) *
@ -363,7 +385,7 @@ static int refill_queue(shard_type *shard, gpr_atm now) {
/* This pops the next non-cancelled timer with deadline <= now from the
queue, or returns NULL if there isn't one.
REQUIRES: shard->mu locked */
static grpc_timer *pop_one(shard_type *shard, gpr_atm now) {
static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
grpc_timer *timer;
for (;;) {
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
@ -373,7 +395,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_atm now) {
}
if (grpc_timer_heap_is_empty(&shard->heap)) {
if (now < shard->queue_deadline_cap) return NULL;
if (!refill_queue(shard, now)) return NULL;
if (!refill_heap(shard, now)) return NULL;
}
timer = grpc_timer_heap_top(&shard->heap);
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
@ -393,7 +415,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_atm now) {
}
/* REQUIRES: shard->mu unlocked */
static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
gpr_atm now, gpr_atm *new_min_deadline,
grpc_error *error) {
size_t n = 0;
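
The shard for a timer is chosen by hashing the timer's address
(g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]), which spreads unrelated
timers across NUM_SHARDS mutexes and keeps each heap small. A generic
sketch of the idea, with a hypothetical hash rather than gRPC's actual
GPR_HASH_POINTER definition:

#include <stdint.h>

#define SKETCH_NUM_SHARDS 32

/* Hypothetical pointer hash: drop low alignment bits, then reduce. */
static uint32_t shard_for(const void *timer) {
  uintptr_t p = (uintptr_t)timer;
  return (uint32_t)((p >> 4) % SKETCH_NUM_SHARDS);
}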

@ -50,6 +50,9 @@ static completed_thread *g_completed_threads;
static bool g_kicked;
// is there a thread waiting until the next timer should fire?
static bool g_has_timed_waiter;
// the deadline of the current timed waiter thread (only relevant if
// g_has_timed_waiter is true)
static gpr_timespec g_timed_waiter_deadline;
// generation counter to track which thread is waiting for the next timer
static uint64_t g_timed_waiter_generation;
@ -101,8 +104,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
start_timer_thread_and_unlock();
} else {
// if there's no thread waiting with a timeout, kick an existing
// waiter
// so that the next deadline is not missed
// waiter so that the next deadline is not missed
if (!g_has_timed_waiter) {
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, "kick untimed waiter");
@ -132,44 +134,79 @@ static bool wait_until(gpr_timespec next) {
gpr_mu_unlock(&g_mu);
return false;
}
// if there's no timed waiter, we should become one: that waiter waits
// only until the next timer should expire
// all other timers wait forever
uint64_t my_timed_waiter_generation = g_timed_waiter_generation - 1;
if (!g_has_timed_waiter && gpr_time_cmp(next, inf_future) != 0) {
g_has_timed_waiter = true;
// we use a generation counter to track the timed waiter so we can
// cancel an existing one quickly (and when it actually times out it'll
// figure stuff out instead of incurring a wakeup)
my_timed_waiter_generation = ++g_timed_waiter_generation;
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_timespec wait_time = gpr_time_sub(next, gpr_now(GPR_CLOCK_MONOTONIC));
gpr_log(GPR_DEBUG, "sleep for %" PRId64 ".%09d seconds",
wait_time.tv_sec, wait_time.tv_nsec);
// If g_kicked is true at this point, it means there was a kick from the timer
// system that the timer-manager threads here missed. We cannot trust 'next'
// here any longer (since there might be an earlier deadline). So if g_kicked
// is true at this point, we should quickly exit this and get the next
// deadline from the timer system
if (!g_kicked) {
// if there's no timed waiter, we should become one: that waiter waits
// only until the next timer should expire. All other timers wait forever
//
// 'g_timed_waiter_generation' is a global generation counter. The idea here
// is that the thread becoming a timed-waiter increments and stores this
// global counter locally in 'my_timed_waiter_generation' before going to
// sleep. After waking up, if my_timed_waiter_generation ==
// g_timed_waiter_generation, it can be sure that it was the timed_waiter
// thread (and that no other thread took over while it was asleep)
//
// Initialize my_timed_waiter_generation to some value that is NOT equal to
// g_timed_waiter_generation
uint64_t my_timed_waiter_generation = g_timed_waiter_generation - 1;
/* If there's no timed waiter, we should become one: that waiter waits only
until the next timer should expire. All other timer threads wait forever
unless their 'next' is earlier than the current timed-waiter's deadline
(in which case the thread with earlier 'next' takes over as the new timed
waiter) */
if (gpr_time_cmp(next, inf_future) != 0) {
if (!g_has_timed_waiter ||
(gpr_time_cmp(next, g_timed_waiter_deadline) < 0)) {
my_timed_waiter_generation = ++g_timed_waiter_generation;
g_has_timed_waiter = true;
g_timed_waiter_deadline = next;
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_timespec wait_time =
gpr_time_sub(next, gpr_now(GPR_CLOCK_MONOTONIC));
gpr_log(GPR_DEBUG, "sleep for %" PRId64 ".%09d seconds",
wait_time.tv_sec, wait_time.tv_nsec);
}
} else { // g_has_timed_waiter == true && next >= g_timed_waiter_deadline
next = inf_future;
}
}
} else {
next = inf_future;
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
if (GRPC_TRACER_ON(grpc_timer_check_trace) &&
gpr_time_cmp(next, inf_future) == 0) {
gpr_log(GPR_DEBUG, "sleep until kicked");
}
gpr_cv_wait(&g_cv_wait, &g_mu, next);
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation,
g_kicked);
}
// if this was the timed waiter, then we need to check timers, and flag
// that there's now no timed waiter... we'll look for a replacement if
// there's work to do after checking timers (code above)
if (my_timed_waiter_generation == g_timed_waiter_generation) {
g_has_timed_waiter = false;
g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
}
}
gpr_cv_wait(&g_cv_wait, &g_mu, next);
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation, g_kicked);
}
// if this was the timed waiter, then we need to check timers, and flag
// that there's now no timed waiter... we'll look for a replacement if
// there's work to do after checking timers (code above)
if (my_timed_waiter_generation == g_timed_waiter_generation) {
g_has_timed_waiter = false;
}
// if this was a kick from the timer system, consume it (and don't stop
// this thread yet)
if (g_kicked) {
grpc_timer_consume_kick();
g_kicked = false;
}
gpr_mu_unlock(&g_mu);
return true;
}
@ -257,6 +294,9 @@ void grpc_timer_manager_init(void) {
g_waiter_count = 0;
g_completed_threads = NULL;
g_has_timed_waiter = false;
g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
start_threads();
}
@ -302,6 +342,7 @@ void grpc_kick_poller(void) {
gpr_mu_lock(&g_mu);
g_kicked = true;
g_has_timed_waiter = false;
g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
++g_timed_waiter_generation;
gpr_cv_signal(&g_cv_wait);
gpr_mu_unlock(&g_mu);
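
The generation counter described in the comments above is a general pattern
for electing a single "timed waiter" and cheaply deposing it: the waiter
records the generation before sleeping, and anything that bumps the counter
(a kick, or a thread with an earlier deadline taking over) invalidates the
old waiter without a second wakeup. A condensed sketch under those
assumptions, with the manager mutex held around both calls:

#include <stdbool.h>
#include <stdint.h>

static uint64_t g_generation; /* protected by the manager mutex */
static bool g_has_waiter;

/* Returns the token to compare against after waking. */
static uint64_t become_timed_waiter(void) {
  g_has_waiter = true;
  return ++g_generation;
}

/* Called after the cv wait returns, mutex re-held. */
static bool still_the_timed_waiter(uint64_t my_generation) {
  if (my_generation == g_generation) {
    g_has_waiter = false; /* step down; a successor will be elected */
    return true;          /* this thread should go check timers */
  }
  return false;           /* deposed or kicked; just loop again */
}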

@ -462,6 +462,35 @@ static BIGNUM *bignum_from_base64(grpc_exec_ctx *exec_ctx, const char *b64) {
return result;
}
#if OPENSSL_VERSION_NUMBER < 0x10100000L
// Provide compatibility across OpenSSL 1.0.2 and 1.1.
static int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d) {
/* If the fields n and e in r are NULL, the corresponding input
* parameters MUST be non-NULL for n and e. d may be
* left NULL (in case only the public key is used).
*/
if ((r->n == NULL && n == NULL) || (r->e == NULL && e == NULL)) {
return 0;
}
if (n != NULL) {
BN_free(r->n);
r->n = n;
}
if (e != NULL) {
BN_free(r->e);
r->e = e;
}
if (d != NULL) {
BN_free(r->d);
r->d = d;
}
return 1;
}
#endif // OPENSSL_VERSION_NUMBER < 0x10100000L
static EVP_PKEY *pkey_from_jwk(grpc_exec_ctx *exec_ctx, const grpc_json *json,
const char *kty) {
const grpc_json *key_prop;
@ -478,21 +507,27 @@ static EVP_PKEY *pkey_from_jwk(grpc_exec_ctx *exec_ctx, const grpc_json *json,
gpr_log(GPR_ERROR, "Could not create rsa key.");
goto end;
}
BIGNUM *tmp_n = NULL;
BIGNUM *tmp_e = NULL;
for (key_prop = json->child; key_prop != NULL; key_prop = key_prop->next) {
if (strcmp(key_prop->key, "n") == 0) {
rsa->n =
tmp_n =
bignum_from_base64(exec_ctx, validate_string_field(key_prop, "n"));
if (rsa->n == NULL) goto end;
if (tmp_n == NULL) goto end;
} else if (strcmp(key_prop->key, "e") == 0) {
rsa->e =
tmp_e =
bignum_from_base64(exec_ctx, validate_string_field(key_prop, "e"));
if (rsa->e == NULL) goto end;
if (tmp_e == NULL) goto end;
}
}
if (rsa->e == NULL || rsa->n == NULL) {
if (tmp_e == NULL || tmp_n == NULL) {
gpr_log(GPR_ERROR, "Missing RSA public key field.");
goto end;
}
if (!RSA_set0_key(rsa, tmp_n, tmp_e, NULL)) {
gpr_log(GPR_ERROR, "Cannot set RSA key from inputs.");
goto end;
}
result = EVP_PKEY_new();
EVP_PKEY_set1_RSA(result, rsa); /* uprefs rsa. */
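
One ownership detail worth noting about the shim above: the real OpenSSL
1.1 RSA_set0_key transfers ownership of the BIGNUMs to the RSA object on
success, so the caller must only free them when the call fails. A usage
sketch:

#include <openssl/bn.h>
#include <openssl/rsa.h>

int install_key(RSA *rsa, BIGNUM *n, BIGNUM *e) {
  if (!RSA_set0_key(rsa, n, e, NULL)) {
    BN_free(n); /* still ours: setting failed */
    BN_free(e);
    return 0;
  }
  return 1; /* rsa now owns n and e; do not BN_free them */
}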

@ -49,7 +49,6 @@ typedef struct {
pollset_set so that work can progress when this call wants work to progress
*/
grpc_polling_entity *pollent;
grpc_transport_stream_op_batch op;
gpr_atm security_context_set;
gpr_mu security_context_mu;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
@ -92,11 +91,10 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
size_t num_md,
grpc_credentials_status status,
const char *error_details) {
grpc_call_element *elem = (grpc_call_element *)user_data;
grpc_transport_stream_op_batch *batch =
(grpc_transport_stream_op_batch *)user_data;
grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *op = &calld->op;
grpc_metadata_batch *mdb;
size_t i;
reset_auth_metadata_context(&calld->auth_md_context);
grpc_error *error = GRPC_ERROR_NONE;
if (status != GRPC_CREDENTIALS_OK) {
@ -108,9 +106,10 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED);
} else {
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
GPR_ASSERT(op->send_initial_metadata);
mdb = op->payload->send_initial_metadata.send_initial_metadata;
for (i = 0; i < num_md; i++) {
GPR_ASSERT(batch->send_initial_metadata);
grpc_metadata_batch *mdb =
batch->payload->send_initial_metadata.send_initial_metadata;
for (size_t i = 0; i < num_md; i++) {
add_error(&error,
grpc_metadata_batch_add_tail(
exec_ctx, mdb, &calld->md_links[i],
@ -120,9 +119,9 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
}
}
if (error == GRPC_ERROR_NONE) {
grpc_call_next_op(exec_ctx, elem, op);
grpc_call_next_op(exec_ctx, elem, batch);
} else {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error);
}
}
@ -158,11 +157,11 @@ void build_auth_metadata_context(grpc_security_connector *sc,
static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_client_security_context *ctx =
(grpc_client_security_context *)op->payload
(grpc_client_security_context *)batch->payload
->context[GRPC_CONTEXT_SECURITY]
.value;
grpc_call_credentials *channel_call_creds =
@ -171,7 +170,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
if (channel_call_creds == NULL && !call_creds_has_md) {
/* Skip sending metadata altogether. */
grpc_call_next_op(exec_ctx, elem, op);
grpc_call_next_op(exec_ctx, elem, batch);
return;
}
@ -180,7 +179,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
ctx->creds, NULL);
if (calld->creds == NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op,
exec_ctx, batch,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Incompatible credentials set on channel and call."),
@ -194,28 +193,29 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
build_auth_metadata_context(&chand->security_connector->base,
chand->auth_context, calld);
calld->op = *op; /* Copy op (originates from the caller's stack). */
GPR_ASSERT(calld->pollent != NULL);
grpc_call_credentials_get_request_metadata(
exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
on_credentials_metadata, elem);
on_credentials_metadata, batch);
}
static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_security_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
grpc_transport_stream_op_batch *batch =
(grpc_transport_stream_op_batch *)user_data;
grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data;
if (status == GRPC_SECURITY_OK) {
send_security_metadata(exec_ctx, elem, &calld->op);
send_security_metadata(exec_ctx, elem, batch);
} else {
char *error_msg;
char *host = grpc_slice_to_c_string(calld->host);
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
host);
gpr_free(host);
grpc_call_element_signal_error(
exec_ctx, elem,
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, batch,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED));
@ -223,35 +223,29 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
}
}
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
GPR_TIMER_BEGIN("auth_start_transport_op", 0);
static void auth_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
GPR_TIMER_BEGIN("auth_start_transport_stream_op_batch", 0);
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_linked_mdelem *l;
grpc_client_security_context *sec_ctx = NULL;
if (!op->cancel_stream) {
if (!batch->cancel_stream) {
/* double checked lock over security context to ensure it's set once */
if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
gpr_mu_lock(&calld->security_context_mu);
if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
GPR_ASSERT(op->payload->context != NULL);
if (op->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
op->payload->context[GRPC_CONTEXT_SECURITY].value =
GPR_ASSERT(batch->payload->context != NULL);
if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
batch->payload->context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create();
op->payload->context[GRPC_CONTEXT_SECURITY].destroy =
batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
sec_ctx = op->payload->context[GRPC_CONTEXT_SECURITY].value;
grpc_client_security_context *sec_ctx =
batch->payload->context[GRPC_CONTEXT_SECURITY].value;
GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
sec_ctx->auth_context =
GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
@ -261,9 +255,9 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
}
}
if (op->send_initial_metadata) {
for (l = op->payload->send_initial_metadata.send_initial_metadata->list
.head;
if (batch->send_initial_metadata) {
for (grpc_linked_mdelem *l = batch->payload->send_initial_metadata
.send_initial_metadata->list.head;
l != NULL; l = l->next) {
grpc_mdelem md = l->md;
/* Pointer comparison is OK for md_elems created from the same context.
@ -284,19 +278,19 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
}
if (calld->have_host) {
char *call_host = grpc_slice_to_c_string(calld->host);
calld->op = *op; /* Copy op (originates from the caller's stack). */
batch->handler_private.extra_arg = elem;
grpc_channel_security_connector_check_call_host(
exec_ctx, chand->security_connector, call_host, chand->auth_context,
on_host_checked, elem);
on_host_checked, batch);
gpr_free(call_host);
GPR_TIMER_END("auth_start_transport_op", 0);
GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
return; /* early exit */
}
}
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
GPR_TIMER_END("auth_start_transport_op", 0);
grpc_call_next_op(exec_ctx, elem, batch);
GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
@ -379,7 +373,15 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
const grpc_channel_filter grpc_client_auth_filter = {
auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, set_pollset_or_pollset_set, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, grpc_channel_next_get_info, "client-auth"};
auth_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client-auth"};

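The client-auth refactor above stops copying the whole op into call_data
and instead threads the batch pointer through the async callbacks, using
batch->handler_private.extra_arg to carry the call element alongside it.
Schematically (editorial sketch with simplified stand-in types, not the
real grpc_transport_stream_op_batch):

typedef struct sketch_batch {
  struct { void *extra_arg; } handler_private;
} sketch_batch;

typedef void (*sketch_cb)(void *user_data);

static void on_done(void *user_data) {
  sketch_batch *batch = (sketch_batch *)user_data;
  void *elem = batch->handler_private.extra_arg; /* recover the element */
  (void)elem; /* ... resume the batch on this element ... */
}

static void start(void *elem, sketch_batch *batch, sketch_cb async_call) {
  batch->handler_private.extra_arg = elem; /* stash elem in the batch */
  async_call(batch);                       /* the batch is the user_data */
}

This removes a full copy of the op from the call's stack frame and keeps
exactly one live owner of the batch across the asynchronous hop.
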
@ -383,8 +383,7 @@ static void fake_channel_add_handshakers(
grpc_handshake_manager_add(
handshake_mgr,
grpc_security_handshaker_create(
exec_ctx, tsi_create_adapter_handshaker(
tsi_create_fake_handshaker(true /* is_client */)),
exec_ctx, tsi_create_fake_handshaker(true /* is_client */),
&sc->base));
}
@ -394,8 +393,7 @@ static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx,
grpc_handshake_manager_add(
handshake_mgr,
grpc_security_handshaker_create(
exec_ctx, tsi_create_adapter_handshaker(
tsi_create_fake_handshaker(false /* is_client */)),
exec_ctx, tsi_create_fake_handshaker(false /* is_client */),
&sc->base));
}

@ -27,14 +27,9 @@
#include "src/core/lib/slice/slice_internal.h"
typedef struct call_data {
grpc_metadata_batch *recv_initial_metadata;
/* Closure to call when finished with the auth_on_recv hook. */
grpc_closure *on_done_recv;
/* Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member after
handling it. */
grpc_closure auth_on_recv;
grpc_transport_stream_op_batch *transport_op;
grpc_transport_stream_op_batch *recv_initial_metadata_batch;
grpc_closure *original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
grpc_metadata_array md;
const grpc_metadata *consumed_md;
size_t num_consumed_md;
@ -90,125 +85,96 @@ static void on_md_processing_done(
grpc_status_code status, const char *error_details) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) {
gpr_log(GPR_INFO,
"response_md in auth metadata processing not supported for now. "
"Ignoring...");
}
grpc_error *error = GRPC_ERROR_NONE;
if (status == GRPC_STATUS_OK) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
/* TODO(ctiller): propagate error */
GRPC_LOG_IF_ERROR(
"grpc_metadata_batch_filter",
grpc_metadata_batch_filter(&exec_ctx, calld->recv_initial_metadata,
remove_consumed_md, elem,
"Response metadata filtering error"));
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
GRPC_CLOSURE_SCHED(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
error = grpc_metadata_batch_filter(
&exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
remove_consumed_md, elem, "Response metadata filtering error");
} else {
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
error_details = error_details != NULL
? error_details
: "Authentication metadata processing failed.";
if (calld->transport_op->send_message) {
grpc_byte_stream_destroy(
&exec_ctx, calld->transport_op->payload->send_message.send_message);
calld->transport_op->payload->send_message.send_message = NULL;
if (error_details == NULL) {
error_details = "Authentication metadata processing failed.";
}
GRPC_CLOSURE_SCHED(
&exec_ctx, calld->on_done_recv,
error =
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
GRPC_ERROR_INT_GRPC_STATUS, status));
GRPC_ERROR_INT_GRPC_STATUS, status);
}
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
GRPC_CLOSURE_SCHED(&exec_ctx, calld->original_recv_initial_metadata_ready,
error);
grpc_exec_ctx_finish(&exec_ctx);
}
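The rewrite above also consolidates the duplicated `calld->md` cleanup: each branch now only computes `error`, and a single shared tail unrefs the metadata slices, destroys the array, and schedules the saved closure. A toy sketch of that single-exit shape (hypothetical simplified types, not the gRPC API):

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int code; const char *msg; } result;

/* Both branches only set `result`; cleanup happens exactly once at the
 * end, mirroring how on_md_processing_done now frees calld->md in one
 * place instead of in each branch. */
static result process(int status) {
  char *md = malloc(16); /* stands in for the copied metadata array */
  result r = {0, "ok"};
  if (status != 0) {
    r.code = status;
    r.msg = "Authentication metadata processing failed.";
  }
  free(md); /* single cleanup path for success and failure */
  return r; /* stands in for scheduling the saved closure with `error` */
}

int main(void) {
  printf("%s\n", process(0).msg);
  printf("%s\n", process(16).msg);
  return 0;
}
```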
static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
if (error == GRPC_ERROR_NONE) {
if (chand->creds != NULL && chand->creds->processor.process != NULL) {
calld->md = metadata_batch_to_md_array(calld->recv_initial_metadata);
calld->md = metadata_batch_to_md_array(
batch->payload->recv_initial_metadata.recv_initial_metadata);
chand->creds->processor.process(
chand->creds->processor.state, calld->auth_context,
calld->md.metadata, calld->md.count, on_md_processing_done, elem);
return;
}
}
GRPC_CLOSURE_SCHED(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error));
GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
}
static void set_recv_ops_md_callbacks(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
static void auth_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
calld->recv_initial_metadata =
op->payload->recv_initial_metadata.recv_initial_metadata;
calld->on_done_recv =
op->payload->recv_initial_metadata.recv_initial_metadata_ready;
op->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->auth_on_recv;
calld->transport_op = op;
if (batch->recv_initial_metadata) {
// Inject our callback.
calld->recv_initial_metadata_batch = batch;
calld->original_recv_initial_metadata_ready =
batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->recv_initial_metadata_ready;
}
}
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
set_recv_ops_md_callbacks(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
grpc_call_next_op(exec_ctx, elem, batch);
}
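The filter's interception pattern is worth spelling out: when a batch carries `recv_initial_metadata`, the filter saves the caller's ready-closure, substitutes its own, and chains to the original once its own processing is done. A self-contained sketch of that closure-injection idea with hypothetical simplified types:

```c
#include <stdio.h>

typedef void (*closure_fn)(void *arg, int error);
typedef struct { closure_fn cb; void *arg; } closure;
typedef struct { closure *recv_initial_metadata_ready; } batch;
typedef struct { closure our_closure; closure *original_ready; } call_data;

static void run(closure *c, int error) { c->cb(c->arg, error); }

/* Runs first when the transport signals metadata, then chains onward. */
static void our_ready(void *arg, int error) {
  call_data *calld = arg;
  printf("filter inspects metadata (error=%d)\n", error);
  run(calld->original_ready, error);
}

/* Save the caller's closure and inject ours in its place. */
static void start_batch(call_data *calld, batch *b) {
  calld->original_ready = b->recv_initial_metadata_ready;
  calld->our_closure.cb = our_ready;
  calld->our_closure.arg = calld;
  b->recv_initial_metadata_ready = &calld->our_closure;
}

static void app_ready(void *arg, int error) {
  (void)arg;
  printf("application callback (error=%d)\n", error);
}

int main(void) {
  call_data calld;
  closure app = {app_ready, NULL};
  batch b = {&app};
  start_batch(&calld, &b);
  run(b.recv_initial_metadata_ready, 0); /* transport fires the batch */
  return 0;
}
```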
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_server_security_context *server_ctx = NULL;
/* initialize members */
memset(calld, 0, sizeof(*calld));
GRPC_CLOSURE_INIT(&calld->auth_on_recv, auth_on_recv, elem,
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
// Create server security context. Set its auth context from channel
// data and save it in the call context.
grpc_server_security_context *server_ctx =
grpc_server_security_context_create();
server_ctx->auth_context = grpc_auth_context_create(chand->auth_context);
calld->auth_context = server_ctx->auth_context;
if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
args->context[GRPC_CONTEXT_SECURITY].destroy(
args->context[GRPC_CONTEXT_SECURITY].value);
}
server_ctx = grpc_server_security_context_create();
server_ctx->auth_context = grpc_auth_context_create(chand->auth_context);
calld->auth_context = server_ctx->auth_context;
args->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
args->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_server_security_context_destroy;
return GRPC_ERROR_NONE;
}
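Note the ordering in init_call_elem: any value already sitting in the GRPC_CONTEXT_SECURITY slot is destroyed before the new server security context is installed, so the slot never leaks its previous occupant. A small sketch of that slot discipline (hypothetical types):

```c
#include <stdlib.h>

typedef struct {
  void *value;
  void (*destroy)(void *value);
} context_slot;

/* Destroy the previous occupant, if any, before overwriting the slot. */
static void set_slot(context_slot *slot, void *value, void (*destroy)(void *)) {
  if (slot->value != NULL && slot->destroy != NULL) {
    slot->destroy(slot->value);
  }
  slot->value = value;
  slot->destroy = destroy;
}

int main(void) {
  context_slot slot = {NULL, NULL};
  set_slot(&slot, malloc(16), free);
  set_slot(&slot, malloc(32), free); /* frees the first allocation */
  set_slot(&slot, NULL, NULL);       /* frees the second */
  return 0;
}
```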
@@ -221,19 +187,15 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
channel_data *chand = elem->channel_data;
grpc_auth_context *auth_context =
grpc_find_auth_context_in_args(args->channel_args);
grpc_server_credentials *creds =
grpc_find_server_credentials_in_args(args->channel_args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(auth_context != NULL);
/* initialize members */
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
grpc_server_credentials *creds =
grpc_find_server_credentials_in_args(args->channel_args);
chand->creds = grpc_server_credentials_ref(creds);
return GRPC_ERROR_NONE;
}
@@ -241,14 +203,13 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter");
grpc_server_credentials_unref(exec_ctx, chand->creds);
}
const grpc_channel_filter grpc_server_auth_filter = {
auth_start_transport_op,
auth_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,

@@ -38,7 +38,7 @@ struct gpr_arena {
gpr_arena *gpr_arena_create(size_t initial_size) {
initial_size = ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
gpr_arena *a = gpr_zalloc(sizeof(gpr_arena) + initial_size);
gpr_arena *a = (gpr_arena *)gpr_zalloc(sizeof(gpr_arena) + initial_size);
a->initial_zone.size_end = initial_size;
return a;
}
@@ -64,7 +64,7 @@ void *gpr_arena_alloc(gpr_arena *arena, size_t size) {
zone *next_z = (zone *)gpr_atm_acq_load(&z->next_atm);
if (next_z == NULL) {
size_t next_z_size = (size_t)gpr_atm_no_barrier_load(&arena->size_so_far);
next_z = gpr_zalloc(sizeof(zone) + next_z_size);
next_z = (zone *)gpr_zalloc(sizeof(zone) + next_z_size);
next_z->size_begin = z->size_end;
next_z->size_end = z->size_end + next_z_size;
if (!gpr_atm_rel_cas(&z->next_atm, (gpr_atm)NULL, (gpr_atm)next_z)) {

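The `(gpr_arena *)` and `(zone *)` casts above (and the `(gpr_avl_node *)` cast below) exist because `gpr_zalloc`/`gpr_malloc` return `void *`: C converts that implicitly, C++ does not, so the explicit casts keep these files buildable under both compilers. A minimal illustration, with `calloc` standing in for `gpr_zalloc`:

```c
#include <stdlib.h>

typedef struct arena { size_t size; } arena;

static arena *arena_create(size_t initial_size) {
  /* Without the cast, this line fails to compile as C++. */
  arena *a = (arena *)calloc(1, sizeof(arena) + initial_size);
  if (a != NULL) a->size = initial_size;
  return a;
}

int main(void) {
  arena *a = arena_create(64);
  free(a);
  return 0;
}
```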
@@ -21,12 +21,12 @@
gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm *value, gpr_atm delta,
gpr_atm min, gpr_atm max) {
gpr_atm current;
gpr_atm new;
gpr_atm current_value;
gpr_atm new_value;
do {
current = gpr_atm_no_barrier_load(value);
new = GPR_CLAMP(current + delta, min, max);
if (new == current) break;
} while (!gpr_atm_no_barrier_cas(value, current, new));
return new;
current_value = gpr_atm_no_barrier_load(value);
new_value = GPR_CLAMP(current_value + delta, min, max);
if (new_value == current_value) break;
} while (!gpr_atm_no_barrier_cas(value, current_value, new_value));
return new_value;
}

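The rename from `new` to `new_value` matters for the same C++-compatibility reason: `new` is a C++ keyword. The loop itself is a standard compare-and-swap retry; here is the same clamped-add shape in portable C11 atomics, standing in for `gpr_atm` and its helpers:

```c
#include <stdatomic.h>
#include <stdio.h>

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

static long clamped_add(_Atomic long *value, long delta, long min, long max) {
  long current_value, new_value;
  do {
    current_value = atomic_load_explicit(value, memory_order_relaxed);
    new_value = CLAMP(current_value + delta, min, max);
    if (new_value == current_value) break; /* already at a clamp bound */
  } while (!atomic_compare_exchange_weak_explicit(
      value, &current_value, new_value,
      memory_order_relaxed, memory_order_relaxed));
  return new_value;
}

int main(void) {
  _Atomic long v = 5;
  printf("%ld\n", clamped_add(&v, 100, 0, 10)); /* prints 10 */
  return 0;
}
```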
@@ -76,7 +76,7 @@ static gpr_avl_node *assert_invariants(gpr_avl_node *n) { return n; }
gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left,
gpr_avl_node *right) {
gpr_avl_node *node = gpr_malloc(sizeof(*node));
gpr_avl_node *node = (gpr_avl_node *)gpr_malloc(sizeof(*node));
gpr_ref_init(&node->refs, 1);
node->key = key;
node->value = value;

@@ -64,6 +64,8 @@ void gpr_default_log(gpr_log_func_args *args) {
time_t timer;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
struct tm tm;
static __thread long tid = 0;
if (tid == 0) tid = gettid();
timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '/');
@@ -81,7 +83,7 @@ void gpr_default_log(gpr_log_func_args *args) {
gpr_asprintf(&prefix, "%s%s.%09" PRId32 " %7ld %s:%d]",
gpr_log_severity_string(args->severity), time_buffer,
now.tv_nsec, gettid(), display_file, args->line);
now.tv_nsec, tid, display_file, args->line);
fprintf(stderr, "%-60s %s\n", prefix, args->message);
gpr_free(prefix);

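Caching the thread id in a `__thread` variable saves a syscall on every log line: `gettid()` has no vDSO fast path, so the first call per thread stores the result and later calls just read the thread-local. A standalone Linux sketch of the same idea (using `syscall(SYS_gettid)`, since older glibc ships no `gettid()` wrapper):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long cached_tid(void) {
  static __thread long tid = 0; /* one cached value per thread */
  if (tid == 0) tid = (long)syscall(SYS_gettid);
  return tid;
}

int main(void) {
  printf("tid = %ld\n", cached_tid()); /* first call: syscall */
  printf("tid = %ld\n", cached_tid()); /* second call: cached */
  return 0;
}
```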
@@ -31,12 +31,11 @@ void gpr_mpscq_destroy(gpr_mpscq *q) {
GPR_ASSERT(q->tail == &q->stub);
}
bool gpr_mpscq_push(gpr_mpscq *q, gpr_mpscq_node *n) {
void gpr_mpscq_push(gpr_mpscq *q, gpr_mpscq_node *n) {
gpr_atm_no_barrier_store(&n->next, (gpr_atm)NULL);
gpr_mpscq_node *prev =
(gpr_mpscq_node *)gpr_atm_full_xchg(&q->head, (gpr_atm)n);
gpr_atm_rel_store(&prev->next, (gpr_atm)n);
return prev == &q->stub;
}
gpr_mpscq_node *gpr_mpscq_pop(gpr_mpscq *q) {
@@ -78,25 +77,3 @@ gpr_mpscq_node *gpr_mpscq_pop_and_check_end(gpr_mpscq *q, bool *empty) {
*empty = false;
return NULL;
}
void gpr_locked_mpscq_init(gpr_locked_mpscq *q) {
gpr_mpscq_init(&q->queue);
q->read_lock = GPR_SPINLOCK_INITIALIZER;
}
void gpr_locked_mpscq_destroy(gpr_locked_mpscq *q) {
gpr_mpscq_destroy(&q->queue);
}
bool gpr_locked_mpscq_push(gpr_locked_mpscq *q, gpr_mpscq_node *n) {
return gpr_mpscq_push(&q->queue, n);
}
gpr_mpscq_node *gpr_locked_mpscq_pop(gpr_locked_mpscq *q) {
if (gpr_spinlock_trylock(&q->read_lock)) {
gpr_mpscq_node *n = gpr_mpscq_pop(&q->queue);
gpr_spinlock_unlock(&q->read_lock);
return n;
}
return NULL;
}

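The push routine being changed is the classic Vyukov multi-producer single-consumer queue: producers atomically swing `head` to the new node, then link the previous head to it; the old `bool` return ("did the queue transition from empty?") is dropped along with the `gpr_locked_mpscq` wrappers. A push-only sketch in C11 atomics (pop omitted; hypothetical names, not the gpr_mpscq API):

```c
#include <stdatomic.h>
#include <stddef.h>

typedef struct mpscq_node {
  _Atomic(struct mpscq_node *) next;
} mpscq_node;

typedef struct {
  _Atomic(mpscq_node *) head; /* producers swap this */
  mpscq_node *tail;           /* touched only by the single consumer */
  mpscq_node stub;            /* keeps the list non-empty */
} mpscq;

static void mpscq_init(mpscq *q) {
  atomic_store(&q->stub.next, (mpscq_node *)NULL);
  atomic_store(&q->head, &q->stub);
  q->tail = &q->stub;
}

/* Safe to call from many threads at once; matches the void signature. */
static void mpscq_push(mpscq *q, mpscq_node *n) {
  atomic_store_explicit(&n->next, (mpscq_node *)NULL, memory_order_relaxed);
  mpscq_node *prev =
      atomic_exchange_explicit(&q->head, n, memory_order_acq_rel);
  atomic_store_explicit(&prev->next, n, memory_order_release);
}

int main(void) {
  mpscq q;
  mpscq_node a;
  mpscq_init(&q);
  mpscq_push(&q, &a);
  return 0;
}
```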