Merge branch 'master' into implement-server-interceptor-for-unary-unary-call

pull/22032/head
Zhanghui Mao 5 years ago committed by GitHub
commit 958fb08d0d
  1. .github/ISSUE_TEMPLATE/bug_report.md (2)
  2. .github/ISSUE_TEMPLATE/cleanup_request.md (2)
  3. .github/ISSUE_TEMPLATE/feature_request.md (2)
  4. .github/pull_request_template.md (2)
  5. .gitignore (6)
  6. .pylintrc (4)
  7. BUILD (1)
  8. BUILD.gn (1)
  9. CMakeLists.txt (1)
  10. Makefile (2)
  11. build.yaml (1)
  12. doc/python/sphinx/_static/custom.css (3)
  13. doc/python/sphinx/conf.py (18)
  14. doc/python/sphinx/grpc_asyncio.rst (132)
  15. doc/python/sphinx/index.rst (1)
  16. gRPC-C++.podspec (1)
  17. gRPC-Core.podspec (1)
  18. grpc.gyp (1)
  19. include/grpcpp/impl/codegen/server_callback_handlers.h (202)
  20. include/grpcpp/impl/codegen/server_callback_impl.h (52)
  21. include/grpcpp/impl/codegen/server_context_impl.h (2)
  22. include/grpcpp/security/tls_credentials_options.h (43)
  23. setup.py (2)
  24. src/core/ext/filters/client_channel/client_channel.cc (4)
  25. src/core/ext/filters/client_channel/http_connect_handshaker.cc (2)
  26. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (8)
  27. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (20)
  28. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (12)
  29. src/core/ext/filters/client_channel/lb_policy/xds/cds.cc (8)
  30. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (10)
  31. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (2)
  32. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc (2)
  33. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc (2)
  34. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc (2)
  35. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (6)
  36. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (4)
  37. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc (2)
  38. src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc (6)
  39. src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc (4)
  40. src/core/ext/filters/client_channel/resolver_result_parsing.cc (8)
  41. src/core/ext/filters/client_channel/resolving_lb_policy.cc (8)
  42. src/core/ext/filters/client_channel/service_config.cc (2)
  43. src/core/ext/filters/client_channel/xds/xds_api.cc (27)
  44. src/core/ext/filters/client_channel/xds/xds_bootstrap.cc (4)
  45. src/core/ext/filters/message_size/message_size_filter.cc (6)
  46. src/core/lib/gprpp/memory.h (8)
  47. src/core/lib/iomgr/ev_epollex_linux.cc (14)
  48. src/core/lib/security/credentials/credentials.h (1)
  49. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (19)
  50. src/core/lib/security/credentials/oauth2/oauth2_credentials.h (10)
  51. src/core/lib/security/transport/client_auth_filter.cc (8)
  52. src/core/lib/security/transport/security_handshaker.cc (4)
  53. src/core/lib/transport/metadata_batch.cc (13)
  54. src/core/lib/transport/metadata_batch.h (11)
  55. src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc (2)
  56. src/core/tsi/ssl/session_cache/ssl_session_openssl.cc (2)
  57. src/cpp/common/alts_util.cc (2)
  58. src/cpp/common/tls_credentials_options.cc (62)
  59. src/cpp/server/server_callback.cc (48)
  60. src/proto/grpc/core/BUILD (11)
  61. src/proto/grpc/testing/BUILD (52)
  62. src/proto/grpc/testing/control.proto (6)
  63. src/python/grpcio/grpc/BUILD.bazel (56)
  64. src/python/grpcio/grpc/__init__.py (5)
  65. src/python/grpcio/grpc/_cython/_cygrpc/aio/common.pyx.pxi (2)
  66. src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi (7)
  67. src/python/grpcio/grpc/_simple_stubs.py (450)
  68. src/python/grpcio/grpc/experimental/BUILD.bazel (11)
  69. src/python/grpcio/grpc/experimental/__init__.py (58)
  70. src/python/grpcio/grpc/experimental/aio/__init__.py (99)
  71. src/python/grpcio/grpc/experimental/aio/_base_channel.py (345)
  72. src/python/grpcio/grpc/experimental/aio/_base_server.py (254)
  73. src/python/grpcio/grpc/experimental/aio/_channel.py (227)
  74. src/python/grpcio/grpc/experimental/aio/_interceptor.py (16)
  75. src/python/grpcio/grpc/experimental/aio/_server.py (3)
  76. src/python/grpcio_health_checking/grpc_health/v1/BUILD.bazel (5)
  77. src/python/grpcio_health_checking/grpc_health/v1/_async.py (113)
  78. src/python/grpcio_health_checking/grpc_health/v1/health.py (11)
  79. src/python/grpcio_tests/commands.py (31)
  80. src/python/grpcio_tests/setup.py (1)
  81. src/python/grpcio_tests/tests/qps/BUILD.bazel (27)
  82. src/python/grpcio_tests/tests/qps/histogram.py (10)
  83. src/python/grpcio_tests/tests/unit/_invocation_defects_test.py (59)
  84. src/python/grpcio_tests/tests_aio/benchmark/BUILD.bazel (63)
  85. src/python/grpcio_tests/tests_aio/benchmark/benchmark_client.py (155)
  86. src/python/grpcio_tests/tests_aio/benchmark/benchmark_servicer.py (55)
  87. src/python/grpcio_tests/tests_aio/benchmark/server.py (18)
  88. src/python/grpcio_tests/tests_aio/benchmark/worker.py (58)
  89. src/python/grpcio_tests/tests_aio/benchmark/worker_servicer.py (367)
  90. src/python/grpcio_tests/tests_aio/health_check/BUILD.bazel (29)
  91. src/python/grpcio_tests/tests_aio/health_check/__init__.py (13)
  92. src/python/grpcio_tests/tests_aio/health_check/health_servicer_test.py (262)
  93. src/python/grpcio_tests/tests_aio/tests.json (1)
  94. src/python/grpcio_tests/tests_py3_only/__init__.py (21)
  95. src/python/grpcio_tests/tests_py3_only/unit/BUILD.bazel (41)
  96. src/python/grpcio_tests/tests_py3_only/unit/__init__.py (13)
  97. src/python/grpcio_tests/tests_py3_only/unit/_simple_stubs_test.py (276)
  98. templates/Makefile.template (2)
  99. test/core/channel/channelz_test.cc (4)
  100. test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc (2)

Some files were not shown because too many files have changed in this diff.

.github/ISSUE_TEMPLATE/bug_report.md

@@ -2,7 +2,7 @@
 name: Report a bug
 about: Create a report to help us improve
 labels: kind/bug, priority/P2
-assignees: markdroth
+assignees: nicolasnoble
 ---

.github/ISSUE_TEMPLATE/cleanup_request.md

@@ -2,7 +2,7 @@
 name: Request a cleanup
 about: Suggest a cleanup in our repository
 labels: kind/internal cleanup, priority/P2
-assignees: markdroth
+assignees: nicolasnoble
 ---

.github/ISSUE_TEMPLATE/feature_request.md

@@ -2,7 +2,7 @@
 name: Request a feature
 about: Suggest an idea for this project
 labels: kind/enhancement, priority/P2
-assignees: markdroth
+assignees: nicolasnoble
 ---

.github/pull_request_template.md

@@ -8,4 +8,4 @@ If you know who should review your pull request, please remove the mentioning be
 -->
-@markdroth
+@nicolasnoble

.gitignore

@@ -115,11 +115,7 @@ Podfile.lock
 .idea/
 # Bazel files
-bazel-bin
-bazel-genfiles
-bazel-grpc
-bazel-out
-bazel-testlogs
+bazel-*
 bazel_format_virtual_environment/
 tools/bazel-*

.pylintrc

@@ -12,14 +12,14 @@ extension-pkg-whitelist=grpc._cython.cygrpc
 # TODO(https://github.com/PyCQA/pylint/issues/1345): How does the inspection
 # not include "unused_" and "ignored_" by default?
-dummy-variables-rgx=^ignored_|^unused_
+dummy-variables-rgx=^ignored_|^unused_|_
 [DESIGN]
 # NOTE(nathaniel): Not particularly attached to this value; it just seems to
 # be what works for us at the moment (excepting the dead-code-walking Beta
 # API).
-max-args=7
+max-args=14
 max-parents=8
 [MISCELLANEOUS]

BUILD

@@ -561,6 +561,7 @@ grpc_cc_library(
         "src/core/lib/profiling/timers.h",
     ],
     external_deps = [
+        "absl/memory",
         "absl/strings",
     ],
     language = "c++",

BUILD.gn

@@ -162,6 +162,7 @@ config("grpc_config") {
       ]
       deps = [
         ":absl/container:inlined_vector",
+        ":absl/memory:memory",
         ":absl/strings:strings",
         ":absl/types:optional",
       ]

CMakeLists.txt

@@ -1439,6 +1439,7 @@ target_include_directories(gpr
 target_link_libraries(gpr
   ${_gRPC_ALLTARGETS_LIBRARIES}
   absl::inlined_vector
+  absl::memory
   absl::strings
   absl::optional
 )

Makefile

@@ -410,7 +410,7 @@ LDFLAGS += -pthread
 endif
 ifeq ($(SYSTEM),MINGW32)
-LIBS = m pthread ws2_32
+LIBS = m pthread ws2_32 dbghelp
 LDFLAGS += -pthread
 endif

build.yaml

@@ -271,6 +271,7 @@ filegroups:
   - src/core/lib/profiling/stap_timers.cc
   deps:
   - absl/container:inlined_vector
+  - absl/memory:memory
   - absl/strings:strings
   - absl/types:optional
   uses:

doc/python/sphinx/_static/custom.css

@@ -0,0 +1,3 @@
dl.field-list > dt {
    word-break: keep-all !important;
}

doc/python/sphinx/conf.py

@@ -16,8 +16,8 @@
 import os
 import sys
-PYTHON_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)),
-                             '..', '..', '..', 'src', 'python')
+PYTHON_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
+                             '..', '..', 'src', 'python')
 sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio'))
 sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_channelz'))
 sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_health_checking'))

@@ -53,6 +53,7 @@ extensions = [
     'sphinx.ext.todo',
     'sphinx.ext.napoleon',
     'sphinx.ext.coverage',
+    'sphinx.ext.autodoc.typehints',
 ]
 napoleon_google_docstring = True

@@ -63,15 +64,9 @@ autodoc_default_options = {
     'members': None,
 }
-autodoc_mock_imports = [
-    'grpc._cython',
-    'grpc_channelz.v1.channelz_pb2',
-    'grpc_channelz.v1.channelz_pb2_grpc',
-    'grpc_health.v1.health_pb2',
-    'grpc_health.v1.health_pb2_grpc',
-    'grpc_reflection.v1alpha.reflection_pb2',
-    'grpc_reflection.v1alpha.reflection_pb2_grpc',
-]
+autodoc_mock_imports = []
+autodoc_typehints = 'description'
 # -- HTML Configuration -------------------------------------------------

@@ -84,6 +79,7 @@ html_theme_options = {
     'description': grpc_version.VERSION,
     'show_powered_by': False,
 }
+html_static_path = ["_static"]
 # -- Options for manual page output ------------------------------------------

doc/python/sphinx/grpc_asyncio.rst

@@ -0,0 +1,132 @@
gRPC AsyncIO API
================

.. module:: grpc.experimental.aio

Overview
--------

gRPC AsyncIO API is the **new version** of gRPC Python whose architecture is
tailored to AsyncIO. Under the hood, it uses the same C extension, gRPC C-Core,
as the existing stack, and it replaces all gRPC IO operations with methods
provided by the AsyncIO library.

This stack is currently under active development. Feel free to offer
suggestions by opening issues on our GitHub repo `grpc/grpc <https://github.com/grpc/grpc>`_.

The design doc can be found here as `gRFC <https://github.com/grpc/proposal/pull/155>`_.

Caveats
-------

gRPC Async API objects may only be used on the thread on which they were
created. AsyncIO doesn't provide thread safety for most of its APIs.

Module Contents
---------------

Enable AsyncIO in gRPC
^^^^^^^^^^^^^^^^^^^^^^

.. function:: init_grpc_aio

    Enable AsyncIO for gRPC Python.

    This function is idempotent, and it should be invoked before the creation
    of AsyncIO stack objects. Otherwise, the application might deadlock.

    This function configures the gRPC C-Core to invoke AsyncIO methods for IO
    operations (e.g., socket read, write). The configuration applies to the
    entire process.

    After invoking this function, making blocking function calls in coroutines
    or in the thread running the event loop will block the event loop,
    potentially starving all RPCs in the process. Refer to the Python language
    documentation on AsyncIO for more details (`running-blocking-code <https://docs.python.org/3/library/asyncio-dev.html#running-blocking-code>`_).

Create Channel
^^^^^^^^^^^^^^

Channels are the client-side abstraction where most of the networking logic
happens, for example managing one or more underlying connections, name
resolution, load balancing, and flow control. If you are using ProtoBuf,
Channel objects work best when further encapsulated in stub objects, so that
the application can invoke remote functions as if they were local functions.

.. autofunction:: insecure_channel
.. autofunction:: secure_channel

Channel Object
^^^^^^^^^^^^^^

.. autoclass:: Channel

Create Server
^^^^^^^^^^^^^

.. autofunction:: server

Server Object
^^^^^^^^^^^^^

.. autoclass:: Server

gRPC Exceptions
^^^^^^^^^^^^^^^

.. autoexception:: BaseError
.. autoexception:: UsageError
.. autoexception:: AbortError
.. autoexception:: InternalError
.. autoexception:: AioRpcError

Shared Context
^^^^^^^^^^^^^^

.. autoclass:: RpcContext

Client-Side Context
^^^^^^^^^^^^^^^^^^^

.. autoclass:: Call
.. autoclass:: UnaryUnaryCall
.. autoclass:: UnaryStreamCall
.. autoclass:: StreamUnaryCall
.. autoclass:: StreamStreamCall

Server-Side Context
^^^^^^^^^^^^^^^^^^^

.. autoclass:: ServicerContext

Client-Side Interceptor
^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: ClientCallDetails
.. autoclass:: InterceptedUnaryUnaryCall
.. autoclass:: UnaryUnaryClientInterceptor

.. Service-Side Context
.. ^^^^^^^^^^^^^^^^^^^^
.. .. autoclass:: ServicerContext

Multi-Callable Interfaces
^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: UnaryUnaryMultiCallable
.. autoclass:: UnaryStreamMultiCallable()
.. autoclass:: StreamUnaryMultiCallable()
.. autoclass:: StreamStreamMultiCallable()
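
To make the documented surface concrete, here is a minimal usage sketch of the
AsyncIO API described above. It assumes a server is already listening on
localhost:50051; the method path ``/demo.Echo/Hello`` and the byte payloads are
hypothetical placeholders, while ``init_grpc_aio``, ``insecure_channel``, and the
multi-callable/call objects are the entry points documented in this file::

    import asyncio

    from grpc.experimental import aio


    async def main():
        # Switch gRPC Python's IO onto the asyncio event loop; per the docs
        # above this must happen before any AsyncIO stack objects are created.
        aio.init_grpc_aio()

        # The channel manages connections, name resolution, load balancing, etc.
        channel = aio.insecure_channel('localhost:50051')
        try:
            # A bare unary-unary multi-callable; with ProtoBuf you would call a
            # generated stub method instead. Without serializers the request
            # and response are raw bytes.
            hello = channel.unary_unary('/demo.Echo/Hello')
            call = hello(b'ping')          # returns a UnaryUnaryCall
            response = await call          # awaiting the call yields the response
            print(await call.code(), response)
        finally:
            await channel.close()


    asyncio.get_event_loop().run_until_complete(main())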

doc/python/sphinx/index.rst

@@ -10,6 +10,7 @@ API Reference
    :caption: Contents:

    grpc
+   grpc_asyncio
    grpc_channelz
    grpc_health_checking
    grpc_reflection

gRPC-C++.podspec

@@ -215,6 +215,7 @@ Pod::Spec.new do |s|
     ss.dependency 'gRPC-Core', version
     abseil_version = '0.20190808.1'
     ss.dependency 'abseil/container/inlined_vector', abseil_version
+    ss.dependency 'abseil/memory/memory', abseil_version
     ss.dependency 'abseil/strings/strings', abseil_version
     ss.dependency 'abseil/types/optional', abseil_version

gRPC-Core.podspec

@@ -175,6 +175,7 @@ Pod::Spec.new do |s|
     ss.dependency 'BoringSSL-GRPC', '0.0.7'
     abseil_version = '0.20190808.1'
     ss.dependency 'abseil/container/inlined_vector', abseil_version
+    ss.dependency 'abseil/memory/memory', abseil_version
     ss.dependency 'abseil/strings/strings', abseil_version
     ss.dependency 'abseil/types/optional', abseil_version
     ss.compiler_flags = '-DGRPC_SHADOW_BORINGSSL_SYMBOLS'

grpc.gyp

@@ -442,6 +442,7 @@
       'type': 'static_library',
       'dependencies': [
         'absl/container:inlined_vector',
+        'absl/memory:memory',
         'absl/strings:strings',
         'absl/types:optional',
       ],

include/grpcpp/impl/codegen/server_callback_handlers.h

@@ -117,9 +117,19 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
   class ServerCallbackUnaryImpl : public ServerCallbackUnary {
    public:
    void Finish(::grpc::Status s) override {
+      // A callback that only contains a call to MaybeDone can be run as an
+      // inline callback regardless of whether or not OnDone is inlineable
+      // because if the actual OnDone callback needs to be scheduled, MaybeDone
+      // is responsible for dispatching to an executor thread if needed. Thus,
+      // when setting up the finish_tag_, we can set its own callback to
+      // inlineable.
      finish_tag_.Set(
-          call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
-          reactor_.load(std::memory_order_relaxed)->InternalInlineable());
+          call_.call(),
+          [this](bool) {
+            this->MaybeDone(
+                reactor_.load(std::memory_order_relaxed)->InternalInlineable());
+          },
+          &finish_ops_, /*can_inline=*/true);
      finish_ops_.set_core_cq_tag(&finish_tag_);
      if (!ctx_->sent_initial_metadata_) {

@@ -144,13 +154,19 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
    void SendInitialMetadata() override {
      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
      this->Ref();
+      // The callback for this function should not be marked inline because it
+      // is directly invoking a user-controlled reaction
+      // (OnSendInitialMetadataDone). Thus it must be dispatched to an executor
+      // thread. However, any OnDone needed after that can be inlined because it
+      // is already running on an executor thread.
      meta_tag_.Set(call_.call(),
                    [this](bool ok) {
-                      reactor_.load(std::memory_order_relaxed)
-                          ->OnSendInitialMetadataDone(ok);
-                      MaybeDone();
+                      ServerUnaryReactor* reactor =
+                          reactor_.load(std::memory_order_relaxed);
+                      reactor->OnSendInitialMetadataDone(ok);
+                      this->MaybeDone(/*inlineable_ondone=*/true);
                    },
-                    &meta_ops_, false);
+                    &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {

@@ -184,14 +200,13 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
      reactor_.store(reactor, std::memory_order_relaxed);
      this->BindReactor(reactor);
      this->MaybeCallOnCancel(reactor);
-      this->MaybeDone();
+      this->MaybeDone(reactor->InternalInlineable());
    }
    const RequestType* request() { return allocator_state_->request(); }
    ResponseType* response() { return allocator_state_->response(); }
-    void MaybeDone() override {
-      if (GPR_UNLIKELY(this->Unref() == 1)) {
-        reactor_.load(std::memory_order_relaxed)->OnDone();
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
+    void CallOnDone() override {
+      reactor_.load(std::memory_order_relaxed)->OnDone();
+      grpc_call* call = call_.call();
+      auto call_requester = std::move(call_requester_);

@@ -200,7 +215,6 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
-      }
-    }
+      ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+      call_requester();
+    }
    ServerReactor* reactor() override {
      return reactor_.load(std::memory_order_relaxed);

@@ -255,8 +269,13 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
            static_cast<::grpc_impl::CallbackServerContext*>(
                param.server_context),
            param.call, std::move(param.call_requester));
+    // Inlineable OnDone can be false in the CompletionOp callback because there
+    // is no read reactor that has an inlineable OnDone; this only applies to
+    // the DefaultReactor (which is unary).
    param.server_context->BeginCompletionOp(
-        param.call, [reader](bool) { reader->MaybeDone(); }, reader);
+        param.call,
+        [reader](bool) { reader->MaybeDone(/*inlineable_ondone=*/false); },
+        reader);
    ServerReadReactor<RequestType>* reactor = nullptr;
    if (param.status.ok()) {

@@ -287,8 +306,17 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
  class ServerCallbackReaderImpl : public ServerCallbackReader<RequestType> {
   public:
    void Finish(::grpc::Status s) override {
-      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
-                      false);
+      // A finish tag with only MaybeDone can have its callback inlined
+      // regardless even if OnDone is not inlineable because this callback just
+      // checks a ref and then decides whether or not to dispatch OnDone.
+      finish_tag_.Set(call_.call(),
+                      [this](bool) {
+                        // Inlineable OnDone can be false here because there is
+                        // no read reactor that has an inlineable OnDone; this
+                        // only applies to the DefaultReactor (which is unary).
+                        this->MaybeDone(/*inlineable_ondone=*/false);
+                      },
+                      &finish_ops_, /*can_inline=*/true);
      if (!ctx_->sent_initial_metadata_) {
        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                        ctx_->initial_metadata_flags());

@@ -311,13 +339,17 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
    void SendInitialMetadata() override {
      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
      this->Ref();
+      // The callback for this function should not be inlined because it invokes
+      // a user-controlled reaction, but any resulting OnDone can be inlined in
+      // the executor to which this callback is dispatched.
      meta_tag_.Set(call_.call(),
                    [this](bool ok) {
-                      reactor_.load(std::memory_order_relaxed)
-                          ->OnSendInitialMetadataDone(ok);
-                      MaybeDone();
+                      ServerReadReactor<RequestType>* reactor =
+                          reactor_.load(std::memory_order_relaxed);
+                      reactor->OnSendInitialMetadataDone(ok);
+                      this->MaybeDone(/*inlineable_ondone=*/true);
                    },
-                    &meta_ops_, false);
+                    &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {

@@ -344,24 +376,29 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
    void SetupReactor(ServerReadReactor<RequestType>* reactor) {
      reactor_.store(reactor, std::memory_order_relaxed);
+      // The callback for this function should not be inlined because it invokes
+      // a user-controlled reaction, but any resulting OnDone can be inlined in
+      // the executor to which this callback is dispatched.
      read_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      reactor_.load(std::memory_order_relaxed)->OnReadDone(ok);
-                      MaybeDone();
+                    [this, reactor](bool ok) {
+                      reactor->OnReadDone(ok);
+                      this->MaybeDone(/*inlineable_ondone=*/true);
                    },
-                    &read_ops_, false);
+                    &read_ops_, /*can_inline=*/false);
      read_ops_.set_core_cq_tag(&read_tag_);
      this->BindReactor(reactor);
      this->MaybeCallOnCancel(reactor);
-      this->MaybeDone();
+      // Inlineable OnDone can be false here because there is no read
+      // reactor that has an inlineable OnDone; this only applies to the
+      // DefaultReactor (which is unary).
+      this->MaybeDone(/*inlineable_ondone=*/false);
    }
    ~ServerCallbackReaderImpl() {}
    ResponseType* response() { return &resp_; }
-    void MaybeDone() override {
-      if (GPR_UNLIKELY(this->Unref() == 1)) {
-        reactor_.load(std::memory_order_relaxed)->OnDone();
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
+    void CallOnDone() override {
+      reactor_.load(std::memory_order_relaxed)->OnDone();
+      grpc_call* call = call_.call();
+      auto call_requester = std::move(call_requester_);

@@ -369,7 +406,6 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
-      }
-    }
+      ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+      call_requester();
+    }
    ServerReactor* reactor() override {
      return reactor_.load(std::memory_order_relaxed);

@@ -419,8 +455,13 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
                param.server_context),
            param.call, static_cast<RequestType*>(param.request),
            std::move(param.call_requester));
+    // Inlineable OnDone can be false in the CompletionOp callback because there
+    // is no write reactor that has an inlineable OnDone; this only applies to
+    // the DefaultReactor (which is unary).
    param.server_context->BeginCompletionOp(
-        param.call, [writer](bool) { writer->MaybeDone(); }, writer);
+        param.call,
+        [writer](bool) { writer->MaybeDone(/*inlineable_ondone=*/false); },
+        writer);
    ServerWriteReactor<ResponseType>* reactor = nullptr;
    if (param.status.ok()) {

@@ -467,8 +508,17 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
  class ServerCallbackWriterImpl : public ServerCallbackWriter<ResponseType> {
   public:
    void Finish(::grpc::Status s) override {
-      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
-                      false);
+      // A finish tag with only MaybeDone can have its callback inlined
+      // regardless even if OnDone is not inlineable because this callback just
+      // checks a ref and then decides whether or not to dispatch OnDone.
+      finish_tag_.Set(call_.call(),
+                      [this](bool) {
+                        // Inlineable OnDone can be false here because there is
+                        // no write reactor that has an inlineable OnDone; this
+                        // only applies to the DefaultReactor (which is unary).
+                        this->MaybeDone(/*inlineable_ondone=*/false);
+                      },
+                      &finish_ops_, /*can_inline=*/true);
      finish_ops_.set_core_cq_tag(&finish_tag_);
      if (!ctx_->sent_initial_metadata_) {

@@ -486,13 +536,17 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
    void SendInitialMetadata() override {
      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
      this->Ref();
+      // The callback for this function should not be inlined because it invokes
+      // a user-controlled reaction, but any resulting OnDone can be inlined in
+      // the executor to which this callback is dispatched.
      meta_tag_.Set(call_.call(),
                    [this](bool ok) {
-                      reactor_.load(std::memory_order_relaxed)
-                          ->OnSendInitialMetadataDone(ok);
-                      MaybeDone();
+                      ServerWriteReactor<ResponseType>* reactor =
+                          reactor_.load(std::memory_order_relaxed);
+                      reactor->OnSendInitialMetadataDone(ok);
+                      this->MaybeDone(/*inlineable_ondone=*/true);
                    },
-                    &meta_ops_, false);
+                    &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {

@@ -547,24 +601,28 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
    void SetupReactor(ServerWriteReactor<ResponseType>* reactor) {
      reactor_.store(reactor, std::memory_order_relaxed);
-      write_tag_.Set(
-          call_.call(),
-          [this](bool ok) {
-            reactor_.load(std::memory_order_relaxed)->OnWriteDone(ok);
-            MaybeDone();
+      // The callback for this function should not be inlined because it invokes
+      // a user-controlled reaction, but any resulting OnDone can be inlined in
+      // the executor to which this callback is dispatched.
+      write_tag_.Set(call_.call(),
+                     [this, reactor](bool ok) {
+                       reactor->OnWriteDone(ok);
+                       this->MaybeDone(/*inlineable_ondone=*/true);
                     },
-          &write_ops_, false);
+                     &write_ops_, /*can_inline=*/false);
      write_ops_.set_core_cq_tag(&write_tag_);
      this->BindReactor(reactor);
      this->MaybeCallOnCancel(reactor);
-      this->MaybeDone();
+      // Inlineable OnDone can be false here because there is no write
+      // reactor that has an inlineable OnDone; this only applies to the
+      // DefaultReactor (which is unary).
+      this->MaybeDone(/*inlineable_ondone=*/false);
    }
    ~ServerCallbackWriterImpl() { req_->~RequestType(); }
    const RequestType* request() { return req_; }
-    void MaybeDone() override {
-      if (GPR_UNLIKELY(this->Unref() == 1)) {
-        reactor_.load(std::memory_order_relaxed)->OnDone();
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
+    void CallOnDone() override {
+      reactor_.load(std::memory_order_relaxed)->OnDone();
+      grpc_call* call = call_.call();
+      auto call_requester = std::move(call_requester_);

@@ -572,7 +630,6 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
-      }
-    }
+      ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+      call_requester();
+    }
    ServerReactor* reactor() override {
      return reactor_.load(std::memory_order_relaxed);

@@ -620,8 +677,13 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
            static_cast<::grpc_impl::CallbackServerContext*>(
                param.server_context),
            param.call, std::move(param.call_requester));
+    // Inlineable OnDone can be false in the CompletionOp callback because there
+    // is no bidi reactor that has an inlineable OnDone; this only applies to
+    // the DefaultReactor (which is unary).
    param.server_context->BeginCompletionOp(
-        param.call, [stream](bool) { stream->MaybeDone(); }, stream);
+        param.call,
+        [stream](bool) { stream->MaybeDone(/*inlineable_ondone=*/false); },
+        stream);
    ServerBidiReactor<RequestType, ResponseType>* reactor = nullptr;
    if (param.status.ok()) {

@@ -652,8 +714,17 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
      : public ServerCallbackReaderWriter<RequestType, ResponseType> {
   public:
    void Finish(::grpc::Status s) override {
-      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
-                      false);
+      // A finish tag with only MaybeDone can have its callback inlined
+      // regardless even if OnDone is not inlineable because this callback just
+      // checks a ref and then decides whether or not to dispatch OnDone.
+      finish_tag_.Set(call_.call(),
+                      [this](bool) {
+                        // Inlineable OnDone can be false here because there is
+                        // no bidi reactor that has an inlineable OnDone; this
+                        // only applies to the DefaultReactor (which is unary).
+                        this->MaybeDone(/*inlineable_ondone=*/false);
+                      },
+                      &finish_ops_, /*can_inline=*/true);
      finish_ops_.set_core_cq_tag(&finish_tag_);
      if (!ctx_->sent_initial_metadata_) {

@@ -671,13 +742,17 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
    void SendInitialMetadata() override {
      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
      this->Ref();
+      // The callback for this function should not be inlined because it invokes
+      // a user-controlled reaction, but any resulting OnDone can be inlined in
+      // the executor to which this callback is dispatched.
      meta_tag_.Set(call_.call(),
                    [this](bool ok) {
-                      reactor_.load(std::memory_order_relaxed)
-                          ->OnSendInitialMetadataDone(ok);
-                      MaybeDone();
+                      ServerBidiReactor<RequestType, ResponseType>* reactor =
+                          reactor_.load(std::memory_order_relaxed);
+                      reactor->OnSendInitialMetadataDone(ok);
+                      this->MaybeDone(/*inlineable_ondone=*/true);
                    },
-                    &meta_ops_, false);
+                    &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {

@@ -733,28 +808,32 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
    void SetupReactor(ServerBidiReactor<RequestType, ResponseType>* reactor) {
      reactor_.store(reactor, std::memory_order_relaxed);
-      write_tag_.Set(
-          call_.call(),
-          [this](bool ok) {
-            reactor_.load(std::memory_order_relaxed)->OnWriteDone(ok);
-            MaybeDone();
+      // The callbacks for these functions should not be inlined because they
+      // invoke user-controlled reactions, but any resulting OnDones can be
+      // inlined in the executor to which a callback is dispatched.
+      write_tag_.Set(call_.call(),
+                     [this, reactor](bool ok) {
+                       reactor->OnWriteDone(ok);
+                       this->MaybeDone(/*inlineable_ondone=*/true);
                     },
-          &write_ops_, false);
+                     &write_ops_, /*can_inline=*/false);
      write_ops_.set_core_cq_tag(&write_tag_);
      read_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      reactor_.load(std::memory_order_relaxed)->OnReadDone(ok);
-                      MaybeDone();
+                    [this, reactor](bool ok) {
+                      reactor->OnReadDone(ok);
+                      this->MaybeDone(/*inlineable_ondone=*/true);
                    },
-                    &read_ops_, false);
+                    &read_ops_, /*can_inline=*/false);
      read_ops_.set_core_cq_tag(&read_tag_);
      this->BindReactor(reactor);
      this->MaybeCallOnCancel(reactor);
-      this->MaybeDone();
+      // Inlineable OnDone can be false here because there is no bidi
+      // reactor that has an inlineable OnDone; this only applies to the
+      // DefaultReactor (which is unary).
+      this->MaybeDone(/*inlineable_ondone=*/false);
    }
-    void MaybeDone() override {
-      if (GPR_UNLIKELY(this->Unref() == 1)) {
-        reactor_.load(std::memory_order_relaxed)->OnDone();
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
+    void CallOnDone() override {
+      reactor_.load(std::memory_order_relaxed)->OnDone();
+      grpc_call* call = call_.call();
+      auto call_requester = std::move(call_requester_);

@@ -762,7 +841,6 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
-      }
-    }
+      ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+      call_requester();
+    }
    ServerReactor* reactor() override {
      return reactor_.load(std::memory_order_relaxed);

include/grpcpp/impl/codegen/server_callback_impl.h

@@ -73,11 +73,33 @@ class ServerCallbackCall {
  public:
   virtual ~ServerCallbackCall() {}
-  // This object is responsible for tracking when it is safe to call
-  // OnCancel. This function should not be called until after the method handler
-  // is done and the RPC has completed with a cancellation. This is tracked by
-  // counting how many of these conditions have been met and calling OnCancel
-  // when none remain unmet.
+  // This object is responsible for tracking when it is safe to call OnDone and
+  // OnCancel. OnDone should not be called until the method handler is complete,
+  // Finish has been called, the ServerContext CompletionOp (which tracks
+  // cancellation or successful completion) has completed, and all outstanding
+  // Read/Write actions have seen their reactions. OnCancel should not be called
+  // until after the method handler is done and the RPC has completed with a
+  // cancellation. This is tracked by counting how many of these conditions have
+  // been met and calling OnCancel when none remain unmet.
+
+  // Public versions of MaybeDone: one where we don't know the reactor in
+  // advance (used for the ServerContext CompletionOp), and one for where we
+  // know the inlineability of the OnDone reaction. You should set the inline
+  // flag to true if either the Reactor is InternalInlineable() or if this
+  // callback is already being forced to run dispatched to an executor
+  // (typically because it contains additional work than just the MaybeDone).
+  void MaybeDone() {
+    if (GPR_UNLIKELY(Unref() == 1)) {
+      ScheduleOnDone(reactor()->InternalInlineable());
+    }
+  }
+  void MaybeDone(bool inline_ondone) {
+    if (GPR_UNLIKELY(Unref() == 1)) {
+      ScheduleOnDone(inline_ondone);
+    }
+  }
   // Fast version called with known reactor passed in, used from derived
   // classes, typically in non-cancel case

@@ -101,14 +123,17 @@ class ServerCallbackCall {
   /// Increases the reference count
   void Ref() { callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); }
-  /// Decreases the reference count and returns the previous value
-  int Unref() {
-    return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel);
-  }
  private:
   virtual ServerReactor* reactor() = 0;
-  virtual void MaybeDone() = 0;
+  // CallOnDone performs the work required at completion of the RPC: invoking
+  // the OnDone function and doing all necessary cleanup. This function is only
+  // ever invoked on a fully-Unref'fed ServerCallbackCall.
+  virtual void CallOnDone() = 0;
+  // If the OnDone reaction is inlineable, execute it inline. Otherwise send it
+  // to an executor.
+  void ScheduleOnDone(bool inline_ondone);
   // If the OnCancel reaction is inlineable, execute it inline. Otherwise send
   // it to an executor.

@@ -121,6 +146,11 @@ class ServerCallbackCall {
         1, std::memory_order_acq_rel) == 1;
   }
+  /// Decreases the reference count and returns the previous value
+  int Unref() {
+    return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel);
+  }
   std::atomic_int on_cancel_conditions_remaining_{2};
   std::atomic_int callbacks_outstanding_{
       3};  // reserve for start, Finish, and CompletionOp
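
To restate the counting scheme described in the new comments in a language-neutral way, the following is a small, hypothetical Python sketch (not gRPC code): the completion of an RPC is reference-counted across the outstanding callbacks (three are reserved up front, mirroring callbacks_outstanding_{3}), the final unref triggers the OnDone work, and the caller's inlineability hint decides whether that work runs inline or is handed to an executor::

    import threading
    from concurrent.futures import ThreadPoolExecutor

    _executor = ThreadPoolExecutor(max_workers=1)


    class CallbackCall:
        """Illustration only: mirrors the ref-counting described above."""

        def __init__(self, on_done):
            self._on_done = on_done
            self._lock = threading.Lock()
            # Reserved for the start of the handler, Finish, and the
            # CompletionOp, like callbacks_outstanding_{3} in the C++ header.
            self._outstanding = 3

        def ref(self):
            with self._lock:
                self._outstanding += 1

        def _unref(self):
            # Returns the count after decrementing; the C++ Unref returns the
            # previous value and compares against 1, which is equivalent.
            with self._lock:
                self._outstanding -= 1
                return self._outstanding

        def maybe_done(self, inlineable_ondone):
            # Only the final unref schedules OnDone, either inline or
            # dispatched to an executor thread.
            if self._unref() == 0:
                if inlineable_ondone:
                    self._on_done()
                else:
                    _executor.submit(self._on_done)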

include/grpcpp/impl/codegen/server_context_impl.h

@@ -474,7 +474,7 @@ class ServerContextBase {
    ::grpc::Status status() const { return status_; }
   private:
-    void MaybeDone() override {}
+    void CallOnDone() override {}
    ::grpc_impl::internal::ServerReactor* reactor() override {
      return reactor_;
    }

include/grpcpp/security/tls_credentials_options.h

@@ -55,12 +55,13 @@ class TlsKeyMaterialsConfig {
   }
   int version() const { return version_; }
-  /** Setter for key materials that will be called by the user. The setter
-   * transfers ownership of the arguments to the config. **/
-  void set_pem_root_certs(grpc::string pem_root_certs);
+  /** Setter for key materials that will be called by the user. Ownership of the
+   * arguments will not be transferred. **/
+  void set_pem_root_certs(const grpc::string& pem_root_certs);
   void add_pem_key_cert_pair(const PemKeyCertPair& pem_key_cert_pair);
-  void set_key_materials(grpc::string pem_root_certs,
-                         std::vector<PemKeyCertPair> pem_key_cert_pair_list);
+  void set_key_materials(
+      const grpc::string& pem_root_certs,
+      const std::vector<PemKeyCertPair>& pem_key_cert_pair_list);
   void set_version(int version) { version_ = version; };
  private:

@@ -80,30 +81,26 @@ class TlsKeyMaterialsConfig {
 class TlsCredentialReloadArg {
  public:
   /** TlsCredentialReloadArg does not take ownership of the C arg that is passed
-   * to the constructor. One must remember to free any memory allocated to the C
-   * arg after using the setter functions below. **/
+   * to the constructor. One must remember to free any memory allocated to the
+   * C arg after using the setter functions below. **/
   TlsCredentialReloadArg(grpc_tls_credential_reload_arg* arg);
   ~TlsCredentialReloadArg();
-  /** Getters for member fields. The callback function is not exposed.
-   * They return the corresponding fields of the underlying C arg. In the case
-   * of the key materials config, it creates a new instance of the C++ key
-   * materials config from the underlying C grpc_tls_key_materials_config. **/
+  /** Getters for member fields. **/
   void* cb_user_data() const;
   bool is_pem_key_cert_pair_list_empty() const;
   grpc_ssl_certificate_config_reload_status status() const;
   grpc::string error_details() const;
-  /** Setters for member fields. They modify the fields of the underlying C arg.
-   * The setters for the key_materials_config and the error_details allocate
-   * memory when modifying c_arg_, so one must remember to free c_arg_'s
-   * original key_materials_config or error_details after using the appropriate
-   * setter function.
-   * **/
+  /** Setters for member fields. Ownership of the arguments will not be
+   * transferred. **/
   void set_cb_user_data(void* cb_user_data);
   void set_pem_root_certs(const grpc::string& pem_root_certs);
   void add_pem_key_cert_pair(
-      TlsKeyMaterialsConfig::PemKeyCertPair pem_key_cert_pair);
+      const TlsKeyMaterialsConfig::PemKeyCertPair& pem_key_cert_pair);
+  void set_key_materials(const grpc::string& pem_root_certs,
+                         std::vector<TlsKeyMaterialsConfig::PemKeyCertPair>
+                             pem_key_cert_pair_list);
   void set_key_materials_config(
       const std::shared_ptr<TlsKeyMaterialsConfig>& key_materials_config);
   void set_status(grpc_ssl_certificate_config_reload_status status);

@@ -187,8 +184,7 @@ class TlsServerAuthorizationCheckArg {
   TlsServerAuthorizationCheckArg(grpc_tls_server_authorization_check_arg* arg);
   ~TlsServerAuthorizationCheckArg();
-  /** Getters for member fields. They return the corresponding fields of the
-   * underlying C arg.**/
+  /** Getters for member fields. **/
   void* cb_user_data() const;
   int success() const;
   grpc::string target_name() const;

@@ -197,12 +193,7 @@ class TlsServerAuthorizationCheckArg {
   grpc_status_code status() const;
   grpc::string error_details() const;
-  /** Setters for member fields. They modify the fields of the underlying C arg.
-   * The setters for target_name, peer_cert, and error_details allocate memory
-   * when modifying c_arg_, so one must remember to free c_arg_'s original
-   * target_name, peer_cert, or error_details after using the appropriate setter
-   * function.
-   * **/
+  /** Setters for member fields. **/
   void set_cb_user_data(void* cb_user_data);
   void set_success(int success);
   void set_target_name(const grpc::string& target_name);

setup.py

@@ -241,7 +241,7 @@ if "linux" in sys.platform:
 if not "win32" in sys.platform:
     EXTENSION_LIBRARIES += ('m',)
 if "win32" in sys.platform:
-    EXTENSION_LIBRARIES += ('advapi32', 'ws2_32',)
+    EXTENSION_LIBRARIES += ('advapi32', 'ws2_32', 'dbghelp',)
 if BUILD_WITH_SYSTEM_OPENSSL:
     EXTENSION_LIBRARIES += ('ssl', 'crypto',)
 if BUILD_WITH_SYSTEM_ZLIB:

src/core/ext/filters/client_channel/client_channel.cc

@@ -1595,7 +1595,7 @@ void ChannelData::CreateResolvingLoadBalancingPolicyLocked() {
   LoadBalancingPolicy::Args lb_args;
   lb_args.combiner = combiner_;
   lb_args.channel_control_helper =
-      grpc_core::MakeUnique<ClientChannelControlHelper>(this);
+      absl::make_unique<ClientChannelControlHelper>(this);
   lb_args.args = channel_args_;
   grpc_core::UniquePtr<char> target_uri(gpr_strdup(target_uri_.get()));
   resolving_lb_policy_.reset(new ResolvingLoadBalancingPolicy(

@@ -1871,7 +1871,7 @@ void ChannelData::StartTransportOpLocked(void* arg, grpc_error* /*ignored*/) {
                                 MemoryOrder::RELEASE);
     chand->UpdateStateAndPickerLocked(
         GRPC_CHANNEL_SHUTDOWN, "shutdown from API",
-        grpc_core::MakeUnique<LoadBalancingPolicy::TransientFailurePicker>(
+        absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
             GRPC_ERROR_REF(op->disconnect_with_error)));
   }
 }

src/core/ext/filters/client_channel/http_connect_handshaker.cc

@@ -385,5 +385,5 @@ void grpc_http_connect_register_handshaker_factory() {
   using namespace grpc_core;
   HandshakerRegistry::RegisterHandshakerFactory(
       true /* at_start */, HANDSHAKER_CLIENT,
-      grpc_core::MakeUnique<HttpConnectHandshakerFactory>());
+      absl::make_unique<HttpConnectHandshakerFactory>());
 }

src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc

@@ -714,9 +714,9 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
     client_stats = parent_->lb_calld_->client_stats()->Ref();
   }
   parent_->channel_control_helper()->UpdateState(
-      state, grpc_core::MakeUnique<Picker>(parent_.get(), parent_->serverlist_,
-                                           std::move(picker),
-                                           std::move(client_stats)));
+      state,
+      absl::make_unique<Picker>(parent_.get(), parent_->serverlist_,
+                                std::move(picker), std::move(client_stats)));
 }
 void GrpcLb::Helper::RequestReresolution() {

@@ -1946,7 +1946,7 @@ bool maybe_add_client_load_reporting_filter(grpc_channel_stack_builder* builder,
 void grpc_lb_policy_grpclb_init() {
   grpc_core::LoadBalancingPolicyRegistry::Builder::
       RegisterLoadBalancingPolicyFactory(
-          grpc_core::MakeUnique<grpc_core::GrpcLbFactory>());
+          absl::make_unique<grpc_core::GrpcLbFactory>());
   grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                    maybe_add_client_load_reporting_filter,

src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc

@@ -201,7 +201,7 @@ void PickFirst::AttemptToConnectUsingLatestUpdateArgsLocked() {
         GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
     channel_control_helper()->UpdateState(
         GRPC_CHANNEL_TRANSIENT_FAILURE,
-        grpc_core::MakeUnique<TransientFailurePicker>(error));
+        absl::make_unique<TransientFailurePicker>(error));
     return;
   }
   // If one of the subchannels in the new list is already in state

@@ -319,10 +319,10 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
           GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
       p->channel_control_helper()->UpdateState(
           GRPC_CHANNEL_TRANSIENT_FAILURE,
-          grpc_core::MakeUnique<TransientFailurePicker>(error));
+          absl::make_unique<TransientFailurePicker>(error));
     } else {
       p->channel_control_helper()->UpdateState(
-          GRPC_CHANNEL_CONNECTING, grpc_core::MakeUnique<QueuePicker>(p->Ref(
-                                       DEBUG_LOCATION, "QueuePicker")));
+          GRPC_CHANNEL_CONNECTING, absl::make_unique<QueuePicker>(p->Ref(
+                                       DEBUG_LOCATION, "QueuePicker")));
     }
   } else {

@@ -338,7 +338,7 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
       p->selected_ = nullptr;
       p->subchannel_list_.reset();
       p->channel_control_helper()->UpdateState(
-          GRPC_CHANNEL_IDLE, grpc_core::MakeUnique<QueuePicker>(
-                                 p->Ref(DEBUG_LOCATION, "QueuePicker")));
+          GRPC_CHANNEL_IDLE, absl::make_unique<QueuePicker>(
+                                 p->Ref(DEBUG_LOCATION, "QueuePicker")));
     } else {
       // This is unlikely but can happen when a subchannel has been asked

@@ -347,10 +347,10 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
       if (connectivity_state == GRPC_CHANNEL_READY) {
         p->channel_control_helper()->UpdateState(
             GRPC_CHANNEL_READY,
-            grpc_core::MakeUnique<Picker>(subchannel()->Ref()));
+            absl::make_unique<Picker>(subchannel()->Ref()));
       } else {  // CONNECTING
         p->channel_control_helper()->UpdateState(
-            connectivity_state, grpc_core::MakeUnique<QueuePicker>(
-                                    p->Ref(DEBUG_LOCATION, "QueuePicker")));
+            connectivity_state, absl::make_unique<QueuePicker>(
+                                    p->Ref(DEBUG_LOCATION, "QueuePicker")));
       }
     }

@@ -395,7 +395,7 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
             GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
         p->channel_control_helper()->UpdateState(
             GRPC_CHANNEL_TRANSIENT_FAILURE,
-            grpc_core::MakeUnique<TransientFailurePicker>(error));
+            absl::make_unique<TransientFailurePicker>(error));
       }
     }
     sd->CheckConnectivityStateAndStartWatchingLocked();

@@ -406,7 +406,7 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
       // Only update connectivity state in case 1.
       if (subchannel_list() == p->subchannel_list_.get()) {
         p->channel_control_helper()->UpdateState(
-            GRPC_CHANNEL_CONNECTING, grpc_core::MakeUnique<QueuePicker>(p->Ref(
-                                         DEBUG_LOCATION, "QueuePicker")));
+            GRPC_CHANNEL_CONNECTING, absl::make_unique<QueuePicker>(p->Ref(
+                                         DEBUG_LOCATION, "QueuePicker")));
       }
       break;

@@ -446,7 +446,7 @@ void PickFirst::PickFirstSubchannelData::ProcessUnselectedReadyLocked() {
   }
   p->selected_ = this;
   p->channel_control_helper()->UpdateState(
-      GRPC_CHANNEL_READY, grpc_core::MakeUnique<Picker>(subchannel()->Ref()));
+      GRPC_CHANNEL_READY, absl::make_unique<Picker>(subchannel()->Ref()));
   for (size_t i = 0; i < subchannel_list()->num_subchannels(); ++i) {
     if (i != Index()) {
       subchannel_list()->subchannel(i)->ShutdownLocked();

@@ -503,7 +503,7 @@ class PickFirstFactory : public LoadBalancingPolicyFactory {
 void grpc_lb_policy_pick_first_init() {
   grpc_core::LoadBalancingPolicyRegistry::Builder::
       RegisterLoadBalancingPolicyFactory(
-          grpc_core::MakeUnique<grpc_core::PickFirstFactory>());
+          absl::make_unique<grpc_core::PickFirstFactory>());
 }
 void grpc_lb_policy_pick_first_shutdown() {}

src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc

@@ -322,12 +322,12 @@ void RoundRobin::RoundRobinSubchannelList::
   if (num_ready_ > 0) {
     /* 1) READY */
     p->channel_control_helper()->UpdateState(
-        GRPC_CHANNEL_READY, grpc_core::MakeUnique<Picker>(p, this));
+        GRPC_CHANNEL_READY, absl::make_unique<Picker>(p, this));
   } else if (num_connecting_ > 0) {
     /* 2) CONNECTING */
     p->channel_control_helper()->UpdateState(
-        GRPC_CHANNEL_CONNECTING, grpc_core::MakeUnique<QueuePicker>(
-                                     p->Ref(DEBUG_LOCATION, "QueuePicker")));
+        GRPC_CHANNEL_CONNECTING,
+        absl::make_unique<QueuePicker>(p->Ref(DEBUG_LOCATION, "QueuePicker")));
   } else if (num_transient_failure_ == num_subchannels()) {
     /* 3) TRANSIENT_FAILURE */
     grpc_error* error =

@@ -336,7 +336,7 @@ void RoundRobin::RoundRobinSubchannelList::
             GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
     p->channel_control_helper()->UpdateState(
         GRPC_CHANNEL_TRANSIENT_FAILURE,
-        grpc_core::MakeUnique<TransientFailurePicker>(error));
+        absl::make_unique<TransientFailurePicker>(error));
   }
 }

@@ -453,7 +453,7 @@ void RoundRobin::UpdateLocked(UpdateArgs args) {
             GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
     channel_control_helper()->UpdateState(
         GRPC_CHANNEL_TRANSIENT_FAILURE,
-        grpc_core::MakeUnique<TransientFailurePicker>(error));
+        absl::make_unique<TransientFailurePicker>(error));
     subchannel_list_ = std::move(latest_pending_subchannel_list_);
   } else if (subchannel_list_ == nullptr) {
     // If there is no current list, immediately promote the new list to

@@ -498,7 +498,7 @@ class RoundRobinFactory : public LoadBalancingPolicyFactory {
 void grpc_lb_policy_round_robin_init() {
   grpc_core::LoadBalancingPolicyRegistry::Builder::
       RegisterLoadBalancingPolicyFactory(
-          grpc_core::MakeUnique<grpc_core::RoundRobinFactory>());
+          absl::make_unique<grpc_core::RoundRobinFactory>());
 }
 void grpc_lb_policy_round_robin_shutdown() {}

@ -148,7 +148,7 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) {
LoadBalancingPolicy::Args args; LoadBalancingPolicy::Args args;
args.combiner = parent_->combiner(); args.combiner = parent_->combiner();
args.args = parent_->args_; args.args = parent_->args_;
args.channel_control_helper = grpc_core::MakeUnique<Helper>(parent_->Ref()); args.channel_control_helper = absl::make_unique<Helper>(parent_->Ref());
parent_->child_policy_ = parent_->child_policy_ =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
"xds_experimental", std::move(args)); "xds_experimental", std::move(args));
@ -173,7 +173,7 @@ void CdsLb::ClusterWatcher::OnError(grpc_error* error) {
if (parent_->child_policy_ == nullptr) { if (parent_->child_policy_ == nullptr) {
parent_->channel_control_helper()->UpdateState( parent_->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_core::MakeUnique<TransientFailurePicker>(error)); absl::make_unique<TransientFailurePicker>(error));
} else { } else {
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} }
@ -273,7 +273,7 @@ void CdsLb::UpdateLocked(UpdateArgs args) {
xds_client_->CancelClusterDataWatch( xds_client_->CancelClusterDataWatch(
StringView(old_config->cluster().c_str()), cluster_watcher_); StringView(old_config->cluster().c_str()), cluster_watcher_);
} }
auto watcher = grpc_core::MakeUnique<ClusterWatcher>(Ref()); auto watcher = absl::make_unique<ClusterWatcher>(Ref());
cluster_watcher_ = watcher.get(); cluster_watcher_ = watcher.get();
xds_client_->WatchClusterData(StringView(config_->cluster().c_str()), xds_client_->WatchClusterData(StringView(config_->cluster().c_str()),
std::move(watcher)); std::move(watcher));
@ -335,7 +335,7 @@ class CdsFactory : public LoadBalancingPolicyFactory {
void grpc_lb_policy_cds_init() { void grpc_lb_policy_cds_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder:: grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory( RegisterLoadBalancingPolicyFactory(
grpc_core::MakeUnique<grpc_core::CdsFactory>()); absl::make_unique<grpc_core::CdsFactory>());
} }
void grpc_lb_policy_cds_shutdown() {} void grpc_lb_policy_cds_shutdown() {}

@ -809,7 +809,7 @@ void XdsLb::UpdateLocked(UpdateArgs args) {
xds_client()->CancelEndpointDataWatch(StringView(old_eds_service_name), xds_client()->CancelEndpointDataWatch(StringView(old_eds_service_name),
endpoint_watcher_); endpoint_watcher_);
} }
auto watcher = grpc_core::MakeUnique<EndpointWatcher>( auto watcher = absl::make_unique<EndpointWatcher>(
Ref(DEBUG_LOCATION, "EndpointWatcher")); Ref(DEBUG_LOCATION, "EndpointWatcher"));
endpoint_watcher_ = watcher.get(); endpoint_watcher_ = watcher.get();
xds_client()->WatchEndpointData(StringView(eds_service_name()), xds_client()->WatchEndpointData(StringView(eds_service_name()),
@ -1060,7 +1060,7 @@ void XdsLb::UpdateXdsPickerLocked() {
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
channel_control_helper()->UpdateState( channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_core::MakeUnique<TransientFailurePicker>(error)); absl::make_unique<TransientFailurePicker>(error));
return; return;
} }
priorities_[current_priority_]->UpdateXdsPickerLocked(); priorities_[current_priority_]->UpdateXdsPickerLocked();
@ -1150,7 +1150,7 @@ XdsLb::LocalityMap::LocalityMap(RefCountedPtr<XdsLb> xds_policy,
if (priority_ == 0) { if (priority_ == 0) {
xds_policy_->channel_control_helper()->UpdateState( xds_policy_->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, GRPC_CHANNEL_CONNECTING,
grpc_core::MakeUnique<QueuePicker>( absl::make_unique<QueuePicker>(
xds_policy_->Ref(DEBUG_LOCATION, "QueuePicker"))); xds_policy_->Ref(DEBUG_LOCATION, "QueuePicker")));
} }
} }
@ -1225,7 +1225,7 @@ void XdsLb::LocalityMap::UpdateXdsPickerLocked() {
} }
xds_policy()->channel_control_helper()->UpdateState( xds_policy()->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, GRPC_CHANNEL_READY,
grpc_core::MakeUnique<LocalityPicker>( absl::make_unique<LocalityPicker>(
xds_policy_->Ref(DEBUG_LOCATION, "LocalityPicker"), xds_policy_->Ref(DEBUG_LOCATION, "LocalityPicker"),
std::move(picker_list))); std::move(picker_list)));
} }
@ -1870,7 +1870,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
void grpc_lb_policy_xds_init() { void grpc_lb_policy_xds_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder:: grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory( RegisterLoadBalancingPolicyFactory(
grpc_core::MakeUnique<grpc_core::XdsFactory>()); absl::make_unique<grpc_core::XdsFactory>());
} }
void grpc_lb_policy_xds_shutdown() {} void grpc_lb_policy_xds_shutdown() {}

@ -499,7 +499,7 @@ void grpc_resolver_dns_ares_init() {
} }
grpc_set_resolver_impl(&ares_resolver); grpc_set_resolver_impl(&ares_resolver);
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::AresDnsResolverFactory>()); absl::make_unique<grpc_core::AresDnsResolverFactory>());
} else { } else {
g_use_ares_dns_resolver = false; g_use_ares_dns_resolver = false;
} }

@ -173,7 +173,7 @@ class GrpcPolledFdFactoryLibuv : public GrpcPolledFdFactory {
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory( std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(
Combiner* combiner) { Combiner* combiner) {
return grpc_core::MakeUnique<GrpcPolledFdFactoryLibuv>(); return absl::make_unique<GrpcPolledFdFactoryLibuv>();
} }
} // namespace grpc_core } // namespace grpc_core

@ -99,7 +99,7 @@ class GrpcPolledFdFactoryPosix : public GrpcPolledFdFactory {
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory( std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(
Combiner* /*combiner*/) { Combiner* /*combiner*/) {
return grpc_core::MakeUnique<GrpcPolledFdFactoryPosix>(); return absl::make_unique<GrpcPolledFdFactoryPosix>();
} }
} // namespace grpc_core } // namespace grpc_core

@ -934,7 +934,7 @@ class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory {
std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory( std::unique_ptr<GrpcPolledFdFactory> NewGrpcPolledFdFactory(
Combiner* combiner) { Combiner* combiner) {
return grpc_core::MakeUnique<GrpcPolledFdFactoryWindows>(combiner); return absl::make_unique<GrpcPolledFdFactoryWindows>(combiner);
} }
} // namespace grpc_core } // namespace grpc_core

@ -185,7 +185,7 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
"request:%p on_hostbyname_done_locked host=%s ARES_SUCCESS", r, "request:%p on_hostbyname_done_locked host=%s ARES_SUCCESS", r,
hr->host); hr->host);
if (*r->addresses_out == nullptr) { if (*r->addresses_out == nullptr) {
*r->addresses_out = grpc_core::MakeUnique<ServerAddressList>(); *r->addresses_out = absl::make_unique<ServerAddressList>();
} }
ServerAddressList& addresses = **r->addresses_out; ServerAddressList& addresses = **r->addresses_out;
for (size_t i = 0; hostent->h_addr_list[i] != nullptr; ++i) { for (size_t i = 0; hostent->h_addr_list[i] != nullptr; ++i) {
@ -480,7 +480,7 @@ static bool inner_resolve_as_ip_literal_locked(
grpc_parse_ipv6_hostport(hostport->get(), &addr, grpc_parse_ipv6_hostport(hostport->get(), &addr,
false /* log errors */)) { false /* log errors */)) {
GPR_ASSERT(*addrs == nullptr); GPR_ASSERT(*addrs == nullptr);
*addrs = grpc_core::MakeUnique<ServerAddressList>(); *addrs = absl::make_unique<ServerAddressList>();
(*addrs)->emplace_back(addr.addr, addr.len, nullptr /* args */); (*addrs)->emplace_back(addr.addr, addr.len, nullptr /* args */);
return true; return true;
} }
@ -543,7 +543,7 @@ static bool inner_maybe_resolve_localhost_manually_locked(
} }
if (gpr_stricmp(host->get(), "localhost") == 0) { if (gpr_stricmp(host->get(), "localhost") == 0) {
GPR_ASSERT(*addrs == nullptr); GPR_ASSERT(*addrs == nullptr);
*addrs = grpc_core::MakeUnique<grpc_core::ServerAddressList>(); *addrs = absl::make_unique<grpc_core::ServerAddressList>();
uint16_t numeric_port = grpc_strhtons(port->get()); uint16_t numeric_port = grpc_strhtons(port->get());
// Append the ipv6 loopback address. // Append the ipv6 loopback address.
struct sockaddr_in6 ipv6_loopback_addr; struct sockaddr_in6 ipv6_loopback_addr;

@ -305,7 +305,7 @@ void grpc_resolver_dns_native_init() {
if (gpr_stricmp(resolver.get(), "native") == 0) { if (gpr_stricmp(resolver.get(), "native") == 0) {
gpr_log(GPR_DEBUG, "Using native dns resolver"); gpr_log(GPR_DEBUG, "Using native dns resolver");
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::NativeDnsResolverFactory>()); absl::make_unique<grpc_core::NativeDnsResolverFactory>());
} else { } else {
grpc_core::ResolverRegistry::Builder::InitRegistry(); grpc_core::ResolverRegistry::Builder::InitRegistry();
grpc_core::ResolverFactory* existing_factory = grpc_core::ResolverFactory* existing_factory =
@ -313,7 +313,7 @@ void grpc_resolver_dns_native_init() {
if (existing_factory == nullptr) { if (existing_factory == nullptr) {
gpr_log(GPR_DEBUG, "Using native dns resolver"); gpr_log(GPR_DEBUG, "Using native dns resolver");
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::NativeDnsResolverFactory>()); absl::make_unique<grpc_core::NativeDnsResolverFactory>());
} }
} }
} }

@ -386,7 +386,7 @@ class FakeResolverFactory : public ResolverFactory {
void grpc_resolver_fake_init() { void grpc_resolver_fake_init() {
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::FakeResolverFactory>()); absl::make_unique<grpc_core::FakeResolverFactory>());
} }
void grpc_resolver_fake_shutdown() {} void grpc_resolver_fake_shutdown() {}

@ -176,12 +176,12 @@ class UnixResolverFactory : public ResolverFactory {
void grpc_resolver_sockaddr_init() { void grpc_resolver_sockaddr_init() {
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::IPv4ResolverFactory>()); absl::make_unique<grpc_core::IPv4ResolverFactory>());
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::IPv6ResolverFactory>()); absl::make_unique<grpc_core::IPv6ResolverFactory>());
#ifdef GRPC_HAVE_UNIX_SOCKET #ifdef GRPC_HAVE_UNIX_SOCKET
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::UnixResolverFactory>()); absl::make_unique<grpc_core::UnixResolverFactory>());
#endif #endif
} }

@ -91,7 +91,7 @@ void XdsResolver::StartLocked() {
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
xds_client_ = MakeOrphanable<XdsClient>( xds_client_ = MakeOrphanable<XdsClient>(
combiner(), interested_parties_, StringView(server_name_.get()), combiner(), interested_parties_, StringView(server_name_.get()),
grpc_core::MakeUnique<ServiceConfigWatcher>(Ref()), *args_, &error); absl::make_unique<ServiceConfigWatcher>(Ref()), *args_, &error);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, gpr_log(GPR_ERROR,
"Failed to create xds client -- channel will remain in " "Failed to create xds client -- channel will remain in "
@ -129,7 +129,7 @@ class XdsResolverFactory : public ResolverFactory {
void grpc_resolver_xds_init() { void grpc_resolver_xds_init() {
grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
grpc_core::MakeUnique<grpc_core::XdsResolverFactory>()); absl::make_unique<grpc_core::XdsResolverFactory>());
} }
void grpc_resolver_xds_shutdown() {} void grpc_resolver_xds_shutdown() {}

@ -54,7 +54,7 @@ size_t ClientChannelServiceConfigParser::ParserIndex() {
void ClientChannelServiceConfigParser::Register() { void ClientChannelServiceConfigParser::Register() {
g_client_channel_service_config_parser_index = ServiceConfig::RegisterParser( g_client_channel_service_config_parser_index = ServiceConfig::RegisterParser(
grpc_core::MakeUnique<ClientChannelServiceConfigParser>()); absl::make_unique<ClientChannelServiceConfigParser>());
} }
namespace { namespace {
@ -95,7 +95,7 @@ std::unique_ptr<ClientChannelMethodParsedConfig::RetryPolicy> ParseRetryPolicy(
const Json& json, grpc_error** error) { const Json& json, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
auto retry_policy = auto retry_policy =
grpc_core::MakeUnique<ClientChannelMethodParsedConfig::RetryPolicy>(); absl::make_unique<ClientChannelMethodParsedConfig::RetryPolicy>();
if (json.type() != Json::Type::OBJECT) { if (json.type() != Json::Type::OBJECT) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryPolicy error:should be of type object"); "field:retryPolicy error:should be of type object");
@ -387,7 +387,7 @@ ClientChannelServiceConfigParser::ParseGlobalParams(const Json& json,
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Client channel global parser", *error = GRPC_ERROR_CREATE_FROM_VECTOR("Client channel global parser",
&error_list); &error_list);
if (*error == GRPC_ERROR_NONE) { if (*error == GRPC_ERROR_NONE) {
return grpc_core::MakeUnique<ClientChannelGlobalParsedConfig>( return absl::make_unique<ClientChannelGlobalParsedConfig>(
std::move(parsed_lb_config), std::move(lb_policy_name), std::move(parsed_lb_config), std::move(lb_policy_name),
retry_throttling, health_check_service_name); retry_throttling, health_check_service_name);
} }
@ -433,7 +433,7 @@ ClientChannelServiceConfigParser::ParsePerMethodParams(const Json& json,
} }
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Client channel parser", &error_list); *error = GRPC_ERROR_CREATE_FROM_VECTOR("Client channel parser", &error_list);
if (*error == GRPC_ERROR_NONE) { if (*error == GRPC_ERROR_NONE) {
return grpc_core::MakeUnique<ClientChannelMethodParsedConfig>( return absl::make_unique<ClientChannelMethodParsedConfig>(
timeout, wait_for_ready, std::move(retry_policy)); timeout, wait_for_ready, std::move(retry_policy));
} }
return nullptr; return nullptr;

@ -188,15 +188,15 @@ ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
GPR_ASSERT(process_resolver_result != nullptr); GPR_ASSERT(process_resolver_result != nullptr);
resolver_ = ResolverRegistry::CreateResolver( resolver_ = ResolverRegistry::CreateResolver(
target_uri_.get(), args.args, interested_parties(), combiner(), target_uri_.get(), args.args, interested_parties(), combiner(),
grpc_core::MakeUnique<ResolverResultHandler>(Ref())); absl::make_unique<ResolverResultHandler>(Ref()));
// Since the validity of args has been checked when create the channel, // Since the validity of args has been checked when create the channel,
// CreateResolver() must return a non-null result. // CreateResolver() must return a non-null result.
GPR_ASSERT(resolver_ != nullptr); GPR_ASSERT(resolver_ != nullptr);
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) { if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO, "resolving_lb=%p: starting name resolution", this); gpr_log(GPR_INFO, "resolving_lb=%p: starting name resolution", this);
} }
-  channel_control_helper()->UpdateState(
-      GRPC_CHANNEL_CONNECTING, grpc_core::MakeUnique<QueuePicker>(Ref()));
+  channel_control_helper()->UpdateState(GRPC_CHANNEL_CONNECTING,
+                                        absl::make_unique<QueuePicker>(Ref()));
resolver_->StartLocked(); resolver_->StartLocked();
} }
@ -262,7 +262,7 @@ void ResolvingLoadBalancingPolicy::OnResolverError(grpc_error* error) {
"Resolver transient failure", &error, 1); "Resolver transient failure", &error, 1);
channel_control_helper()->UpdateState( channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_core::MakeUnique<TransientFailurePicker>(state_error)); absl::make_unique<TransientFailurePicker>(state_error));
} }
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} }

@ -95,7 +95,7 @@ grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigVectorTable(
const Json& json, const Json& json,
InlinedVector<SliceHashTable<const ParsedConfigVector*>::Entry, 10>* InlinedVector<SliceHashTable<const ParsedConfigVector*>::Entry, 10>*
entries) { entries) {
auto objs_vector = grpc_core::MakeUnique<ParsedConfigVector>(); auto objs_vector = absl::make_unique<ParsedConfigVector>();
InlinedVector<grpc_error*, 4> error_list; InlinedVector<grpc_error*, 4> error_list;
for (size_t i = 0; i < g_registered_parsers->size(); i++) { for (size_t i = 0; i < g_registered_parsers->size(); i++) {
grpc_error* parser_error = GRPC_ERROR_NONE; grpc_error* parser_error = GRPC_ERROR_NONE;

@ -185,7 +185,8 @@ void PopulateMetadataValue(upb_arena* arena, google_protobuf_Value* value_pb,
} }
void PopulateNode(upb_arena* arena, const XdsBootstrap::Node* node, void PopulateNode(upb_arena* arena, const XdsBootstrap::Node* node,
-                  const char* build_version, envoy_api_v2_core_Node* node_msg) {
+                  const char* build_version, const std::string& server_name,
+                  envoy_api_v2_core_Node* node_msg) {
if (node != nullptr) { if (node != nullptr) {
if (!node->id.empty()) { if (!node->id.empty()) {
envoy_api_v2_core_Node_set_id(node_msg, envoy_api_v2_core_Node_set_id(node_msg,
@ -200,6 +201,18 @@ void PopulateNode(upb_arena* arena, const XdsBootstrap::Node* node,
envoy_api_v2_core_Node_mutable_metadata(node_msg, arena); envoy_api_v2_core_Node_mutable_metadata(node_msg, arena);
PopulateMetadata(arena, metadata, node->metadata.object_value()); PopulateMetadata(arena, metadata, node->metadata.object_value());
} }
if (!server_name.empty()) {
google_protobuf_Struct* metadata =
envoy_api_v2_core_Node_mutable_metadata(node_msg, arena);
google_protobuf_Struct_FieldsEntry* field =
google_protobuf_Struct_add_fields(metadata, arena);
google_protobuf_Struct_FieldsEntry_set_key(
field, upb_strview_makez("PROXYLESS_CLIENT_HOSTNAME"));
google_protobuf_Value* value =
google_protobuf_Struct_FieldsEntry_mutable_value(field, arena);
google_protobuf_Value_set_string_value(
value, upb_strview_make(server_name.data(), server_name.size()));
}
if (!node->locality_region.empty() || !node->locality_zone.empty() || if (!node->locality_region.empty() || !node->locality_zone.empty() ||
!node->locality_subzone.empty()) { !node->locality_subzone.empty()) {
envoy_api_v2_core_Locality* locality = envoy_api_v2_core_Locality* locality =
@ -260,7 +273,7 @@ envoy_api_v2_DiscoveryRequest* CreateDiscoveryRequest(
if (build_version != nullptr) { if (build_version != nullptr) {
envoy_api_v2_core_Node* node_msg = envoy_api_v2_core_Node* node_msg =
envoy_api_v2_DiscoveryRequest_mutable_node(request, arena); envoy_api_v2_DiscoveryRequest_mutable_node(request, arena);
PopulateNode(arena, node, build_version, node_msg); PopulateNode(arena, node, build_version, "", node_msg);
} }
return request; return request;
} }
@ -960,15 +973,7 @@ grpc_slice XdsApi::CreateLrsInitialRequest(const std::string& server_name) {
envoy_api_v2_core_Node* node_msg = envoy_api_v2_core_Node* node_msg =
envoy_service_load_stats_v2_LoadStatsRequest_mutable_node(request, envoy_service_load_stats_v2_LoadStatsRequest_mutable_node(request,
arena.ptr()); arena.ptr());
PopulateNode(arena.ptr(), node_, build_version_, node_msg); PopulateNode(arena.ptr(), node_, build_version_, server_name, node_msg);
// Add cluster stats. There is only one because we only use one server name in
// one channel.
envoy_api_v2_endpoint_ClusterStats* cluster_stats =
envoy_service_load_stats_v2_LoadStatsRequest_add_cluster_stats(
request, arena.ptr());
// Set the cluster name.
envoy_api_v2_endpoint_ClusterStats_set_cluster_name(
cluster_stats, upb_strview_makez(server_name.c_str()));
return SerializeLrsRequest(request, arena.ptr()); return SerializeLrsRequest(request, arena.ptr());
} }

@ -42,7 +42,7 @@ std::unique_ptr<XdsBootstrap> XdsBootstrap::ReadFromFile(grpc_error** error) {
Json json = Json::Parse(StringViewFromSlice(contents), error); Json json = Json::Parse(StringViewFromSlice(contents), error);
grpc_slice_unref_internal(contents); grpc_slice_unref_internal(contents);
if (*error != GRPC_ERROR_NONE) return nullptr; if (*error != GRPC_ERROR_NONE) return nullptr;
return grpc_core::MakeUnique<XdsBootstrap>(std::move(json), error); return absl::make_unique<XdsBootstrap>(std::move(json), error);
} }
XdsBootstrap::XdsBootstrap(Json json, grpc_error** error) { XdsBootstrap::XdsBootstrap(Json json, grpc_error** error) {
@ -192,7 +192,7 @@ grpc_error* XdsBootstrap::ParseChannelCreds(Json* json, size_t idx,
grpc_error* XdsBootstrap::ParseNode(Json* json) { grpc_error* XdsBootstrap::ParseNode(Json* json) {
InlinedVector<grpc_error*, 1> error_list; InlinedVector<grpc_error*, 1> error_list;
node_ = grpc_core::MakeUnique<Node>(); node_ = absl::make_unique<Node>();
auto it = json->mutable_object()->find("id"); auto it = json->mutable_object()->find("id");
if (it != json->mutable_object()->end()) { if (it != json->mutable_object()->end()) {
if (it->second.type() != Json::Type::STRING) { if (it->second.type() != Json::Type::STRING) {

@ -86,13 +86,13 @@ MessageSizeParser::ParsePerMethodParams(const Json& json, grpc_error** error) {
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Message size parser", &error_list); *error = GRPC_ERROR_CREATE_FROM_VECTOR("Message size parser", &error_list);
return nullptr; return nullptr;
} }
-  return grpc_core::MakeUnique<MessageSizeParsedConfig>(
-      max_request_message_bytes, max_response_message_bytes);
+  return absl::make_unique<MessageSizeParsedConfig>(max_request_message_bytes,
+                                                    max_response_message_bytes);
} }
void MessageSizeParser::Register() { void MessageSizeParser::Register() {
g_message_size_parser_index = g_message_size_parser_index =
ServiceConfig::RegisterParser(grpc_core::MakeUnique<MessageSizeParser>()); ServiceConfig::RegisterParser(absl::make_unique<MessageSizeParser>());
} }
size_t MessageSizeParser::ParserIndex() { return g_message_size_parser_index; } size_t MessageSizeParser::ParserIndex() { return g_message_size_parser_index; }

@ -28,6 +28,8 @@
#include <memory> #include <memory>
#include <utility> #include <utility>
#include "absl/memory/memory.h"
namespace grpc_core { namespace grpc_core {
class DefaultDeleteChar { class DefaultDeleteChar {
@ -44,12 +46,6 @@ class DefaultDeleteChar {
template <typename T> template <typename T>
using UniquePtr = std::unique_ptr<T, DefaultDeleteChar>; using UniquePtr = std::unique_ptr<T, DefaultDeleteChar>;
// TODO(veblush): Replace this with absl::make_unique once abseil is added.
template <typename T, typename... Args>
inline std::unique_ptr<T> MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
} // namespace grpc_core } // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_MEMORY_H */ #endif /* GRPC_CORE_LIB_GPRPP_MEMORY_H */

@ -41,11 +41,11 @@
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include <grpc/support/string_util.h> #include <grpc/support/string_util.h>
#include "absl/container/inlined_vector.h"
#include "src/core/lib/debug/stats.h" #include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h" #include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h" #include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h" #include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/ref_counted.h" #include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h" #include "src/core/lib/gprpp/sync.h"
@ -190,7 +190,15 @@ struct grpc_fd {
grpc_iomgr_unregister_object(&iomgr_object); grpc_iomgr_unregister_object(&iomgr_object);
POLLABLE_UNREF(pollable_obj, "fd_pollable"); POLLABLE_UNREF(pollable_obj, "fd_pollable");
pollset_fds.clear();
// To clear out the allocations of pollset_fds, we need to swap its
// contents with a newly-constructed (and soon to be destructed) local
// variable of its same type. This is because InlinedVector::clear is _not_
// guaranteed to actually free up allocations and this is important since
// this object doesn't have a conventional destructor.
absl::InlinedVector<int, 1> pollset_fds_tmp;
pollset_fds_tmp.swap(pollset_fds);
gpr_mu_destroy(&pollable_mu); gpr_mu_destroy(&pollable_mu);
gpr_mu_destroy(&orphan_mu); gpr_mu_destroy(&orphan_mu);
@ -232,7 +240,7 @@ struct grpc_fd {
// Protects pollable_obj and pollset_fds. // Protects pollable_obj and pollset_fds.
gpr_mu pollable_mu; gpr_mu pollable_mu;
grpc_core::InlinedVector<int, 1> pollset_fds; // Used in PO_MULTI. absl::InlinedVector<int, 1> pollset_fds; // Used in PO_MULTI.
pollable* pollable_obj = nullptr; // Used in PO_FD. pollable* pollable_obj = nullptr; // Used in PO_FD.
grpc_core::LockfreeEvent read_closure; grpc_core::LockfreeEvent read_closure;

@ -56,6 +56,7 @@ typedef enum {
#define GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE "Composite" #define GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE "Composite"
#define GRPC_AUTHORIZATION_METADATA_KEY "authorization" #define GRPC_AUTHORIZATION_METADATA_KEY "authorization"
#define GRPC_AUTH_QUOTA_PROJECT_METADATA_KEY "x-goog-user-project"
#define GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY \ #define GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY \
"x-goog-iam-authorization-token" "x-goog-iam-authorization-token"
#define GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY "x-goog-iam-authority-selector" #define GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY "x-goog-iam-authority-selector"

@ -74,6 +74,9 @@ grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json(
} }
result.type = GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER; result.type = GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER;
// quota_project_id is optional, so we don't check the result of the copy.
grpc_copy_json_string_property(json, "quota_project_id",
&result.quota_project_id);
if (!grpc_copy_json_string_property(json, "client_secret", if (!grpc_copy_json_string_property(json, "client_secret",
&result.client_secret) || &result.client_secret) ||
!grpc_copy_json_string_property(json, "client_id", &result.client_id) || !grpc_copy_json_string_property(json, "client_id", &result.client_id) ||
@ -114,6 +117,10 @@ void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token* refresh_token) {
gpr_free(refresh_token->refresh_token); gpr_free(refresh_token->refresh_token);
refresh_token->refresh_token = nullptr; refresh_token->refresh_token = nullptr;
} }
if (refresh_token->quota_project_id != nullptr) {
gpr_free(refresh_token->quota_project_id);
refresh_token->quota_project_id = nullptr;
}
} }
// //
@ -276,6 +283,7 @@ bool grpc_oauth2_token_fetcher_credentials::get_request_metadata(
grpc_polling_entity* pollent, grpc_auth_metadata_context /*context*/, grpc_polling_entity* pollent, grpc_auth_metadata_context /*context*/,
grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata, grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
grpc_error** /*error*/) { grpc_error** /*error*/) {
maybe_add_additional_metadata(md_array);
// Check if we can use the cached token. // Check if we can use the cached token.
grpc_millis refresh_threshold = grpc_millis refresh_threshold =
GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC; GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC;
@ -453,6 +461,17 @@ void grpc_google_refresh_token_credentials::fetch_oauth2(
gpr_free(body); gpr_free(body);
} }
void grpc_google_refresh_token_credentials::maybe_add_additional_metadata(
grpc_credentials_mdelem_array* md_array) {
if (refresh_token_.quota_project_id != nullptr) {
grpc_mdelem quota_project_md = grpc_mdelem_from_slices(
grpc_core::ExternallyManagedSlice(GRPC_AUTH_QUOTA_PROJECT_METADATA_KEY),
grpc_core::ExternallyManagedSlice(refresh_token_.quota_project_id));
grpc_credentials_mdelem_array_add(md_array, quota_project_md);
GRPC_MDELEM_UNREF(quota_project_md);
}
}
grpc_google_refresh_token_credentials::grpc_google_refresh_token_credentials( grpc_google_refresh_token_credentials::grpc_google_refresh_token_credentials(
grpc_auth_refresh_token refresh_token) grpc_auth_refresh_token refresh_token)
: refresh_token_(refresh_token) {} : refresh_token_(refresh_token) {}

@ -32,12 +32,13 @@
"s&subject_token_type=%s" "s&subject_token_type=%s"
// auth_refresh_token parsing. // auth_refresh_token parsing.
-typedef struct {
+struct grpc_auth_refresh_token {
  const char* type;
  char* client_id;
  char* client_secret;
  char* refresh_token;
-} grpc_auth_refresh_token;
+  char* quota_project_id = nullptr;
+};
/// Returns 1 if the object is valid, 0 otherwise. /// Returns 1 if the object is valid, 0 otherwise.
int grpc_auth_refresh_token_is_valid( int grpc_auth_refresh_token_is_valid(
@ -90,6 +91,9 @@ class grpc_oauth2_token_fetcher_credentials : public grpc_call_credentials {
grpc_httpcli_context* httpcli_context, grpc_httpcli_context* httpcli_context,
grpc_polling_entity* pollent, grpc_iomgr_cb_func cb, grpc_polling_entity* pollent, grpc_iomgr_cb_func cb,
grpc_millis deadline) = 0; grpc_millis deadline) = 0;
// Subclasses may override this to add additional metadata beyond the
// credentials themselves.
virtual void maybe_add_additional_metadata(grpc_credentials_mdelem_array*) {}
private: private:
gpr_mu mu_; gpr_mu mu_;
@ -117,6 +121,8 @@ class grpc_google_refresh_token_credentials final
grpc_httpcli_context* httpcli_context, grpc_httpcli_context* httpcli_context,
grpc_polling_entity* pollent, grpc_iomgr_cb_func cb, grpc_polling_entity* pollent, grpc_iomgr_cb_func cb,
grpc_millis deadline) override; grpc_millis deadline) override;
void maybe_add_additional_metadata(
grpc_credentials_mdelem_array* md_array) override;
private: private:
grpc_auth_refresh_token refresh_token_; grpc_auth_refresh_token refresh_token_;

@ -165,11 +165,19 @@ static void on_credentials_metadata(void* arg, grpc_error* input_error) {
grpc_metadata_batch* mdb = grpc_metadata_batch* mdb =
batch->payload->send_initial_metadata.send_initial_metadata; batch->payload->send_initial_metadata.send_initial_metadata;
for (size_t i = 0; i < calld->md_array.size; ++i) { for (size_t i = 0; i < calld->md_array.size; ++i) {
// Only add x-goog-user-project header if not present.
if (grpc_slice_str_cmp(GRPC_MDKEY(calld->md_array.md[i]),
GRPC_AUTH_QUOTA_PROJECT_METADATA_KEY) == 0) {
add_error(&error, grpc_metadata_batch_add_tail_when_key_not_exist(
mdb, &calld->md_links[i],
GRPC_MDELEM_REF(calld->md_array.md[i])));
} else {
add_error(&error, grpc_metadata_batch_add_tail( add_error(&error, grpc_metadata_batch_add_tail(
mdb, &calld->md_links[i], mdb, &calld->md_links[i],
GRPC_MDELEM_REF(calld->md_array.md[i]))); GRPC_MDELEM_REF(calld->md_array.md[i])));
} }
} }
}
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
grpc_call_next_op(elem, batch); grpc_call_next_op(elem, batch);
} else { } else {

@ -559,10 +559,10 @@ RefCountedPtr<Handshaker> SecurityHandshakerCreate(
void SecurityRegisterHandshakerFactories() { void SecurityRegisterHandshakerFactories() {
HandshakerRegistry::RegisterHandshakerFactory( HandshakerRegistry::RegisterHandshakerFactory(
false /* at_start */, HANDSHAKER_CLIENT, false /* at_start */, HANDSHAKER_CLIENT,
grpc_core::MakeUnique<ClientSecurityHandshakerFactory>()); absl::make_unique<ClientSecurityHandshakerFactory>());
HandshakerRegistry::RegisterHandshakerFactory( HandshakerRegistry::RegisterHandshakerFactory(
false /* at_start */, HANDSHAKER_SERVER, false /* at_start */, HANDSHAKER_SERVER,
grpc_core::MakeUnique<ServerSecurityHandshakerFactory>()); absl::make_unique<ServerSecurityHandshakerFactory>());
} }
} // namespace grpc_core } // namespace grpc_core

@ -205,6 +205,19 @@ grpc_error* grpc_metadata_batch_add_tail(grpc_metadata_batch* batch,
return grpc_metadata_batch_link_tail(batch, storage); return grpc_metadata_batch_link_tail(batch, storage);
} }
grpc_error* grpc_metadata_batch_add_tail_when_key_not_exist(
grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
grpc_mdelem elem_to_add) {
auto cur = batch->list.head;
while (cur != nullptr) {
    if (grpc_slice_cmp(GRPC_MDKEY(cur->md), GRPC_MDKEY(elem_to_add)) == 0) {
      // The batch already contains an element with this key; nothing to add.
      return GRPC_ERROR_NONE;
    }
    cur = cur->next;
  }
return grpc_metadata_batch_add_tail(batch, storage, elem_to_add);
}
static void link_tail(grpc_mdelem_list* list, grpc_linked_mdelem* storage) { static void link_tail(grpc_mdelem_list* list, grpc_linked_mdelem* storage) {
assert_valid_list(list); assert_valid_list(list);
GPR_DEBUG_ASSERT(!GRPC_MDISNULL(storage->md)); GPR_DEBUG_ASSERT(!GRPC_MDISNULL(storage->md));

@ -138,6 +138,17 @@ grpc_error* grpc_metadata_batch_add_tail(
grpc_metadata_batch* batch, grpc_linked_mdelem* storage, grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
/** Add \a elem_to_add as the last element in \a batch, but only if
    \a batch does not already contain an element with the same key,
    using \a storage as backing storage for the linked list element.
    \a storage is owned by the caller and must survive for the
    lifetime of batch. This usually means it should be around for
    the lifetime of the call. Takes ownership of \a elem_to_add. */
grpc_error* grpc_metadata_batch_add_tail_when_key_not_exist(
grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
inline grpc_error* GRPC_MUST_USE_RESULT grpc_metadata_batch_add_tail( inline grpc_error* GRPC_MUST_USE_RESULT grpc_metadata_batch_add_tail(
grpc_metadata_batch* batch, grpc_linked_mdelem* storage, grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
grpc_metadata_batch_callouts_index idx) { grpc_metadata_batch_callouts_index idx) {

@ -49,7 +49,7 @@ class BoringSslCachedSession : public SslCachedSession {
std::unique_ptr<SslCachedSession> SslCachedSession::Create( std::unique_ptr<SslCachedSession> SslCachedSession::Create(
SslSessionPtr session) { SslSessionPtr session) {
return grpc_core::MakeUnique<BoringSslCachedSession>(std::move(session)); return absl::make_unique<BoringSslCachedSession>(std::move(session));
} }
} // namespace tsi } // namespace tsi

@ -67,7 +67,7 @@ class OpenSslCachedSession : public SslCachedSession {
std::unique_ptr<SslCachedSession> SslCachedSession::Create( std::unique_ptr<SslCachedSession> SslCachedSession::Create(
SslSessionPtr session) { SslSessionPtr session) {
return grpc_core::MakeUnique<OpenSslCachedSession>(std::move(session)); return absl::make_unique<OpenSslCachedSession>(std::move(session));
} }
} // namespace tsi } // namespace tsi

@ -53,7 +53,7 @@ std::unique_ptr<AltsContext> GetAltsContextFromAuthContext(
gpr_log(GPR_ERROR, "security_level is invalid."); gpr_log(GPR_ERROR, "security_level is invalid.");
return nullptr; return nullptr;
} }
return grpc_core::MakeUnique<AltsContext>(AltsContext(ctx)); return absl::make_unique<AltsContext>(AltsContext(ctx));
} }
grpc::Status AltsClientAuthzCheck( grpc::Status AltsClientAuthzCheck(

@ -16,19 +16,18 @@
* *
*/ */
#include <grpc/support/alloc.h>
#include <grpcpp/security/tls_credentials_options.h> #include <grpcpp/security/tls_credentials_options.h>
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h" #include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
#include <grpc/support/alloc.h>
#include "src/cpp/common/tls_credentials_options_util.h" #include "src/cpp/common/tls_credentials_options_util.h"
namespace grpc_impl { namespace grpc_impl {
namespace experimental { namespace experimental {
/** TLS key materials config API implementation **/ /** TLS key materials config API implementation **/
-void TlsKeyMaterialsConfig::set_pem_root_certs(grpc::string pem_root_certs) {
-  pem_root_certs_ = std::move(pem_root_certs);
+void TlsKeyMaterialsConfig::set_pem_root_certs(
+    const grpc::string& pem_root_certs) {
+  pem_root_certs_ = pem_root_certs;
} }
void TlsKeyMaterialsConfig::add_pem_key_cert_pair( void TlsKeyMaterialsConfig::add_pem_key_cert_pair(
@ -37,10 +36,10 @@ void TlsKeyMaterialsConfig::add_pem_key_cert_pair(
} }
void TlsKeyMaterialsConfig::set_key_materials( void TlsKeyMaterialsConfig::set_key_materials(
-    grpc::string pem_root_certs,
-    std::vector<PemKeyCertPair> pem_key_cert_pair_list) {
-  pem_key_cert_pair_list_ = std::move(pem_key_cert_pair_list);
-  pem_root_certs_ = std::move(pem_root_certs);
+    const grpc::string& pem_root_certs,
+    const std::vector<PemKeyCertPair>& pem_key_cert_pair_list) {
+  pem_key_cert_pair_list_ = pem_key_cert_pair_list;
+  pem_root_certs_ = pem_root_certs;
} }
/** TLS credential reload arg API implementation **/ /** TLS credential reload arg API implementation **/
@ -59,7 +58,6 @@ TlsCredentialReloadArg::~TlsCredentialReloadArg() {}
void* TlsCredentialReloadArg::cb_user_data() const { void* TlsCredentialReloadArg::cb_user_data() const {
return c_arg_->cb_user_data; return c_arg_->cb_user_data;
} }
bool TlsCredentialReloadArg::is_pem_key_cert_pair_list_empty() const { bool TlsCredentialReloadArg::is_pem_key_cert_pair_list_empty() const {
return c_arg_->key_materials_config->pem_key_cert_pair_list().empty(); return c_arg_->key_materials_config->pem_key_cert_pair_list().empty();
} }
@ -85,17 +83,46 @@ void TlsCredentialReloadArg::set_pem_root_certs(
c_arg_->key_materials_config->set_pem_root_certs(std::move(c_pem_root_certs)); c_arg_->key_materials_config->set_pem_root_certs(std::move(c_pem_root_certs));
} }
-void TlsCredentialReloadArg::add_pem_key_cert_pair(
-    TlsKeyMaterialsConfig::PemKeyCertPair pem_key_cert_pair) {
+namespace {
+
+::grpc_core::PemKeyCertPair ConvertToCorePemKeyCertPair(
+    const TlsKeyMaterialsConfig::PemKeyCertPair& pem_key_cert_pair) {
  grpc_ssl_pem_key_cert_pair* ssl_pair =
      (grpc_ssl_pem_key_cert_pair*)gpr_malloc(
          sizeof(grpc_ssl_pem_key_cert_pair));
  ssl_pair->private_key = gpr_strdup(pem_key_cert_pair.private_key.c_str());
  ssl_pair->cert_chain = gpr_strdup(pem_key_cert_pair.cert_chain.c_str());
-  ::grpc_core::PemKeyCertPair c_pem_key_cert_pair =
-      ::grpc_core::PemKeyCertPair(ssl_pair);
+  return ::grpc_core::PemKeyCertPair(ssl_pair);
+}
+
+}  // namespace
+
+void TlsCredentialReloadArg::add_pem_key_cert_pair(
+    const TlsKeyMaterialsConfig::PemKeyCertPair& pem_key_cert_pair) {
  c_arg_->key_materials_config->add_pem_key_cert_pair(
-      std::move(c_pem_key_cert_pair));
+      ConvertToCorePemKeyCertPair(pem_key_cert_pair));
}
void TlsCredentialReloadArg::set_key_materials(
const grpc::string& pem_root_certs,
std::vector<TlsKeyMaterialsConfig::PemKeyCertPair> pem_key_cert_pair_list) {
/** Initialize the |key_materials_config| field of |c_arg_|, if it has not
* already been done. **/
if (c_arg_->key_materials_config == nullptr) {
c_arg_->key_materials_config = grpc_tls_key_materials_config_create();
}
/** Convert |pem_key_cert_pair_list| to an inlined vector of ssl pairs. **/
::grpc_core::InlinedVector<::grpc_core::PemKeyCertPair, 1>
c_pem_key_cert_pair_list;
for (const auto& key_cert_pair : pem_key_cert_pair_list) {
c_pem_key_cert_pair_list.emplace_back(
ConvertToCorePemKeyCertPair(key_cert_pair));
}
/** Populate the key materials config field of |c_arg_|. **/
::grpc_core::UniquePtr<char> c_pem_root_certs(
gpr_strdup(pem_root_certs.c_str()));
c_arg_->key_materials_config->set_key_materials(std::move(c_pem_root_certs),
c_pem_key_cert_pair_list);
} }
void TlsCredentialReloadArg::set_key_materials_config( void TlsCredentialReloadArg::set_key_materials_config(
@ -288,6 +315,11 @@ TlsCredentialsOptions::TlsCredentialsOptions(
c_credentials_options_, server_verification_option); c_credentials_options_, server_verification_option);
} }
/** Whenever a TlsCredentialsOptions instance is created, the caller takes
* ownership of the c_credentials_options_ pointer (see e.g. the implementation
* of the TlsCredentials API in secure_credentials.cc). For this reason, the
* TlsCredentialsOptions destructor is not responsible for freeing
* c_credentials_options_. **/
TlsCredentialsOptions::~TlsCredentialsOptions() {} TlsCredentialsOptions::~TlsCredentialsOptions() {}
} // namespace experimental } // namespace experimental

@ -24,27 +24,59 @@
namespace grpc_impl { namespace grpc_impl {
namespace internal { namespace internal {
void ServerCallbackCall::ScheduleOnDone(bool inline_ondone) {
if (inline_ondone) {
CallOnDone();
} else {
// Unlike other uses of closure, do not Ref or Unref here since at this
// point, all the Ref'fing and Unref'fing is done for this call.
grpc_core::ExecCtx exec_ctx;
struct ClosureWithArg {
grpc_closure closure;
ServerCallbackCall* call;
explicit ClosureWithArg(ServerCallbackCall* call_arg) : call(call_arg) {
GRPC_CLOSURE_INIT(&closure,
[](void* void_arg, grpc_error*) {
ClosureWithArg* arg =
static_cast<ClosureWithArg*>(void_arg);
arg->call->CallOnDone();
delete arg;
},
this, grpc_schedule_on_exec_ctx);
}
};
ClosureWithArg* arg = new ClosureWithArg(this);
grpc_core::Executor::Run(&arg->closure, GRPC_ERROR_NONE);
}
}
void ServerCallbackCall::CallOnCancel(ServerReactor* reactor) { void ServerCallbackCall::CallOnCancel(ServerReactor* reactor) {
if (reactor->InternalInlineable()) { if (reactor->InternalInlineable()) {
reactor->OnCancel(); reactor->OnCancel();
} else { } else {
// Ref to make sure that the closure executes before the whole call gets
// destructed, and Unref within the closure.
Ref(); Ref();
grpc_core::ExecCtx exec_ctx; grpc_core::ExecCtx exec_ctx;
-    struct ClosureArg {
+    struct ClosureWithArg {
+      grpc_closure closure;
      ServerCallbackCall* call;
      ServerReactor* reactor;
-    };
-    ClosureArg* arg = new ClosureArg{this, reactor};
-    grpc_core::Executor::Run(GRPC_CLOSURE_CREATE(
-                                 [](void* void_arg, grpc_error*) {
-                                   ClosureArg* arg =
-                                       static_cast<ClosureArg*>(void_arg);
-                                   arg->reactor->OnCancel();
-                                   arg->call->MaybeDone();
-                                   delete arg;
-                                 },
-                                 arg, nullptr),
-                             GRPC_ERROR_NONE);
+      ClosureWithArg(ServerCallbackCall* call_arg, ServerReactor* reactor_arg)
+          : call(call_arg), reactor(reactor_arg) {
+        GRPC_CLOSURE_INIT(&closure,
+                          [](void* void_arg, grpc_error*) {
+                            ClosureWithArg* arg =
+                                static_cast<ClosureWithArg*>(void_arg);
+                            arg->reactor->OnCancel();
+                            arg->call->MaybeDone();
+                            delete arg;
+                          },
+                          this, grpc_schedule_on_exec_ctx);
+      }
+    };
+    ClosureWithArg* arg = new ClosureWithArg(this, reactor);
+    grpc_core::Executor::Run(&arg->closure, GRPC_ERROR_NONE);
} }
} }

@ -15,6 +15,7 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_package", "grpc_proto_library") load("//bazel:grpc_build_system.bzl", "grpc_package", "grpc_proto_library")
load("//bazel:python_rules.bzl", "py_proto_library")
grpc_package( grpc_package(
name = "core", name = "core",
@ -25,3 +26,13 @@ grpc_proto_library(
name = "stats_proto", name = "stats_proto",
srcs = ["stats.proto"], srcs = ["stats.proto"],
) )
proto_library(
name = "stats_descriptor",
srcs = ["stats.proto"],
)
py_proto_library(
name = "stats_py_pb2",
deps = [":stats_descriptor"],
)

@ -233,3 +233,55 @@ py_grpc_library(
srcs = [":test_proto_descriptor"], srcs = [":test_proto_descriptor"],
deps = [":py_test_proto"], deps = [":py_test_proto"],
) )
proto_library(
name = "worker_service_descriptor",
srcs = ["worker_service.proto"],
deps = [":control_descriptor"],
)
py_proto_library(
name = "worker_service_py_pb2",
deps = [":worker_service_descriptor"],
)
py_grpc_library(
name = "worker_service_py_pb2_grpc",
srcs = [":worker_service_descriptor"],
deps = [":worker_service_py_pb2"],
)
proto_library(
name = "stats_descriptor",
srcs = ["stats.proto"],
deps = ["//src/proto/grpc/core:stats_descriptor"],
)
py_proto_library(
name = "stats_py_pb2",
deps = [":stats_descriptor"],
)
proto_library(
name = "payloads_descriptor",
srcs = ["payloads.proto"],
)
py_proto_library(
name = "payloads_py_pb2",
deps = [":payloads_descriptor"],
)
proto_library(
name = "control_descriptor",
srcs = ["control.proto"],
deps = [
":payloads_descriptor",
":stats_descriptor",
],
)
py_proto_library(
name = "control_py_pb2",
deps = [":control_descriptor"],
)

@ -117,6 +117,9 @@ message ClientConfig {
// If 0, disabled. Else, specifies the period between gathering latency // If 0, disabled. Else, specifies the period between gathering latency
// medians in milliseconds. // medians in milliseconds.
int32 median_latency_collection_interval_millis = 20; int32 median_latency_collection_interval_millis = 20;
// Number of client processes. 0 indicates no restriction.
int32 client_processes = 21;
} }
message ClientStatus { ClientStats stats = 1; } message ClientStatus { ClientStats stats = 1; }
@ -163,6 +166,9 @@ message ServerConfig {
// Buffer pool size (no buffer pool specified if unset) // Buffer pool size (no buffer pool specified if unset)
int32 resource_quota_size = 1001; int32 resource_quota_size = 1001;
repeated ChannelArg channel_args = 1002; repeated ChannelArg channel_args = 1002;
// Number of server processes. 0 indicates no restriction.
int32 server_processes = 21;
} }
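For orientation only, a minimal sketch of how the two new fields might be set from Python, assuming the control.proto bindings are produced by the py_proto_library targets added above (the import path below is hypothetical and depends on the Bazel workspace layout):

import control_pb2  # hypothetical module name for the generated bindings

# 0 (the default) keeps the old behaviour of "no restriction".
client_config = control_pb2.ClientConfig(client_processes=4)
server_config = control_pb2.ServerConfig(server_processes=2)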
message ServerArgs { message ServerArgs {

@ -1,30 +1,5 @@
package(default_visibility = ["//visibility:public"]) package(default_visibility = ["//visibility:public"])
py_library(
name = "grpcio",
srcs = ["__init__.py"],
data = [
"//:grpc",
],
imports = ["../"],
deps = [
":utilities",
":auth",
":plugin_wrapping",
":channel",
":interceptor",
":server",
":compression",
"//src/python/grpcio/grpc/_cython:cygrpc",
"//src/python/grpcio/grpc/experimental",
"//src/python/grpcio/grpc/framework",
"@six//:six",
] + select({
"//conditions:default": ["@enum34//:enum34"],
"//:python3": [],
}),
)
py_library( py_library(
name = "auth", name = "auth",
srcs = ["_auth.py"], srcs = ["_auth.py"],
@ -85,3 +60,34 @@ py_library(
":common", ":common",
], ],
) )
py_library(
name = "_simple_stubs",
srcs = ["_simple_stubs.py"],
)
py_library(
name = "grpcio",
srcs = ["__init__.py"],
data = [
"//:grpc",
],
imports = ["../"],
deps = [
":utilities",
":auth",
":plugin_wrapping",
":channel",
":interceptor",
":server",
":compression",
":_simple_stubs",
"//src/python/grpcio/grpc/_cython:cygrpc",
"//src/python/grpcio/grpc/experimental",
"//src/python/grpcio/grpc/framework",
"@six//:six",
] + select({
"//conditions:default": ["@enum34//:enum34"],
"//:python3": [],
}),
)

@ -1879,6 +1879,11 @@ def secure_channel(target, credentials, options=None, compression=None):
A Channel. A Channel.
""" """
from grpc import _channel # pylint: disable=cyclic-import from grpc import _channel # pylint: disable=cyclic-import
from grpc.experimental import _insecure_channel_credentials
if credentials._credentials is _insecure_channel_credentials:
raise ValueError(
"secure_channel cannot be called with insecure credentials." +
" Call insecure_channel instead.")
return _channel.Channel(target, () if options is None else options, return _channel.Channel(target, () if options is None else options,
credentials._credentials, compression) credentials._credentials, compression)
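In effect, an insecure channel still has to be created via insecure_channel(); a rough sketch of the new failure mode, assuming the insecure credentials constructor referenced elsewhere in this change (grpc.insecure_channel_credentials()) is available:

import grpc

# Secure usage is unchanged.
secure = grpc.secure_channel('localhost:50051', grpc.ssl_channel_credentials())

# Passing insecure credentials to secure_channel() now raises ValueError.
try:
    grpc.secure_channel('localhost:50051', grpc.insecure_channel_credentials())
except ValueError:
    channel = grpc.insecure_channel('localhost:50051')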

@ -42,6 +42,8 @@ cdef bytes serialize(object serializer, object message):
Failure to serialize is a fatal error. Failure to serialize is a fatal error.
""" """
if isinstance(message, str):
message = message.encode('utf-8')
if serializer: if serializer:
return serializer(message) return serializer(message)
else: else:
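The two added lines normalize text messages to UTF-8 bytes before any serializer runs. A plain-Python mirror of that logic, with the untouched else branch assumed to pass the message through unchanged:

def serialize(serializer, message):
    # Text is encoded to UTF-8 bytes before the optional serializer is applied.
    if isinstance(message, str):
        message = message.encode('utf-8')
    if serializer:
        return serializer(message)
    else:
        return message  # assumption: the hidden else branch returns message as-is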

@ -353,6 +353,9 @@ async def _handle_unary_unary_rpc(object method_handler,
object loop): object loop):
# Receives request message # Receives request message
cdef bytes request_raw = await _receive_message(rpc_state, loop) cdef bytes request_raw = await _receive_message(rpc_state, loop)
if request_raw is None:
# The RPC was cancelled immediately after start on client side.
return
# Deserializes the request message # Deserializes the request message
cdef object request_message = deserialize( cdef object request_message = deserialize(
@ -384,6 +387,8 @@ async def _handle_unary_stream_rpc(object method_handler,
object loop): object loop):
# Receives request message # Receives request message
cdef bytes request_raw = await _receive_message(rpc_state, loop) cdef bytes request_raw = await _receive_message(rpc_state, loop)
if request_raw is None:
return
# Deserializes the request message # Deserializes the request message
cdef object request_message = deserialize( cdef object request_message = deserialize(
@ -486,6 +491,8 @@ async def _handle_exceptions(RPCState rpc_state, object rpc_coro, object loop):
) )
except (KeyboardInterrupt, SystemExit): except (KeyboardInterrupt, SystemExit):
raise raise
except asyncio.CancelledError:
_LOGGER.debug('RPC cancelled for servicer method [%s]', _decode(rpc_state.method()))
except _ServerStoppedError: except _ServerStoppedError:
_LOGGER.info('Aborting RPC due to server stop.') _LOGGER.info('Aborting RPC due to server stop.')
except Exception as e: except Exception as e:
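A sketch of the client-side situation these additions guard against, assuming the asyncio API is importable as grpc.experimental.aio (its location at the time) and using a placeholder method name:

import asyncio
from grpc.experimental import aio  # assumed module path for this sketch

async def main() -> None:
    async with aio.insecure_channel('localhost:50051') as channel:
        # Cancel immediately, so the server may never receive a request message
        # and its handler coroutine is cancelled.
        call = channel.unary_unary('/helloworld.Greeter/SayHello')(b'')
        call.cancel()

asyncio.run(main())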

@ -0,0 +1,450 @@
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that obviate explicit stubs and explicit channels."""
import collections
import datetime
import os
import logging
import threading
from typing import (Any, AnyStr, Callable, Dict, Iterator, Optional, Sequence,
Tuple, TypeVar, Union)
import grpc
from grpc.experimental import experimental_api
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')
OptionsType = Sequence[Tuple[str, str]]
CacheKey = Tuple[str, OptionsType, Optional[grpc.ChannelCredentials], Optional[
grpc.Compression]]
_LOGGER = logging.getLogger(__name__)
_EVICTION_PERIOD_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"
if _EVICTION_PERIOD_KEY in os.environ:
_EVICTION_PERIOD = datetime.timedelta(
seconds=float(os.environ[_EVICTION_PERIOD_KEY]))
_LOGGER.debug("Setting managed channel eviction period to %s",
_EVICTION_PERIOD)
else:
_EVICTION_PERIOD = datetime.timedelta(minutes=10)
_MAXIMUM_CHANNELS_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"
if _MAXIMUM_CHANNELS_KEY in os.environ:
_MAXIMUM_CHANNELS = int(os.environ[_MAXIMUM_CHANNELS_KEY])
_LOGGER.debug("Setting maximum managed channels to %d", _MAXIMUM_CHANNELS)
else:
_MAXIMUM_CHANNELS = 2**8
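Both values are read once when this module is imported, so an application that wants different limits has to set the variables before that import happens; a minimal sketch (the numbers are arbitrary):

import os

# Must be set before the module above is first imported in the process.
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"] = "60"
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"] = "32"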
def _create_channel(target: str, options: Sequence[Tuple[str, str]],
channel_credentials: Optional[grpc.ChannelCredentials],
compression: Optional[grpc.Compression]) -> grpc.Channel:
channel_credentials = channel_credentials or grpc.local_channel_credentials(
)
if channel_credentials._credentials is grpc.experimental._insecure_channel_credentials:
_LOGGER.debug(f"Creating insecure channel with options '{options}' " +
f"and compression '{compression}'")
return grpc.insecure_channel(target,
options=options,
compression=compression)
else:
_LOGGER.debug(
f"Creating secure channel with credentials '{channel_credentials}', "
+ f"options '{options}' and compression '{compression}'")
return grpc.secure_channel(target,
credentials=channel_credentials,
options=options,
compression=compression)
class ChannelCache:
# NOTE(rbellevi): Untyped due to reference cycle.
_singleton = None
_lock: threading.RLock = threading.RLock()
_condition: threading.Condition = threading.Condition(lock=_lock)
_eviction_ready: threading.Event = threading.Event()
_mapping: Dict[CacheKey, Tuple[grpc.Channel, datetime.datetime]]
_eviction_thread: threading.Thread
def __init__(self):
self._mapping = collections.OrderedDict()
self._eviction_thread = threading.Thread(
target=ChannelCache._perform_evictions, daemon=True)
self._eviction_thread.start()
@staticmethod
def get():
with ChannelCache._lock:
if ChannelCache._singleton is None:
ChannelCache._singleton = ChannelCache()
ChannelCache._eviction_ready.wait()
return ChannelCache._singleton
def _evict_locked(self, key: CacheKey):
channel, _ = self._mapping.pop(key)
_LOGGER.debug("Evicting channel %s with configuration %s.", channel,
key)
channel.close()
del channel
@staticmethod
def _perform_evictions():
while True:
with ChannelCache._lock:
ChannelCache._eviction_ready.set()
if not ChannelCache._singleton._mapping:
ChannelCache._condition.wait()
elif len(ChannelCache._singleton._mapping) > _MAXIMUM_CHANNELS:
key = next(iter(ChannelCache._singleton._mapping.keys()))
ChannelCache._singleton._evict_locked(key)
# And immediately reevaluate.
else:
key, (_, eviction_time) = next(
iter(ChannelCache._singleton._mapping.items()))
now = datetime.datetime.now()
if eviction_time <= now:
ChannelCache._singleton._evict_locked(key)
continue
else:
time_to_eviction = (eviction_time - now).total_seconds()
# NOTE: We aim to *eventually* coalesce to a state in
# which no overdue channels are in the cache and the
# length of the cache is no longer than _MAXIMUM_CHANNELS.
# We tolerate momentary states in which these two
# criteria are not met.
ChannelCache._condition.wait(timeout=time_to_eviction)
def get_channel(self, target: str, options: Sequence[Tuple[str, str]],
channel_credentials: Optional[grpc.ChannelCredentials],
compression: Optional[grpc.Compression]) -> grpc.Channel:
key = (target, options, channel_credentials, compression)
with self._lock:
channel_data = self._mapping.get(key, None)
if channel_data is not None:
channel = channel_data[0]
self._mapping.pop(key)
self._mapping[key] = (channel, datetime.datetime.now() +
_EVICTION_PERIOD)
return channel
else:
channel = _create_channel(target, options, channel_credentials,
compression)
self._mapping[key] = (channel, datetime.datetime.now() +
_EVICTION_PERIOD)
if len(self._mapping) == 1 or len(
self._mapping) >= _MAXIMUM_CHANNELS:
self._condition.notify()
return channel
def _test_only_channel_count(self) -> int:
with self._lock:
return len(self._mapping)
# TODO(rbellevi): Consider a credential type that has the
# following functionality matrix:
#
# +----------+-------+--------+
# | | local | remote |
# |----------+-------+--------+
# | secure | o | o |
# | insecure | o | x |
# +----------+-------+--------+
#
# Make this the default option.
@experimental_api
def unary_unary(
request: RequestType,
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
"""Invokes a unary-unary RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
from the cache after a fixed period by a background thread. Channels will also be
evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request: The request value for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials() or
grpc.insecure_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
The response to the RPC.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.unary_unary(method, request_serializer,
response_deserializer)
return multicallable(request,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
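# A minimal usage sketch for the unary-unary stub above. The service path,
# message types, and address are hypothetical, and insecure_channel_credentials
# is assumed to be available as grpc.experimental.insecure_channel_credentials:
#
#     response = unary_unary(
#         EchoRequest(message='hello'),
#         target='localhost:50051',
#         method='/echo.Echo/UnaryEcho',
#         request_serializer=EchoRequest.SerializeToString,
#         response_deserializer=EchoResponse.FromString,
#         channel_credentials=grpc.experimental.insecure_channel_credentials(),
#         timeout=1.0)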
@experimental_api
def unary_stream(
request: RequestType,
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
"""Invokes a unary-stream RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
from the cache after a fixed period by a background thread. Channels will
also be evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request: The request value for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
An iterator of responses.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.unary_stream(method, request_serializer,
response_deserializer)
return multicallable(request,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
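# A minimal usage sketch for the unary-stream stub above (hypothetical service
# path, message types, and address; credentials assumed from grpc.experimental):
#
#     for feature in unary_stream(
#             Rectangle(lo=Point(x=0), hi=Point(x=10)),
#             target='localhost:50051',
#             method='/routes.Routes/ListFeatures',
#             request_serializer=Rectangle.SerializeToString,
#             response_deserializer=Feature.FromString,
#             channel_credentials=grpc.experimental.insecure_channel_credentials(),
#             timeout=5.0):
#         print(feature)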
@experimental_api
def stream_unary(
request_iterator: Iterator[RequestType],
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
"""Invokes a stream-unary RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
from the cache after a fixed period by a background thread. Channels will
also be evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request_iterator: An iterator that yields request values for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
The response to the RPC.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.stream_unary(method, request_serializer,
response_deserializer)
return multicallable(request_iterator,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
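# A minimal usage sketch for the stream-unary stub above (hypothetical service
# path, message types, and address; credentials assumed from grpc.experimental):
#
#     def point_iterator():
#         for x in range(3):
#             yield Point(x=x)
#
#     summary = stream_unary(
#         point_iterator(),
#         target='localhost:50051',
#         method='/routes.Routes/RecordRoute',
#         request_serializer=Point.SerializeToString,
#         response_deserializer=RouteSummary.FromString,
#         channel_credentials=grpc.experimental.insecure_channel_credentials())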
@experimental_api
def stream_stream(
request_iterator: Iterator[RequestType],
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
"""Invokes a stream-stream RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
from the cache after a fixed period by a background thread. Channels will
also be evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request_iterator: An iterator that yields request values for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
An iterator of responses.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.stream_stream(method, request_serializer,
response_deserializer)
return multicallable(request_iterator,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
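# A minimal usage sketch for the stream-stream stub above (hypothetical service
# path, message types, and address; credentials assumed from grpc.experimental):
#
#     notes = iter((Note(text='hi'), Note(text='bye')))
#     for reply in stream_stream(
#             notes,
#             target='localhost:50051',
#             method='/routes.Routes/RouteChat',
#             request_serializer=Note.SerializeToString,
#             response_deserializer=Note.FromString,
#             channel_credentials=grpc.experimental.insecure_channel_credentials()):
#         print(reply.text)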

@ -2,16 +2,7 @@ package(default_visibility = ["//visibility:public"])
py_library( py_library(
name = "aio", name = "aio",
srcs = [ srcs = glob(["aio/**/*.py"]),
"aio/__init__.py",
"aio/_base_call.py",
"aio/_call.py",
"aio/_channel.py",
"aio/_interceptor.py",
"aio/_server.py",
"aio/_typing.py",
"aio/_utils.py",
],
deps = [ deps = [
"//src/python/grpcio/grpc/_cython:cygrpc", "//src/python/grpcio/grpc/_cython:cygrpc",
], ],

@ -16,6 +16,14 @@
These APIs are subject to be removed during any minor version release. These APIs are subject to be removed during any minor version release.
""" """
import functools
import sys
import warnings
import grpc
_EXPERIMENTAL_APIS_USED = set()
class ChannelOptions(object): class ChannelOptions(object):
"""Indicates a channel option unique to gRPC Python. """Indicates a channel option unique to gRPC Python.
@ -30,3 +38,53 @@ class ChannelOptions(object):
class UsageError(Exception): class UsageError(Exception):
"""Raised by the gRPC library to indicate usage not allowed by the API.""" """Raised by the gRPC library to indicate usage not allowed by the API."""
_insecure_channel_credentials = object()
def insecure_channel_credentials():
"""Creates a ChannelCredentials for use with an insecure channel.
THIS IS AN EXPERIMENTAL API.
This is not for use with the secure_channel function. Instead, this should
be used with grpc.unary_unary, grpc.unary_stream, grpc.stream_unary, or
grpc.stream_stream.
"""
return grpc.ChannelCredentials(_insecure_channel_credentials)
class ExperimentalApiWarning(Warning):
"""A warning that an API is experimental."""
def _warn_experimental(api_name, stack_offset):
if api_name not in _EXPERIMENTAL_APIS_USED:
_EXPERIMENTAL_APIS_USED.add(api_name)
msg = ("'{}' is an experimental API. It is subject to change or ".
format(api_name) +
"removal between minor releases. Proceed with caution.")
warnings.warn(msg, ExperimentalApiWarning, stacklevel=2 + stack_offset)
def experimental_api(f):
@functools.wraps(f)
def _wrapper(*args, **kwargs):
_warn_experimental(f.__name__, 1)
return f(*args, **kwargs)
return _wrapper
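# A short sketch of how a caller could acknowledge and silence the one-time
# warning emitted by the wrapper above, using only the standard library:
#
#     import warnings
#     from grpc.experimental import ExperimentalApiWarning
#     warnings.filterwarnings('ignore', category=ExperimentalApiWarning)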
__all__ = (
'ChannelOptions',
'ExperimentalApiWarning',
'UsageError',
'insecure_channel_credentials',
)
if sys.version_info[0] >= 3:
from grpc._simple_stubs import unary_unary, unary_stream, stream_unary, stream_stream
__all__ = __all__ + ('unary_unary', 'unary_stream', 'stream_unary', 'stream_stream')

@ -20,71 +20,50 @@ created. AsyncIO doesn't provide thread safety for most of its APIs.
from typing import Any, Optional, Sequence, Tuple from typing import Any, Optional, Sequence, Tuple
import grpc import grpc
from grpc._cython.cygrpc import (EOF, AbortError, BaseError, UsageError, from grpc._cython.cygrpc import (EOF, AbortError, BaseError, InternalError,
init_grpc_aio) UsageError, init_grpc_aio)
from ._base_call import Call, RpcContext, UnaryStreamCall, UnaryUnaryCall from ._base_call import (Call, RpcContext, StreamStreamCall, StreamUnaryCall,
UnaryStreamCall, UnaryUnaryCall)
from ._base_channel import (Channel, StreamStreamMultiCallable,
StreamUnaryMultiCallable, UnaryStreamMultiCallable,
UnaryUnaryMultiCallable)
from ._call import AioRpcError from ._call import AioRpcError
from ._channel import Channel, UnaryUnaryMultiCallable
from ._interceptor import (ClientCallDetails, InterceptedUnaryUnaryCall, from ._interceptor import (ClientCallDetails, InterceptedUnaryUnaryCall,
UnaryUnaryClientInterceptor, ServerInterceptor) UnaryUnaryClientInterceptor, ServerInterceptor)
from ._server import Server, server from ._server import server
from ._base_server import Server, ServicerContext
from ._typing import ChannelArgumentType from ._typing import ChannelArgumentType
from ._channel import insecure_channel, secure_channel
def insecure_channel(
target: str,
options: Optional[ChannelArgumentType] = None,
compression: Optional[grpc.Compression] = None,
interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]] = None):
"""Creates an insecure asynchronous Channel to a server.
Args:
target: The server address
options: An optional list of key-value pairs (channel args
in gRPC Core runtime) to configure the channel.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel. This is an EXPERIMENTAL option.
interceptors: An optional sequence of interceptors that will be executed for
any call executed with this channel.
Returns:
A Channel.
"""
return Channel(target, () if options is None else options, None,
compression, interceptors)
def secure_channel(
target: str,
credentials: grpc.ChannelCredentials,
options: Optional[ChannelArgumentType] = None,
compression: Optional[grpc.Compression] = None,
interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]] = None):
"""Creates a secure asynchronous Channel to a server.
Args:
target: The server address.
credentials: A ChannelCredentials instance.
options: An optional list of key-value pairs (channel args
in gRPC Core runtime) to configure the channel.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel. This is an EXPERIMENTAL option.
interceptors: An optional sequence of interceptors that will be executed for
any call executed with this channel.
Returns:
An aio.Channel.
"""
return Channel(target, () if options is None else options,
credentials._credentials, compression, interceptors)
################################### __all__ ################################# ################################### __all__ #################################
__all__ = ('AioRpcError', 'RpcContext', 'Call', 'UnaryUnaryCall', __all__ = (
'UnaryStreamCall', 'init_grpc_aio', 'Channel', 'AioRpcError',
'UnaryUnaryMultiCallable', 'ClientCallDetails', 'RpcContext',
'UnaryUnaryClientInterceptor', 'InterceptedUnaryUnaryCall', 'Call',
'ServerInterceptor', 'insecure_channel', 'server', 'Server', 'EOF', 'UnaryUnaryCall',
'secure_channel', 'AbortError', 'BaseError', 'UsageError') 'UnaryStreamCall',
'StreamUnaryCall',
'StreamStreamCall',
'init_grpc_aio',
'Channel',
'UnaryUnaryMultiCallable',
'UnaryStreamMultiCallable',
'StreamUnaryMultiCallable',
'StreamStreamMultiCallable',
'ClientCallDetails',
'UnaryUnaryClientInterceptor',
'InterceptedUnaryUnaryCall',
'ServerInterceptor',
'insecure_channel',
'server',
'Server',
'ServicerContext',
'EOF',
'secure_channel',
'AbortError',
'BaseError',
'UsageError',
'InternalError',
)

@ -0,0 +1,345 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base classes for Channel objects and Multicallable objects."""
import abc
from typing import Any, AsyncIterable, Optional
import grpc
from . import _base_call
from ._typing import DeserializingFunction, MetadataType, SerializingFunction
_IMMUTABLE_EMPTY_TUPLE = tuple()
class UnaryUnaryMultiCallable(abc.ABC):
"""Enables asynchronous invocation of a unary-call RPC."""
@abc.abstractmethod
def __call__(self,
request: Any,
*,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.UnaryUnaryCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A UnaryUnaryCall object.
Raises:
RpcError: Indicates that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
class UnaryStreamMultiCallable(abc.ABC):
"""Enables asynchronous invocation of a server-streaming RPC."""
@abc.abstractmethod
def __call__(self,
request: Any,
*,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.UnaryStreamCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A UnaryStreamCall object.
Raises:
RpcError: Indicates that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
class StreamUnaryMultiCallable(abc.ABC):
"""Enables asynchronous invocation of a client-streaming RPC."""
@abc.abstractmethod
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.StreamUnaryCall:
"""Asynchronously invokes the underlying RPC.
Args:
request_async_iterator: An optional async iterable of request messages for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A StreamUnaryCall object.
Raises:
RpcError: Indicates that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
class StreamStreamMultiCallable(abc.ABC):
"""Enables asynchronous invocation of a bidirectional-streaming RPC."""
@abc.abstractmethod
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None
) -> _base_call.StreamStreamCall:
"""Asynchronously invokes the underlying RPC.
Args:
request_async_iterator: An optional async iterable of request messages for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A StreamStreamCall object.
Raises:
RpcError: Indicates that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
class Channel(abc.ABC):
"""Enables asynchronous RPC invocation as a client.
Channel objects implement the Asynchronous Context Manager (aka. async
with) type, although they are not supported to be entered and exited
multiple times.
"""
@abc.abstractmethod
async def __aenter__(self):
"""Starts an asynchronous context manager.
Returns:
The channel that was instantiated.
"""
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Finishes the asynchronous context manager by closing the channel.
Still active RPCs will be cancelled.
"""
@abc.abstractmethod
async def close(self, grace: Optional[float] = None):
"""Closes this Channel and releases all resources held by it.
This method immediately stops the channel from executing new RPCs in
all cases.
If a grace period is specified, this method waits until all active
RPCs are finished; once the grace period is reached, the ones that haven't
been terminated yet are cancelled. If a grace period is not specified
(by passing None for grace), all existing RPCs are cancelled immediately.
This method is idempotent.
"""
@abc.abstractmethod
def get_state(self,
try_to_connect: bool = False) -> grpc.ChannelConnectivity:
"""Checks the connectivity state of a channel.
This is an EXPERIMENTAL API.
If the channel reaches a stable connectivity state, it is guaranteed
that the return value of this function will eventually converge to that
state.
Args:
try_to_connect: A bool indicating whether the Channel should try to
connect to the peer or not.
Returns: A ChannelConnectivity object.
"""
@abc.abstractmethod
async def wait_for_state_change(
self,
last_observed_state: grpc.ChannelConnectivity,
) -> None:
"""Waits for a change in connectivity state.
This is an EXPERIMENTAL API.
The function blocks until there is a change in the channel connectivity
state from the "last_observed_state". If the state is already
different, this function will return immediately.
There is an inherent race between the invocation of
"Channel.wait_for_state_change" and "Channel.get_state". The state can
change arbitrarily many times during the race, so there is no way to
observe every state transition.
If there is a need to put a timeout for this function, please refer to
"asyncio.wait_for".
Args:
last_observed_state: A grpc.ChannelConnectivity object representing
the last known state.
"""
@abc.abstractmethod
async def channel_ready(self) -> None:
"""Creates a coroutine that blocks until the Channel is READY."""
@abc.abstractmethod
def unary_unary(
self,
method: str,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> UnaryUnaryMultiCallable:
"""Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the
response message. Response goes undeserialized in case None
is passed.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
@abc.abstractmethod
def unary_stream(
self,
method: str,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> UnaryStreamMultiCallable:
"""Creates a UnaryStreamMultiCallable for a unary-stream method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the
response message. Response goes undeserialized in case None
is passed.
Returns:
A UnaryStreamMultiCallable value for the named unary-stream method.
"""
@abc.abstractmethod
def stream_unary(
self,
method: str,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> StreamUnaryMultiCallable:
"""Creates a StreamUnaryMultiCallable for a stream-unary method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the
response message. Response goes undeserialized in case None
is passed.
Returns:
A StreamUnaryMultiCallable value for the named stream-unary method.
"""
@abc.abstractmethod
def stream_stream(
self,
method: str,
request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None
) -> StreamStreamMultiCallable:
"""Creates a StreamStreamMultiCallable for a stream-stream method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the
response message. Response goes undeserialized in case None
is passed.
Returns:
A StreamStreamMultiCallable value for the named stream-stream method.
"""

@ -0,0 +1,254 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base classes for server-side classes."""
import abc
from typing import Generic, Optional, Sequence
import grpc
from ._typing import MetadataType, RequestType, ResponseType
class Server(abc.ABC):
"""Serves RPCs."""
@abc.abstractmethod
def add_generic_rpc_handlers(
self,
generic_rpc_handlers: Sequence[grpc.GenericRpcHandler]) -> None:
"""Registers GenericRpcHandlers with this Server.
This method is only safe to call before the server is started.
Args:
generic_rpc_handlers: A sequence of GenericRpcHandlers that will be
used to service RPCs.
"""
@abc.abstractmethod
def add_insecure_port(self, address: str) -> int:
"""Opens an insecure port for accepting RPCs.
A port is a communication endpoint that is used by networking protocols,
like TCP and UDP. To date, we only support TCP.
This method may only be called before starting the server.
Args:
address: The address for which to open a port. If the port is 0,
or not specified in the address, then the gRPC runtime will choose a port.
Returns:
An integer port on which the server will accept RPC requests.
"""
@abc.abstractmethod
def add_secure_port(self, address: str,
server_credentials: grpc.ServerCredentials) -> int:
"""Opens a secure port for accepting RPCs.
A port is a communication endpoint that is used by networking protocols,
like TCP and UDP. To date, we only support TCP.
This method may only be called before starting the server.
Args:
address: The address for which to open a port.
if the port is 0, or not specified in the address, then the gRPC
runtime will choose a port.
server_credentials: A ServerCredentials object.
Returns:
An integer port on which the server will accept RPC requests.
"""
@abc.abstractmethod
async def start(self) -> None:
"""Starts this Server.
This method may only be called once. (i.e. it is not idempotent).
"""
@abc.abstractmethod
async def stop(self, grace: Optional[float]) -> None:
"""Stops this Server.
This method immediately stops the server from servicing new RPCs in
all cases.
If a grace period is specified, this method returns immediately and all
RPCs active at the end of the grace period are aborted. If a grace
period is not specified (by passing None for grace), all existing RPCs
are aborted immediately and this method blocks until the last RPC
handler terminates.
This method is idempotent and may be called at any time. Passing a
smaller grace value in a subsequent call will have the effect of
stopping the Server sooner (passing None will have the effect of
stopping the server immediately). Passing a larger grace value in a
subsequent call will not have the effect of stopping the server later
(i.e. the most restrictive grace value is used).
Args:
grace: A duration of time in seconds or None.
"""
@abc.abstractmethod
async def wait_for_termination(self,
timeout: Optional[float] = None) -> bool:
"""Continues current coroutine once the server stops.
This is an EXPERIMENTAL API.
The wait will not consume computational resources during blocking, and
it will block until one of the two following conditions is met:
1) The server is stopped or terminated;
2) A timeout occurs if timeout is not `None`.
The timeout argument works in the same way as `threading.Event.wait()`.
https://docs.python.org/3/library/threading.html#threading.Event.wait
Args:
timeout: A floating point number specifying a timeout for the
operation in seconds.
Returns:
A bool indicating whether the operation timed out.
"""
class ServicerContext(Generic[RequestType, ResponseType], abc.ABC):
"""A context object passed to method implementations."""
@abc.abstractmethod
async def read(self) -> RequestType:
"""Reads one message from the RPC.
Only one read operation is allowed simultaneously.
Returns:
A request message of the RPC.
Raises:
An RpcError exception if the read failed.
"""
@abc.abstractmethod
async def write(self, message: ResponseType) -> None:
"""Writes one message to the RPC.
Only one write operation is allowed simultaneously.
Raises:
An RpcError exception if the write failed.
"""
@abc.abstractmethod
async def send_initial_metadata(self,
initial_metadata: MetadataType) -> None:
"""Sends the initial metadata value to the client.
This method need not be called by implementations if they have no
metadata to add to what the gRPC runtime will transmit.
Args:
initial_metadata: The initial :term:`metadata`.
"""
@abc.abstractmethod
async def abort(self, code: grpc.StatusCode, details: str,
trailing_metadata: MetadataType) -> None:
"""Raises an exception to terminate the RPC with a non-OK status.
The code and details passed as arguments will supersede any existing
ones.
Args:
code: A StatusCode object to be sent to the client.
It must not be StatusCode.OK.
details: A UTF-8-encodable string to be sent to the client upon
termination of the RPC.
trailing_metadata: A sequence of tuples representing the trailing
:term:`metadata`.
Raises:
Exception: An exception is always raised to signal the abortion of the
RPC to the gRPC runtime.
"""
@abc.abstractmethod
async def set_trailing_metadata(self,
trailing_metadata: MetadataType) -> None:
"""Sends the trailing metadata for the RPC.
This method need not be called by implementations if they have no
metadata to add to what the gRPC runtime will transmit.
Args:
trailing_metadata: The trailing :term:`metadata`.
"""
@abc.abstractmethod
def invocation_metadata(self) -> Optional[MetadataType]:
"""Accesses the metadata from the sent by the client.
Returns:
The invocation :term:`metadata`.
"""
@abc.abstractmethod
def set_code(self, code: grpc.StatusCode) -> None:
"""Sets the value to be used as status code upon RPC completion.
This method need not be called by method implementations if they wish
the gRPC runtime to determine the status code of the RPC.
Args:
code: A StatusCode object to be sent to the client.
"""
@abc.abstractmethod
def set_details(self, details: str) -> None:
"""Sets the value to be used the as detail string upon RPC completion.
This method need not be called by method implementations if they have
no details to transmit.
Args:
details: A UTF-8-encodable string to be sent to the client upon
termination of the RPC.
"""
@abc.abstractmethod
def set_compression(self, compression: grpc.Compression) -> None:
"""Set the compression algorithm to be used for the entire call.
This is an EXPERIMENTAL method.
Args:
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip.
"""
@abc.abstractmethod
def disable_next_message_compression(self) -> None:
"""Disables compression for the next response message.
This is an EXPERIMENTAL method.
This method will override any compression configuration set during
server creation or set on the call.
"""

@ -21,7 +21,7 @@ import grpc
from grpc import _common, _compression, _grpcio_metadata from grpc import _common, _compression, _grpcio_metadata
from grpc._cython import cygrpc from grpc._cython import cygrpc
from . import _base_call from . import _base_call, _base_channel
from ._call import (StreamStreamCall, StreamUnaryCall, UnaryStreamCall, from ._call import (StreamStreamCall, StreamUnaryCall, UnaryStreamCall,
UnaryUnaryCall) UnaryUnaryCall)
from ._interceptor import (InterceptedUnaryUnaryCall, from ._interceptor import (InterceptedUnaryUnaryCall,
@ -86,8 +86,8 @@ class _BaseMultiCallable:
self._interceptors = interceptors self._interceptors = interceptors
class UnaryUnaryMultiCallable(_BaseMultiCallable): class UnaryUnaryMultiCallable(_BaseMultiCallable,
"""Factory an asynchronous unary-unary RPC stub call from client-side.""" _base_channel.UnaryUnaryMultiCallable):
def __call__(self, def __call__(self,
request: Any, request: Any,
@ -98,29 +98,6 @@ class UnaryUnaryMultiCallable(_BaseMultiCallable):
wait_for_ready: Optional[bool] = None, wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None compression: Optional[grpc.Compression] = None
) -> _base_call.UnaryUnaryCall: ) -> _base_call.UnaryUnaryCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
if compression: if compression:
metadata = _compression.augment_metadata(metadata, compression) metadata = _compression.augment_metadata(metadata, compression)
@ -140,8 +117,8 @@ class UnaryUnaryMultiCallable(_BaseMultiCallable):
return call return call
class UnaryStreamMultiCallable(_BaseMultiCallable): class UnaryStreamMultiCallable(_BaseMultiCallable,
"""Affords invoking a unary-stream RPC from client-side in an asynchronous way.""" _base_channel.UnaryStreamMultiCallable):
def __call__(self, def __call__(self,
request: Any, request: Any,
@ -152,24 +129,6 @@ class UnaryStreamMultiCallable(_BaseMultiCallable):
wait_for_ready: Optional[bool] = None, wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None compression: Optional[grpc.Compression] = None
) -> _base_call.UnaryStreamCall: ) -> _base_call.UnaryStreamCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
"""
if compression: if compression:
metadata = _compression.augment_metadata(metadata, compression) metadata = _compression.augment_metadata(metadata, compression)
@ -183,8 +142,8 @@ class UnaryStreamMultiCallable(_BaseMultiCallable):
return call return call
class StreamUnaryMultiCallable(_BaseMultiCallable): class StreamUnaryMultiCallable(_BaseMultiCallable,
"""Affords invoking a stream-unary RPC from client-side in an asynchronous way.""" _base_channel.StreamUnaryMultiCallable):
def __call__(self, def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None, request_async_iterator: Optional[AsyncIterable[Any]] = None,
@ -194,29 +153,6 @@ class StreamUnaryMultiCallable(_BaseMultiCallable):
wait_for_ready: Optional[bool] = None, wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None compression: Optional[grpc.Compression] = None
) -> _base_call.StreamUnaryCall: ) -> _base_call.StreamUnaryCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
if compression: if compression:
metadata = _compression.augment_metadata(metadata, compression) metadata = _compression.augment_metadata(metadata, compression)
@ -230,8 +166,8 @@ class StreamUnaryMultiCallable(_BaseMultiCallable):
return call return call
class StreamStreamMultiCallable(_BaseMultiCallable): class StreamStreamMultiCallable(_BaseMultiCallable,
"""Affords invoking a stream-stream RPC from client-side in an asynchronous way.""" _base_channel.StreamStreamMultiCallable):
def __call__(self, def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None, request_async_iterator: Optional[AsyncIterable[Any]] = None,
@ -241,29 +177,6 @@ class StreamStreamMultiCallable(_BaseMultiCallable):
wait_for_ready: Optional[bool] = None, wait_for_ready: Optional[bool] = None,
compression: Optional[grpc.Compression] = None compression: Optional[grpc.Compression] = None
) -> _base_call.StreamStreamCall: ) -> _base_call.StreamStreamCall:
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
A Call object instance which is an awaitable object.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
if compression: if compression:
metadata = _compression.augment_metadata(metadata, compression) metadata = _compression.augment_metadata(metadata, compression)
@ -277,11 +190,7 @@ class StreamStreamMultiCallable(_BaseMultiCallable):
return call return call
class Channel: class Channel(_base_channel.Channel):
"""Asynchronous Channel implementation.
A cygrpc.AioChannel-backed implementation.
"""
_loop: asyncio.AbstractEventLoop _loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel _channel: cygrpc.AioChannel
_unary_unary_interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]] _unary_unary_interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]]
@ -326,18 +235,9 @@ class Channel:
self._loop) self._loop)
async def __aenter__(self): async def __aenter__(self):
"""Starts an asynchronous context manager.
Returns:
Channel the channel that was instantiated.
"""
return self return self
async def __aexit__(self, exc_type, exc_val, exc_tb): async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Finishes the asynchronous context manager by closing the channel.
Still active RPCs will be cancelled.
"""
await self._close(None) await self._close(None)
async def _close(self, grace): async def _close(self, grace):
@ -392,35 +292,10 @@ class Channel:
self._channel.close() self._channel.close()
async def close(self, grace: Optional[float] = None): async def close(self, grace: Optional[float] = None):
"""Closes this Channel and releases all resources held by it.
This method immediately stops the channel from executing new RPCs in
all cases.
If a grace period is specified, this method wait until all active
RPCs are finshed, once the grace period is reached the ones that haven't
been terminated are cancelled. If a grace period is not specified
(by passing None for grace), all existing RPCs are cancelled immediately.
This method is idempotent.
"""
await self._close(grace) await self._close(grace)
def get_state(self, def get_state(self,
try_to_connect: bool = False) -> grpc.ChannelConnectivity: try_to_connect: bool = False) -> grpc.ChannelConnectivity:
"""Check the connectivity state of a channel.
This is an EXPERIMENTAL API.
If the channel reaches a stable connectivity state, it is guaranteed
that the return value of this function will eventually converge to that
state.
Args: try_to_connect: a bool indicate whether the Channel should try to
connect to peer or not.
Returns: A ChannelConnectivity object.
"""
result = self._channel.check_connectivity_state(try_to_connect) result = self._channel.check_connectivity_state(try_to_connect)
return _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[result] return _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[result]
@ -428,31 +303,10 @@ class Channel:
self, self,
last_observed_state: grpc.ChannelConnectivity, last_observed_state: grpc.ChannelConnectivity,
) -> None: ) -> None:
"""Wait for a change in connectivity state.
This is an EXPERIMENTAL API.
The function blocks until there is a change in the channel connectivity
state from the "last_observed_state". If the state is already
different, this function will return immediately.
There is an inherent race between the invocation of
"Channel.wait_for_state_change" and "Channel.get_state". The state can
change arbitrary times during the race, so there is no way to observe
every state transition.
If there is a need to put a timeout for this function, please refer to
"asyncio.wait_for".
Args:
last_observed_state: A grpc.ChannelConnectivity object representing
the last known state.
"""
assert await self._channel.watch_connectivity_state( assert await self._channel.watch_connectivity_state(
last_observed_state.value[0], None) last_observed_state.value[0], None)
async def channel_ready(self) -> None: async def channel_ready(self) -> None:
"""Creates a coroutine that ends when a Channel is ready."""
state = self.get_state(try_to_connect=True) state = self.get_state(try_to_connect=True)
while state != grpc.ChannelConnectivity.READY: while state != grpc.ChannelConnectivity.READY:
await self.wait_for_state_change(state) await self.wait_for_state_change(state)
@ -464,19 +318,6 @@ class Channel:
request_serializer: Optional[SerializingFunction] = None, request_serializer: Optional[SerializingFunction] = None,
response_deserializer: Optional[DeserializingFunction] = None response_deserializer: Optional[DeserializingFunction] = None
) -> UnaryUnaryMultiCallable: ) -> UnaryUnaryMultiCallable:
"""Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the
response message. Response goes undeserialized in case None
is passed.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
return UnaryUnaryMultiCallable(self._channel, _common.encode(method), return UnaryUnaryMultiCallable(self._channel, _common.encode(method),
request_serializer, request_serializer,
response_deserializer, response_deserializer,
@ -513,3 +354,51 @@ class Channel:
request_serializer, request_serializer,
response_deserializer, None, response_deserializer, None,
self._loop) self._loop)
def insecure_channel(
target: str,
options: Optional[ChannelArgumentType] = None,
compression: Optional[grpc.Compression] = None,
interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]] = None):
"""Creates an insecure asynchronous Channel to a server.
Args:
target: The server address
options: An optional list of key-value pairs (channel args
in gRPC Core runtime) to configure the channel.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel. This is an EXPERIMENTAL option.
interceptors: An optional sequence of interceptors that will be executed for
any call executed with this channel.
Returns:
A Channel.
"""
return Channel(target, () if options is None else options, None,
compression, interceptors)
def secure_channel(
target: str,
credentials: grpc.ChannelCredentials,
options: Optional[ChannelArgumentType] = None,
compression: Optional[grpc.Compression] = None,
interceptors: Optional[Sequence[UnaryUnaryClientInterceptor]] = None):
"""Creates a secure asynchronous Channel to a server.
Args:
target: The server address.
credentials: A ChannelCredentials instance.
options: An optional list of key-value pairs (channel args
in gRPC Core runtime) to configure the channel.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel. This is an EXPERIMENTAL option.
interceptors: An optional sequence of interceptors that will be executed for
any call executed with this channel.
Returns:
An aio.Channel.
"""
return Channel(target, () if options is None else options,
credentials._credentials, compression, interceptors)
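# A minimal usage sketch for the factories above. The target and request bytes
# are hypothetical; without serializers the call sends and receives raw bytes:
#
#     async def run():
#         async with insecure_channel('localhost:50051') as channel:
#             multicallable = channel.unary_unary('/echo.Echo/UnaryEcho')
#             response = await multicallable(b'serialized-request-bytes')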

@ -63,6 +63,19 @@ class ClientCallDetails(
'ClientCallDetails', 'ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials', 'wait_for_ready')), ('method', 'timeout', 'metadata', 'credentials', 'wait_for_ready')),
grpc.ClientCallDetails): grpc.ClientCallDetails):
"""Describes an RPC to be invoked.
This is an EXPERIMENTAL API.
Args:
method: The method name of the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional metadata to be transmitted to the service-side of
the RPC.
credentials: An optional CallCredentials for the RPC.
wait_for_ready: This is an EXPERIMENTAL argument. An optional flag to
enable wait for ready mechanism.
"""
method: str method: str
timeout: Optional[float] timeout: Optional[float]
@ -81,6 +94,7 @@ class UnaryUnaryClientInterceptor(metaclass=ABCMeta):
client_call_details: ClientCallDetails, client_call_details: ClientCallDetails,
request: RequestType) -> Union[UnaryUnaryCall, ResponseType]: request: RequestType) -> Union[UnaryUnaryCall, ResponseType]:
"""Intercepts a unary-unary invocation asynchronously. """Intercepts a unary-unary invocation asynchronously.
Args: Args:
continuation: A coroutine that proceeds with the invocation by continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in chain or invoking the executing the next interceptor in chain or invoking the
@ -93,8 +107,10 @@ class UnaryUnaryClientInterceptor(metaclass=ABCMeta):
client_call_details: A ClientCallDetails object describing the client_call_details: A ClientCallDetails object describing the
outgoing RPC. outgoing RPC.
request: The request value for the RPC. request: The request value for the RPC.
Returns: Returns:
An object with the RPC response. An object with the RPC response.
Raises: Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status. AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled. asyncio.CancelledError: Indicating that the RPC was canceled.

@ -21,6 +21,7 @@ import grpc
from grpc import _common, _compression from grpc import _common, _compression
from grpc._cython import cygrpc from grpc._cython import cygrpc
from . import _base_server
from ._typing import ChannelArgumentType from ._typing import ChannelArgumentType
@ -30,7 +31,7 @@ def _augment_channel_arguments(base_options: ChannelArgumentType,
return tuple(base_options) + compression_option return tuple(base_options) + compression_option
class Server: class Server(_base_server.Server):
"""Serves RPCs.""" """Serves RPCs."""
def __init__(self, thread_pool: Optional[Executor], def __init__(self, thread_pool: Optional[Executor],

@ -16,7 +16,10 @@ py_grpc_library(
py_library( py_library(
name = "grpc_health", name = "grpc_health",
srcs = ["health.py"], srcs = [
"_async.py",
"health.py",
],
imports = ["../../"], imports = ["../../"],
deps = [ deps = [
":health_py_pb2", ":health_py_pb2",

@ -0,0 +1,113 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reference implementation for health checking in gRPC Python."""
import asyncio
import collections
from typing import MutableMapping
import grpc
from grpc_health.v1 import health_pb2 as _health_pb2
from grpc_health.v1 import health_pb2_grpc as _health_pb2_grpc
class HealthServicer(_health_pb2_grpc.HealthServicer):
"""An AsyncIO implementation of health checking servicer."""
_server_status: MutableMapping[
str, '_health_pb2.HealthCheckResponse.ServingStatus']
_server_watchers: MutableMapping[str, asyncio.Condition]
_gracefully_shutting_down: bool
def __init__(self) -> None:
self._server_status = dict()
self._server_watchers = collections.defaultdict(asyncio.Condition)
self._gracefully_shutting_down = False
async def Check(self, request: _health_pb2.HealthCheckRequest,
context) -> None:
status = self._server_status.get(request.service)
if status is None:
await context.abort(grpc.StatusCode.NOT_FOUND)
else:
return _health_pb2.HealthCheckResponse(status=status)
async def Watch(self, request: _health_pb2.HealthCheckRequest,
context) -> None:
condition = self._server_watchers[request.service]
last_status = None
try:
async with condition:
while True:
status = self._server_status.get(
request.service,
_health_pb2.HealthCheckResponse.SERVICE_UNKNOWN)
# NOTE(lidiz) If the observed status is the same, it means
# there are missing intermediate statuses. It's considered
# acceptable since the peer is only interested in the eventual status.
if status != last_status:
# Responds with current health state
await context.write(
_health_pb2.HealthCheckResponse(status=status))
# Records the last sent status
last_status = status
# Polling on health state changes
await condition.wait()
finally:
if request.service in self._server_watchers:
del self._server_watchers[request.service]
async def _set(self, service: str,
status: _health_pb2.HealthCheckResponse.ServingStatus
) -> None:
if service in self._server_watchers:
condition = self._server_watchers.get(service)
async with condition:
self._server_status[service] = status
condition.notify_all()
else:
self._server_status[service] = status
async def set(self, service: str,
status: _health_pb2.HealthCheckResponse.ServingStatus
) -> None:
"""Sets the status of a service.
Args:
service: string, the name of the service.
status: HealthCheckResponse.status enum value indicating the status of
the service
"""
if self._gracefully_shutting_down:
return
else:
await self._set(service, status)
async def enter_graceful_shutdown(self) -> None:
"""Permanently sets the status of all services to NOT_SERVING.
This should be invoked when the server is entering a graceful shutdown
period. After this method is invoked, future attempts to set the status
of a service will be ignored.
"""
if self._gracefully_shutting_down:
return
else:
self._gracefully_shutting_down = True
for service in self._server_status:
await self._set(service,
_health_pb2.HealthCheckResponse.NOT_SERVING)
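# A minimal wiring sketch for the servicer above. The server is assumed to be
# a grpc.aio server instance created elsewhere:
#
#     async def configure_health(server):
#         health_servicer = HealthServicer()
#         _health_pb2_grpc.add_HealthServicer_to_server(health_servicer, server)
#         # '' denotes the overall health of the server.
#         await health_servicer.set('', _health_pb2.HealthCheckResponse.SERVING)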

@ -15,13 +15,20 @@
import collections import collections
import threading import threading
import sys
import grpc import grpc
from grpc_health.v1 import health_pb2 as _health_pb2 from grpc_health.v1 import health_pb2 as _health_pb2
from grpc_health.v1 import health_pb2_grpc as _health_pb2_grpc from grpc_health.v1 import health_pb2_grpc as _health_pb2_grpc
if sys.version_info[:2] >= (3, 6):
# Exposes the asynchronous HealthServicer as public API.
from . import _async as aio # pylint: disable=unused-import
# The service name of the health checking servicer.
SERVICE_NAME = _health_pb2.DESCRIPTOR.services_by_name['Health'].full_name SERVICE_NAME = _health_pb2.DESCRIPTOR.services_by_name['Health'].full_name
# The entry of overall health for the entire server.
OVERALL_HEALTH = ''
class _Watcher(): class _Watcher():
@ -131,7 +138,7 @@ class HealthServicer(_health_pb2_grpc.HealthServicer):
"""Sets the status of a service. """Sets the status of a service.
Args: Args:
service: string, the name of the service. NOTE, '' must be set. service: string, the name of the service.
status: HealthCheckResponse.status enum value indicating the status of status: HealthCheckResponse.status enum value indicating the status of
the service the service
""" """

@ -106,6 +106,37 @@ class TestLite(setuptools.Command):
self.distribution.fetch_build_eggs(self.distribution.tests_require) self.distribution.fetch_build_eggs(self.distribution.tests_require)
class TestPy3Only(setuptools.Command):
"""Command to run tests for Python 3+ features.
This does not include asyncio tests, which are housed in a separate
directory.
"""
description = 'run tests for py3+ features'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self._add_eggs_to_path()
import tests
loader = tests.Loader()
loader.loadTestsFromNames(['tests_py3_only'])
runner = tests.Runner()
result = runner.run(loader.suite)
if not result.wasSuccessful():
sys.exit('Test failure')
def _add_eggs_to_path(self):
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
class TestAio(setuptools.Command):
    """Command to run aio tests without fetching or building anything."""

@ -59,6 +59,7 @@ COMMAND_CLASS = {
    'test_lite': commands.TestLite,
    'test_gevent': commands.TestGevent,
    'test_aio': commands.TestAio,
    'test_py3_only': commands.TestPy3Only,
}
PACKAGE_DATA = {

@ -0,0 +1,27 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package(
default_testonly = 1,
default_visibility = ["//visibility:public"],
)
py_library(
name = "histogram",
srcs = ["histogram.py"],
srcs_version = "PY2AND3",
deps = [
"//src/proto/grpc/testing:stats_py_pb2",
],
)

@ -65,6 +65,16 @@ class Histogram(object):
        data.count = self._count
        return data
def merge(self, another_data):
with self._lock:
for i in range(len(self._buckets)):
self._buckets[i] += another_data.bucket[i]
self._min = min(self._min, another_data.min_seen)
self._max = max(self._max, another_data.max_seen)
self._sum += another_data.sum
self._sum_of_squares += another_data.sum_of_squares
self._count += another_data.count
    def _bucket_for(self, val):
        val = min(val, self._max_possible)
        return int(math.log(val, self.multiplier))
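A note on the merge() method added above: it consumes the stats_pb2.HistogramData snapshot returned by get_data(), not another Histogram object, and it assumes both histograms were built with the same resolution and max_possible. A minimal sketch (the values here are made up, not from this change):

from tests.qps import histogram

aggregate = histogram.Histogram(0.01, 60e9)
partial = histogram.Histogram(0.01, 60e9)
partial.add(1.5e6)  # record one 1.5 ms query, in nanoseconds
aggregate.merge(partial.get_data())  # fold the proto snapshot into the aggregate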

@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import itertools
-import threading
import unittest
import logging
@ -35,26 +33,6 @@ _STREAM_STREAM = '/test/StreamStream'
_DEFECTIVE_GENERIC_RPC_HANDLER = '/test/DefectiveGenericRpcHandler'
-class _Callback(object):
-    def __init__(self):
-        self._condition = threading.Condition()
-        self._value = None
-        self._called = False
-    def __call__(self, value):
-        with self._condition:
-            self._value = value
-            self._called = True
-            self._condition.notify_all()
-    def value(self):
-        with self._condition:
-            while not self._called:
-                self._condition.wait()
-            return self._value
class _Handler(object):
    def __init__(self, control):
@ -199,6 +177,7 @@ def _defective_handler_multi_callable(channel):
class InvocationDefectsTest(unittest.TestCase):
    """Tests the handling of exception-raising user code on the client-side."""
    def setUp(self):
        self._control = test_control.PauseFailControl()
@ -216,35 +195,44 @@ class InvocationDefectsTest(unittest.TestCase):
        self._channel.close()
    def testIterableStreamRequestBlockingUnaryResponse(self):
-        requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
+        requests = object()
        multi_callable = _stream_unary_multi_callable(self._channel)
-        with self.assertRaises(grpc.RpcError):
-            response = multi_callable(
+        with self.assertRaises(grpc.RpcError) as exception_context:
+            multi_callable(
                requests,
                metadata=(('test',
                           'IterableStreamRequestBlockingUnaryResponse'),))
+        self.assertIs(grpc.StatusCode.UNKNOWN,
+                      exception_context.exception.code())
    def testIterableStreamRequestFutureUnaryResponse(self):
-        requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
+        requests = object()
        multi_callable = _stream_unary_multi_callable(self._channel)
        response_future = multi_callable.future(
            requests,
            metadata=(('test', 'IterableStreamRequestFutureUnaryResponse'),))
-        with self.assertRaises(grpc.RpcError):
-            response = response_future.result()
+        with self.assertRaises(grpc.RpcError) as exception_context:
+            response_future.result()
+        self.assertIs(grpc.StatusCode.UNKNOWN,
+                      exception_context.exception.code())
    def testIterableStreamRequestStreamResponse(self):
-        requests = [b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)]
+        requests = object()
        multi_callable = _stream_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            requests,
            metadata=(('test', 'IterableStreamRequestStreamResponse'),))
-        with self.assertRaises(grpc.RpcError):
+        with self.assertRaises(grpc.RpcError) as exception_context:
            next(response_iterator)
+        self.assertIs(grpc.StatusCode.UNKNOWN,
+                      exception_context.exception.code())
    def testIteratorStreamRequestStreamResponse(self):
        requests_iterator = FailAfterFewIterationsCounter(
            test_constants.STREAM_LENGTH // 2, b'\x07\x08')
@ -253,18 +241,21 @@ class InvocationDefectsTest(unittest.TestCase):
            requests_iterator,
            metadata=(('test', 'IteratorStreamRequestStreamResponse'),))
-        with self.assertRaises(grpc.RpcError):
+        with self.assertRaises(grpc.RpcError) as exception_context:
            for _ in range(test_constants.STREAM_LENGTH // 2 + 1):
                next(response_iterator)
+        self.assertIs(grpc.StatusCode.UNKNOWN,
+                      exception_context.exception.code())
    def testDefectiveGenericRpcHandlerUnaryResponse(self):
        request = b'\x07\x08'
        multi_callable = _defective_handler_multi_callable(self._channel)
        with self.assertRaises(grpc.RpcError) as exception_context:
-            response = multi_callable(
-                request,
-                metadata=(('test', 'DefectiveGenericRpcHandlerUnary'),))
+            multi_callable(request,
+                           metadata=(('test',
+                                      'DefectiveGenericRpcHandlerUnary'),))
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())

@ -17,16 +17,67 @@ package(
default_visibility = ["//visibility:public"], default_visibility = ["//visibility:public"],
) )
py_binary( py_library(
name = "server", name = "benchmark_client",
srcs = ["server.py"], srcs = ["benchmark_client.py"],
python_version = "PY3", srcs_version = "PY3",
deps = [ deps = [
"//src/proto/grpc/testing:benchmark_service_py_pb2",
"//src/proto/grpc/testing:benchmark_service_py_pb2_grpc", "//src/proto/grpc/testing:benchmark_service_py_pb2_grpc",
"//src/proto/grpc/testing:py_messages_proto", "//src/proto/grpc/testing:py_messages_proto",
"//src/python/grpcio/grpc:grpcio", "//src/python/grpcio/grpc:grpcio",
"//src/python/grpcio_tests/tests/qps:histogram",
"//src/python/grpcio_tests/tests/unit:resources",
],
)
py_library(
name = "benchmark_servicer",
srcs = ["benchmark_servicer.py"],
srcs_version = "PY3",
deps = [
"//src/proto/grpc/testing:benchmark_service_py_pb2_grpc",
"//src/proto/grpc/testing:py_messages_proto",
"//src/python/grpcio/grpc:grpcio",
],
)
py_library(
name = "worker_servicer",
srcs = ["worker_servicer.py"],
data = [
"//src/python/grpcio_tests/tests/unit/credentials",
],
srcs_version = "PY3",
deps = [
":benchmark_client",
":benchmark_servicer",
"//src/proto/grpc/core:stats_py_pb2",
"//src/proto/grpc/testing:benchmark_service_py_pb2_grpc",
"//src/proto/grpc/testing:control_py_pb2",
"//src/proto/grpc/testing:payloads_py_pb2",
"//src/proto/grpc/testing:stats_py_pb2",
"//src/proto/grpc/testing:worker_service_py_pb2_grpc",
"//src/python/grpcio/grpc:grpcio",
"//src/python/grpcio_tests/tests/qps:histogram",
"//src/python/grpcio_tests/tests/unit:resources",
"//src/python/grpcio_tests/tests/unit/framework/common", "//src/python/grpcio_tests/tests/unit/framework/common",
"@six", ],
)
py_binary(
name = "server",
srcs = ["server.py"],
python_version = "PY3",
deps = [":benchmark_servicer"],
)
py_binary(
name = "worker",
srcs = ["worker.py"],
imports = ["../../"],
python_version = "PY3",
deps = [
":worker_servicer",
"//src/proto/grpc/testing:worker_service_py_pb2_grpc",
], ],
) )

@ -0,0 +1,155 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python AsyncIO Benchmark Clients."""
import abc
import asyncio
import time
import logging
import random
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import (benchmark_service_pb2_grpc, control_pb2,
messages_pb2)
from tests.qps import histogram
from tests.unit import resources
class GenericStub(object):
def __init__(self, channel: aio.Channel):
self.UnaryCall = channel.unary_unary(
'/grpc.testing.BenchmarkService/UnaryCall')
self.StreamingCall = channel.stream_stream(
'/grpc.testing.BenchmarkService/StreamingCall')
class BenchmarkClient(abc.ABC):
"""Benchmark client interface that exposes a non-blocking send_request()."""
def __init__(self, address: str, config: control_pb2.ClientConfig,
hist: histogram.Histogram):
# Disables underlying reuse of subchannels
unique_option = (('iv', random.random()),)
# Parses the channel argument from config
channel_args = tuple(
(arg.name, arg.str_value) if arg.HasField('str_value') else (
arg.name, int(arg.int_value)) for arg in config.channel_args)
# Creates the channel
if config.HasField('security_params'):
channel_credentials = grpc.ssl_channel_credentials(
resources.test_root_certificates(),)
server_host_override_option = ((
'grpc.ssl_target_name_override',
config.security_params.server_host_override,
),)
self._channel = aio.secure_channel(
address, channel_credentials,
unique_option + channel_args + server_host_override_option)
else:
self._channel = aio.insecure_channel(address,
options=unique_option +
channel_args)
# Creates the stub
if config.payload_config.WhichOneof('payload') == 'simple_params':
self._generic = False
self._stub = benchmark_service_pb2_grpc.BenchmarkServiceStub(
self._channel)
payload = messages_pb2.Payload(
body=b'\0' * config.payload_config.simple_params.req_size)
self._request = messages_pb2.SimpleRequest(
payload=payload,
response_size=config.payload_config.simple_params.resp_size)
else:
self._generic = True
self._stub = GenericStub(self._channel)
self._request = b'\0' * config.payload_config.bytebuf_params.req_size
self._hist = hist
self._response_callbacks = []
self._concurrency = config.outstanding_rpcs_per_channel
async def run(self) -> None:
await self._channel.channel_ready()
async def stop(self) -> None:
await self._channel.close()
def _record_query_time(self, query_time: float) -> None:
self._hist.add(query_time * 1e9)
class UnaryAsyncBenchmarkClient(BenchmarkClient):
def __init__(self, address: str, config: control_pb2.ClientConfig,
hist: histogram.Histogram):
super().__init__(address, config, hist)
self._running = None
self._stopped = asyncio.Event()
async def _send_request(self):
start_time = time.monotonic()
await self._stub.UnaryCall(self._request)
self._record_query_time(time.monotonic() - start_time)
async def _send_indefinitely(self) -> None:
while self._running:
await self._send_request()
async def run(self) -> None:
await super().run()
self._running = True
senders = (self._send_indefinitely() for _ in range(self._concurrency))
await asyncio.gather(*senders)
self._stopped.set()
async def stop(self) -> None:
self._running = False
await self._stopped.wait()
await super().stop()
class StreamingAsyncBenchmarkClient(BenchmarkClient):
def __init__(self, address: str, config: control_pb2.ClientConfig,
hist: histogram.Histogram):
super().__init__(address, config, hist)
self._running = None
self._stopped = asyncio.Event()
async def _one_streaming_call(self):
call = self._stub.StreamingCall()
while self._running:
start_time = time.time()
await call.write(self._request)
await call.read()
self._record_query_time(time.time() - start_time)
await call.done_writing()
async def run(self):
await super().run()
self._running = True
senders = (self._one_streaming_call() for _ in range(self._concurrency))
await asyncio.gather(*senders)
self._stopped.set()
async def stop(self):
self._running = False
await self._stopped.wait()
await super().stop()
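A usage sketch of how these clients can be driven (compare _run_single_client in worker_servicer.py later in this diff); the address and sleep duration are placeholders, and config stands for a control_pb2.ClientConfig received from the test driver:

import asyncio
from tests.qps import histogram
from tests_aio.benchmark import benchmark_client

async def drive(config, address='localhost:50051'):  # placeholders
    hist = histogram.Histogram(config.histogram_params.resolution,
                               config.histogram_params.max_possible)
    client = benchmark_client.UnaryAsyncBenchmarkClient(address, config, hist)
    # run() returns only after stop() is called, so schedule it as a task.
    run_task = asyncio.get_event_loop().create_task(client.run())
    await asyncio.sleep(10)  # illustrative benchmark duration
    await client.stop()
    await run_task
    return hist.get_data()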

@ -0,0 +1,55 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python AsyncIO Benchmark Servicers."""
import asyncio
import logging
import unittest
from grpc.experimental import aio
from src.proto.grpc.testing import benchmark_service_pb2_grpc, messages_pb2
class BenchmarkServicer(benchmark_service_pb2_grpc.BenchmarkServiceServicer):
async def UnaryCall(self, request, unused_context):
payload = messages_pb2.Payload(body=b'\0' * request.response_size)
return messages_pb2.SimpleResponse(payload=payload)
async def StreamingFromServer(self, request, unused_context):
payload = messages_pb2.Payload(body=b'\0' * request.response_size)
# Sends response at full capacity!
while True:
yield messages_pb2.SimpleResponse(payload=payload)
async def StreamingCall(self, request_iterator, unused_context):
async for request in request_iterator:
payload = messages_pb2.Payload(body=b'\0' * request.response_size)
yield messages_pb2.SimpleResponse(payload=payload)
class GenericBenchmarkServicer(
benchmark_service_pb2_grpc.BenchmarkServiceServicer):
"""Generic (no-codec) Server implementation for the Benchmark service."""
def __init__(self, resp_size):
self._response = '\0' * resp_size
async def UnaryCall(self, unused_request, unused_context):
return self._response
async def StreamingCall(self, request_iterator, unused_context):
async for _ in request_iterator:
yield self._response

@ -17,28 +17,16 @@ import logging
import unittest
from grpc.experimental import aio
-from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import benchmark_service_pb2_grpc
+from tests_aio.benchmark import benchmark_servicer
-class BenchmarkServer(benchmark_service_pb2_grpc.BenchmarkServiceServicer):
-    async def UnaryCall(self, request, context):
-        payload = messages_pb2.Payload(body=b'\0' * request.response_size)
-        return messages_pb2.SimpleResponse(payload=payload)
-    async def StreamingFromServer(self, request, context):
-        payload = messages_pb2.Payload(body=b'\0' * request.response_size)
-        # Sends response at full capacity!
-        while True:
-            yield messages_pb2.SimpleResponse(payload=payload)
async def _start_async_server():
    server = aio.server()
    port = server.add_insecure_port('localhost:%s' % 50051)
-    servicer = BenchmarkServer()
+    servicer = benchmark_servicer.BenchmarkServicer()
    benchmark_service_pb2_grpc.add_BenchmarkServiceServicer_to_server(
        servicer, server)

@ -0,0 +1,58 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import asyncio
import logging
from grpc.experimental import aio
from src.proto.grpc.testing import worker_service_pb2_grpc
from tests_aio.benchmark import worker_servicer
async def run_worker_server(port: int) -> None:
aio.init_grpc_aio()
server = aio.server()
servicer = worker_servicer.WorkerServicer()
worker_service_pb2_grpc.add_WorkerServiceServicer_to_server(
servicer, server)
server.add_insecure_port('[::]:{}'.format(port))
await server.start()
await servicer.wait_for_quit()
await server.stop(None)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(
description='gRPC Python performance testing worker')
parser.add_argument('--driver_port',
type=int,
dest='port',
help='The port the worker should listen on')
parser.add_argument('--uvloop',
action='store_true',
help='Use uvloop or not')
args = parser.parse_args()
if args.uvloop:
import uvloop
uvloop.install()
asyncio.get_event_loop().run_until_complete(run_worker_server(args.port))

@ -0,0 +1,367 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import collections
import logging
import multiprocessing
import os
import sys
import time
from typing import Tuple
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import (benchmark_service_pb2_grpc, control_pb2,
stats_pb2, worker_service_pb2_grpc)
from tests.qps import histogram
from tests.unit import resources
from tests.unit.framework.common import get_socket
from tests_aio.benchmark import benchmark_client, benchmark_servicer
_NUM_CORES = multiprocessing.cpu_count()
_WORKER_ENTRY_FILE = os.path.join(
os.path.split(os.path.abspath(__file__))[0], 'worker.py')
_LOGGER = logging.getLogger(__name__)
class _SubWorker(
collections.namedtuple('_SubWorker',
['process', 'port', 'channel', 'stub'])):
"""A data class that holds information about a child qps worker."""
def _repr(self):
return f'<_SubWorker pid={self.process.pid} port={self.port}>'
def __repr__(self):
return self._repr()
def __str__(self):
return self._repr()
def _get_server_status(start_time: float, end_time: float,
port: int) -> control_pb2.ServerStatus:
"""Creates ServerStatus proto message."""
end_time = time.monotonic()
elapsed_time = end_time - start_time
    # TODO(lidiz) Collect accurate user/system time to compute QPS per core-second.
stats = stats_pb2.ServerStats(time_elapsed=elapsed_time,
time_user=elapsed_time,
time_system=elapsed_time)
return control_pb2.ServerStatus(stats=stats, port=port, cores=_NUM_CORES)
def _create_server(config: control_pb2.ServerConfig) -> Tuple[aio.Server, int]:
"""Creates a server object according to the ServerConfig."""
channel_args = tuple(
(arg.name,
arg.str_value) if arg.HasField('str_value') else (arg.name,
int(arg.int_value))
for arg in config.channel_args)
server = aio.server(options=channel_args + (('grpc.so_reuseport', 1),))
if config.server_type == control_pb2.ASYNC_SERVER:
servicer = benchmark_servicer.BenchmarkServicer()
benchmark_service_pb2_grpc.add_BenchmarkServiceServicer_to_server(
servicer, server)
elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
resp_size = config.payload_config.bytebuf_params.resp_size
servicer = benchmark_servicer.GenericBenchmarkServicer(resp_size)
method_implementations = {
'StreamingCall':
grpc.stream_stream_rpc_method_handler(servicer.StreamingCall),
'UnaryCall':
grpc.unary_unary_rpc_method_handler(servicer.UnaryCall),
}
handler = grpc.method_handlers_generic_handler(
'grpc.testing.BenchmarkService', method_implementations)
server.add_generic_rpc_handlers((handler,))
else:
raise NotImplementedError('Unsupported server type {}'.format(
config.server_type))
if config.HasField('security_params'): # Use SSL
server_creds = grpc.ssl_server_credentials(
((resources.private_key(), resources.certificate_chain()),))
port = server.add_secure_port('[::]:{}'.format(config.port),
server_creds)
else:
port = server.add_insecure_port('[::]:{}'.format(config.port))
return server, port
def _get_client_status(start_time: float, end_time: float,
qps_data: histogram.Histogram
) -> control_pb2.ClientStatus:
"""Creates ClientStatus proto message."""
latencies = qps_data.get_data()
end_time = time.monotonic()
elapsed_time = end_time - start_time
    # TODO(lidiz) Collect accurate user/system time to compute QPS per core-second.
stats = stats_pb2.ClientStats(latencies=latencies,
time_elapsed=elapsed_time,
time_user=elapsed_time,
time_system=elapsed_time)
return control_pb2.ClientStatus(stats=stats)
def _create_client(server: str, config: control_pb2.ClientConfig,
qps_data: histogram.Histogram
) -> benchmark_client.BenchmarkClient:
"""Creates a client object according to the ClientConfig."""
if config.load_params.WhichOneof('load') != 'closed_loop':
raise NotImplementedError(
f'Unsupported load parameter {config.load_params}')
if config.client_type == control_pb2.ASYNC_CLIENT:
if config.rpc_type == control_pb2.UNARY:
client_type = benchmark_client.UnaryAsyncBenchmarkClient
elif config.rpc_type == control_pb2.STREAMING:
client_type = benchmark_client.StreamingAsyncBenchmarkClient
else:
raise NotImplementedError(
f'Unsupported rpc_type [{config.rpc_type}]')
else:
raise NotImplementedError(
f'Unsupported client type {config.client_type}')
return client_type(server, config, qps_data)
def _pick_an_unused_port() -> int:
"""Picks an unused TCP port."""
_, port, sock = get_socket()
sock.close()
return port
async def _create_sub_worker() -> _SubWorker:
"""Creates a child qps worker as a subprocess."""
port = _pick_an_unused_port()
_LOGGER.info('Creating sub worker at port [%d]...', port)
process = await asyncio.create_subprocess_exec(sys.executable,
_WORKER_ENTRY_FILE,
'--driver_port', str(port))
_LOGGER.info('Created sub worker process for port [%d] at pid [%d]', port,
process.pid)
channel = aio.insecure_channel(f'localhost:{port}')
_LOGGER.info('Waiting for sub worker at port [%d]', port)
await channel.channel_ready()
stub = worker_service_pb2_grpc.WorkerServiceStub(channel)
return _SubWorker(
process=process,
port=port,
channel=channel,
stub=stub,
)
class WorkerServicer(worker_service_pb2_grpc.WorkerServiceServicer):
"""Python Worker Server implementation."""
def __init__(self):
self._loop = asyncio.get_event_loop()
self._quit_event = asyncio.Event()
async def _run_single_server(self, config, request_iterator, context):
server, port = _create_server(config)
await server.start()
_LOGGER.info('Server started at port [%d]', port)
start_time = time.monotonic()
await context.write(_get_server_status(start_time, start_time, port))
async for request in request_iterator:
end_time = time.monotonic()
status = _get_server_status(start_time, end_time, port)
if request.mark.reset:
start_time = end_time
await context.write(status)
await server.stop(None)
async def RunServer(self, request_iterator, context):
config_request = await context.read()
config = config_request.setup
_LOGGER.info('Received ServerConfig: %s', config)
if config.server_processes <= 0:
_LOGGER.info('Using server_processes == [%d]', _NUM_CORES)
config.server_processes = _NUM_CORES
if config.port == 0:
config.port = _pick_an_unused_port()
_LOGGER.info('Port picked [%d]', config.port)
if config.server_processes == 1:
# If server_processes == 1, start the server in this process.
await self._run_single_server(config, request_iterator, context)
else:
# If server_processes > 1, offload to other processes.
sub_workers = await asyncio.gather(*(
_create_sub_worker() for _ in range(config.server_processes)))
calls = [worker.stub.RunServer() for worker in sub_workers]
config_request.setup.server_processes = 1
for call in calls:
await call.write(config_request)
# An empty status indicates the peer is ready
await call.read()
start_time = time.monotonic()
await context.write(
_get_server_status(
start_time,
start_time,
config.port,
))
_LOGGER.info('Servers are ready to serve.')
async for request in request_iterator:
end_time = time.monotonic()
for call in calls:
await call.write(request)
                    # Reports from sub workers don't matter here
await call.read()
status = _get_server_status(
start_time,
end_time,
config.port,
)
if request.mark.reset:
start_time = end_time
await context.write(status)
for call in calls:
await call.done_writing()
for worker in sub_workers:
await worker.stub.QuitWorker(control_pb2.Void())
await worker.channel.close()
_LOGGER.info('Waiting for [%s] to quit...', worker)
await worker.process.wait()
async def _run_single_client(self, config, request_iterator, context):
running_tasks = []
qps_data = histogram.Histogram(config.histogram_params.resolution,
config.histogram_params.max_possible)
start_time = time.monotonic()
        # Create a client for each channel as an asyncio.Task
for i in range(config.client_channels):
server = config.server_targets[i % len(config.server_targets)]
client = _create_client(server, config, qps_data)
_LOGGER.info('Client created against server [%s]', server)
running_tasks.append(self._loop.create_task(client.run()))
end_time = time.monotonic()
await context.write(_get_client_status(start_time, end_time, qps_data))
# Respond to stat requests
async for request in request_iterator:
end_time = time.monotonic()
status = _get_client_status(start_time, end_time, qps_data)
if request.mark.reset:
qps_data.reset()
start_time = time.monotonic()
await context.write(status)
        # Clean up the clients
for task in running_tasks:
task.cancel()
async def RunClient(self, request_iterator, context):
config_request = await context.read()
config = config_request.setup
_LOGGER.info('Received ClientConfig: %s', config)
if config.client_processes <= 0:
_LOGGER.info('client_processes can\'t be [%d]',
config.client_processes)
_LOGGER.info('Using client_processes == [%d]', _NUM_CORES)
config.client_processes = _NUM_CORES
if config.client_processes == 1:
# If client_processes == 1, run the benchmark in this process.
await self._run_single_client(config, request_iterator, context)
else:
# If client_processes > 1, offload the work to other processes.
sub_workers = await asyncio.gather(*(
_create_sub_worker() for _ in range(config.client_processes)))
calls = [worker.stub.RunClient() for worker in sub_workers]
config_request.setup.client_processes = 1
for call in calls:
await call.write(config_request)
# An empty status indicates the peer is ready
await call.read()
start_time = time.monotonic()
result = histogram.Histogram(config.histogram_params.resolution,
config.histogram_params.max_possible)
end_time = time.monotonic()
await context.write(_get_client_status(start_time, end_time,
result))
async for request in request_iterator:
end_time = time.monotonic()
for call in calls:
_LOGGER.debug('Fetching status...')
await call.write(request)
sub_status = await call.read()
result.merge(sub_status.stats.latencies)
_LOGGER.debug('Update from sub worker count=[%d]',
sub_status.stats.latencies.count)
status = _get_client_status(start_time, end_time, result)
if request.mark.reset:
result.reset()
start_time = time.monotonic()
_LOGGER.debug('Reporting count=[%d]',
status.stats.latencies.count)
await context.write(status)
for call in calls:
await call.done_writing()
for worker in sub_workers:
await worker.stub.QuitWorker(control_pb2.Void())
await worker.channel.close()
_LOGGER.info('Waiting for sub worker [%s] to quit...', worker)
await worker.process.wait()
_LOGGER.info('Sub worker [%s] quit', worker)
@staticmethod
async def CoreCount(unused_request, unused_context):
return control_pb2.CoreResponse(cores=_NUM_CORES)
async def QuitWorker(self, unused_request, unused_context):
_LOGGER.info('QuitWorker command received.')
self._quit_event.set()
return control_pb2.Void()
async def wait_for_quit(self):
await self._quit_event.wait()

@ -0,0 +1,29 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package(default_testonly = 1)
py_test(
name = "health_servicer_test",
size = "small",
srcs = ["health_servicer_test.py"],
imports = ["../../"],
python_version = "PY3",
deps = [
"//src/python/grpcio/grpc:grpcio",
"//src/python/grpcio_health_checking/grpc_health/v1:grpc_health",
"//src/python/grpcio_tests/tests/unit/framework/common",
"//src/python/grpcio_tests/tests_aio/unit:_test_base",
],
)

@ -0,0 +1,13 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

@ -0,0 +1,262 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests AsyncIO version of grpcio-health-checking."""
import asyncio
import logging
import time
import random
import unittest
import grpc
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from grpc.experimental import aio
from tests.unit.framework.common import test_constants
from tests_aio.unit._test_base import AioTestBase
_SERVING_SERVICE = 'grpc.test.TestServiceServing'
_UNKNOWN_SERVICE = 'grpc.test.TestServiceUnknown'
_NOT_SERVING_SERVICE = 'grpc.test.TestServiceNotServing'
_WATCH_SERVICE = 'grpc.test.WatchService'
_LARGE_NUMBER_OF_STATUS_CHANGES = 1000
async def _pipe_to_queue(call, queue):
async for response in call:
await queue.put(response)
class HealthServicerTest(AioTestBase):
async def setUp(self):
self._servicer = health.aio.HealthServicer()
await self._servicer.set(health.OVERALL_HEALTH,
health_pb2.HealthCheckResponse.SERVING)
await self._servicer.set(_SERVING_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
await self._servicer.set(_UNKNOWN_SERVICE,
health_pb2.HealthCheckResponse.UNKNOWN)
await self._servicer.set(_NOT_SERVING_SERVICE,
health_pb2.HealthCheckResponse.NOT_SERVING)
self._server = aio.server()
port = self._server.add_insecure_port('[::]:0')
health_pb2_grpc.add_HealthServicer_to_server(self._servicer,
self._server)
await self._server.start()
self._channel = aio.insecure_channel('localhost:%d' % port)
self._stub = health_pb2_grpc.HealthStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
async def test_check_empty_service(self):
request = health_pb2.HealthCheckRequest()
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
async def test_check_serving_service(self):
request = health_pb2.HealthCheckRequest(service=_SERVING_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
async def test_check_unknown_service(self):
request = health_pb2.HealthCheckRequest(service=_UNKNOWN_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.UNKNOWN, resp.status)
async def test_check_not_serving_service(self):
request = health_pb2.HealthCheckRequest(service=_NOT_SERVING_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
resp.status)
async def test_check_not_found_service(self):
request = health_pb2.HealthCheckRequest(service='not-found')
with self.assertRaises(aio.AioRpcError) as context:
await self._stub.Check(request)
self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code())
async def test_health_service_name(self):
self.assertEqual(health.SERVICE_NAME, 'grpc.health.v1.Health')
async def test_watch_empty_service(self):
request = health_pb2.HealthCheckRequest(service=health.OVERALL_HEALTH)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
call.cancel()
await task
self.assertTrue(queue.empty())
async def test_watch_new_service(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.NOT_SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
(await queue.get()).status)
call.cancel()
await task
self.assertTrue(queue.empty())
async def test_watch_service_isolation(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
await self._servicer.set('some-other-service',
health_pb2.HealthCheckResponse.SERVING)
        # A health status change in a different service should be isolated;
        # hence, no additional notification should be observed here.
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(queue.get(), test_constants.SHORT_TIMEOUT)
call.cancel()
await task
self.assertTrue(queue.empty())
async def test_two_watchers(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
queue1 = asyncio.Queue()
queue2 = asyncio.Queue()
call1 = self._stub.Watch(request)
call2 = self._stub.Watch(request)
task1 = self.loop.create_task(_pipe_to_queue(call1, queue1))
task2 = self.loop.create_task(_pipe_to_queue(call2, queue2))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue1.get()).status)
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue2.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue1.get()).status)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue2.get()).status)
call1.cancel()
call2.cancel()
await task1
await task2
self.assertTrue(queue1.empty())
self.assertTrue(queue2.empty())
async def test_cancelled_watch_removed_from_watch_list(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
call.cancel()
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
await task
# Wait for the serving coroutine to process client cancellation.
timeout = time.monotonic() + test_constants.TIME_ALLOWANCE
while (time.monotonic() < timeout and self._servicer._server_watchers):
await asyncio.sleep(1)
self.assertFalse(self._servicer._server_watchers,
'There should not be any watcher left')
self.assertTrue(queue.empty())
async def test_graceful_shutdown(self):
request = health_pb2.HealthCheckRequest(service=health.OVERALL_HEALTH)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
await self._servicer.enter_graceful_shutdown()
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
(await queue.get()).status)
# This should be a no-op.
await self._servicer.set(health.OVERALL_HEALTH,
health_pb2.HealthCheckResponse.SERVING)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
resp.status)
call.cancel()
await task
self.assertTrue(queue.empty())
async def test_no_duplicate_status(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
last_status = health_pb2.HealthCheckResponse.SERVICE_UNKNOWN
for _ in range(_LARGE_NUMBER_OF_STATUS_CHANGES):
if random.randint(0, 1) == 0:
status = health_pb2.HealthCheckResponse.SERVING
else:
status = health_pb2.HealthCheckResponse.NOT_SERVING
await self._servicer.set(_WATCH_SERVICE, status)
if status != last_status:
self.assertEqual(status, (await queue.get()).status)
last_status = status
call.cancel()
await task
self.assertTrue(queue.empty())
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -1,5 +1,6 @@
[
    "_sanity._sanity_test.AioSanityTest",
    "health_check.health_servicer_test.HealthServicerTest",
    "interop.local_interop_test.InsecureLocalInteropTest",
    "interop.local_interop_test.SecureLocalInteropTest",
    "unit.abort_test.TestAbort",

@ -0,0 +1,21 @@
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from tests import _loader
from tests import _runner
Loader = _loader.Loader
Runner = _runner.Runner

@ -0,0 +1,41 @@
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package(
default_testonly = True,
)
GRPCIO_PY3_ONLY_TESTS_UNIT = glob([
"*_test.py",
])
[
py_test(
name = test_file_name[:-len(".py")],
size = "small",
srcs = [test_file_name],
main = test_file_name,
python_version = "PY3",
srcs_version = "PY3",
deps = [
"//src/python/grpcio/grpc:grpcio",
"//src/python/grpcio_tests/tests/testing",
"//src/python/grpcio_tests/tests/unit:resources",
"//src/python/grpcio_tests/tests/unit:test_common",
"//src/python/grpcio_tests/tests/unit/framework/common",
"@six",
],
)
for test_file_name in GRPCIO_PY3_ONLY_TESTS_UNIT
]

@ -0,0 +1,13 @@
# Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

@ -0,0 +1,276 @@
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Simple Stubs."""
# TODO(https://github.com/grpc/grpc/issues/21965): Run under setuptools.
import os
_MAXIMUM_CHANNELS = 10
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"] = "1"
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"] = str(_MAXIMUM_CHANNELS)
import contextlib
import datetime
import inspect
import logging
import unittest
import sys
import time
from typing import Callable, Optional
from tests.unit import test_common
import grpc
import grpc.experimental
_REQUEST = b"0000"
_CACHE_EPOCHS = 8
_CACHE_TRIALS = 6
_SERVER_RESPONSE_COUNT = 10
_CLIENT_REQUEST_COUNT = _SERVER_RESPONSE_COUNT
_STRESS_EPOCHS = _MAXIMUM_CHANNELS * 10
_UNARY_UNARY = "/test/UnaryUnary"
_UNARY_STREAM = "/test/UnaryStream"
_STREAM_UNARY = "/test/StreamUnary"
_STREAM_STREAM = "/test/StreamStream"
def _unary_unary_handler(request, context):
return request
def _unary_stream_handler(request, context):
for _ in range(_SERVER_RESPONSE_COUNT):
yield request
def _stream_unary_handler(request_iterator, context):
request = None
for single_request in request_iterator:
request = single_request
return request
def _stream_stream_handler(request_iterator, context):
for request in request_iterator:
yield request
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return grpc.unary_unary_rpc_method_handler(_unary_unary_handler)
elif handler_call_details.method == _UNARY_STREAM:
return grpc.unary_stream_rpc_method_handler(_unary_stream_handler)
elif handler_call_details.method == _STREAM_UNARY:
return grpc.stream_unary_rpc_method_handler(_stream_unary_handler)
elif handler_call_details.method == _STREAM_STREAM:
return grpc.stream_stream_rpc_method_handler(_stream_stream_handler)
else:
raise NotImplementedError()
def _time_invocation(to_time: Callable[[], None]) -> datetime.timedelta:
start = datetime.datetime.now()
to_time()
return datetime.datetime.now() - start
@contextlib.contextmanager
def _server(credentials: Optional[grpc.ServerCredentials]):
try:
server = test_common.test_server()
target = '[::]:0'
if credentials is None:
port = server.add_insecure_port(target)
else:
port = server.add_secure_port(target, credentials)
server.add_generic_rpc_handlers((_GenericHandler(),))
server.start()
yield port
finally:
server.stop(None)
class SimpleStubsTest(unittest.TestCase):
def assert_cached(self, to_check: Callable[[str], None]) -> None:
"""Asserts that a function caches intermediate data/state.
To be specific, given a function whose caching behavior is
deterministic in the value of a supplied string, this function asserts
that, on average, subsequent invocations of the function for a specific
string are faster than first invocations with that same string.
Args:
to_check: A function returning nothing, that caches values based on
an arbitrary supplied string.
"""
initial_runs = []
cached_runs = []
for epoch in range(_CACHE_EPOCHS):
runs = []
text = str(epoch)
for trial in range(_CACHE_TRIALS):
runs.append(_time_invocation(lambda: to_check(text)))
initial_runs.append(runs[0])
cached_runs.extend(runs[1:])
average_cold = sum((run for run in initial_runs),
datetime.timedelta()) / len(initial_runs)
average_warm = sum((run for run in cached_runs),
datetime.timedelta()) / len(cached_runs)
self.assertLess(average_warm, average_cold)
def assert_eventually(self,
predicate: Callable[[], bool],
*,
timeout: Optional[datetime.timedelta] = None,
message: Optional[Callable[[], str]] = None) -> None:
message = message or (lambda: "Proposition did not evaluate to true")
timeout = timeout or datetime.timedelta(seconds=10)
end = datetime.datetime.now() + timeout
while datetime.datetime.now() < end:
if predicate():
break
time.sleep(0.5)
else:
self.fail(message() + " after " + str(timeout))
def test_unary_unary_insecure(self):
with _server(None) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.experimental.
insecure_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_unary_unary_secure(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_channel_credentials_default(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(_REQUEST, target,
_UNARY_UNARY)
self.assertEqual(_REQUEST, response)
def test_channels_cached(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
test_name = inspect.stack()[0][3]
args = (_REQUEST, target, _UNARY_UNARY)
kwargs = {"channel_credentials": grpc.local_channel_credentials()}
def _invoke(seed: str):
run_kwargs = dict(kwargs)
run_kwargs["options"] = ((test_name + seed, ""),)
grpc.experimental.unary_unary(*args, **run_kwargs)
self.assert_cached(_invoke)
def test_channels_evicted(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() == 0,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} remain"
)
def test_total_channels_enforced(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for i in range(_STRESS_EPOCHS):
# Ensure we get a new channel each time.
options = (("foo", str(i)),)
# Send messages at full blast.
grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
options=options,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() <= _MAXIMUM_CHANNELS + 1,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} channels remain"
)
def test_unary_stream(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.unary_stream(
_REQUEST,
target,
_UNARY_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
def test_stream_unary(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.stream_unary(
request_iter(),
target,
_STREAM_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_stream_stream(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.stream_stream(
request_iter(),
target,
_STREAM_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main(verbosity=2)

@ -274,7 +274,7 @@
endif
ifeq ($(SYSTEM),MINGW32)
-LIBS = m pthread ws2_32
+LIBS = m pthread ws2_32 dbghelp
LDFLAGS += -pthread
endif

@ -481,8 +481,8 @@ TEST_F(ChannelzRegistryBasedTest, GetTopChannelsUuidAfterCompaction) {
    // these will delete and unregister themselves after this block.
    std::vector<std::unique_ptr<ChannelFixture>> odd_channels;
    for (int i = 0; i < kLoopIterations; i++) {
-      odd_channels.push_back(grpc_core::MakeUnique<ChannelFixture>());
-      even_channels.push_back(grpc_core::MakeUnique<ChannelFixture>());
+      odd_channels.push_back(absl::make_unique<ChannelFixture>());
+      even_channels.push_back(absl::make_unique<ChannelFixture>());
    }
  }
  std::string json_str = ChannelzRegistry::GetTopChannels(0);

@ -75,7 +75,7 @@ static grpc_ares_request* my_dns_lookup_ares_locked(
    error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Forced Failure");
  } else {
    gpr_mu_unlock(&g_mu);
-    *addresses = grpc_core::MakeUnique<grpc_core::ServerAddressList>();
+    *addresses = absl::make_unique<grpc_core::ServerAddressList>();
    grpc_resolved_address dummy_resolved_address;
    memset(&dummy_resolved_address, 0, sizeof(dummy_resolved_address));
    dummy_resolved_address.len = 123;
