@ -0,0 +1,3 @@ |
||||
# load bazelrc from the legacy location |
||||
# as recommended in https://github.com/bazelbuild/bazel/issues/6319 |
||||
import %workspace%/tools/bazel.rc |
@ -1,6 +1,6 @@ |
||||
--- |
||||
Checks: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size' |
||||
WarningsAsErrors: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size' |
||||
Checks: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size,performance-*,bugprone-*' |
||||
WarningsAsErrors: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size,performance-*,bugprone-*' |
||||
CheckOptions: |
||||
- key: readability-function-size.StatementThreshold |
||||
value: '450' |
||||
|
@ -0,0 +1,2 @@ |
||||
daysUntilLock: 90 |
||||
lockComment: false |
@ -0,0 +1,18 @@ |
||||
mergeable: |
||||
pull_requests: |
||||
label: |
||||
and: |
||||
- must_exclude: |
||||
regex: '^disposition/DO NOT MERGE' |
||||
message: 'Pull request marked not mergeable' |
||||
- or: |
||||
- and: |
||||
- must_include: |
||||
regex: 'release notes: yes' |
||||
message: 'Please include release note: yes' |
||||
- must_include: |
||||
regex: '^lang\/' |
||||
message: 'Please include a language label' |
||||
- must_include: |
||||
regex: 'release notes: no' |
||||
message: 'Please include release note: no' |
@ -0,0 +1,100 @@ |
||||
[MASTER] |
||||
ignore= |
||||
src/python/grpcio/grpc/beta, |
||||
src/python/grpcio/grpc/framework, |
||||
src/python/grpcio/grpc/framework/common, |
||||
src/python/grpcio/grpc/framework/foundation, |
||||
src/python/grpcio/grpc/framework/interfaces, |
||||
|
||||
[VARIABLES] |
||||
|
||||
# TODO(https://github.com/PyCQA/pylint/issues/1345): How does the inspection |
||||
# not include "unused_" and "ignored_" by default? |
||||
dummy-variables-rgx=^ignored_|^unused_ |
||||
|
||||
[DESIGN] |
||||
|
||||
# NOTE(nathaniel): Not particularly attached to this value; it just seems to |
||||
# be what works for us at the moment (excepting the dead-code-walking Beta |
||||
# API). |
||||
max-args=6 |
||||
|
||||
[MISCELLANEOUS] |
||||
|
||||
# NOTE(nathaniel): We are big fans of "TODO(<issue link>): " and |
||||
# "NOTE(<username or issue link>): ". We do not allow "TODO:", |
||||
# "TODO(<username>):", "FIXME:", or anything else. |
||||
notes=FIXME,XXX |
||||
|
||||
[MESSAGES CONTROL] |
||||
|
||||
disable= |
||||
# -- START OF EXAMPLE-SPECIFIC SUPPRESSIONS -- |
||||
no-self-use, |
||||
unused-argument, |
||||
unused-variable, |
||||
# -- END OF EXAMPLE-SPECIFIC SUPPRESSIONS -- |
||||
|
||||
# TODO(https://github.com/PyCQA/pylint/issues/59#issuecomment-283774279): |
||||
# Enable cyclic-import after a 1.7-or-later pylint release that |
||||
# recognizes our disable=cyclic-import suppressions. |
||||
cyclic-import, |
||||
# TODO(https://github.com/grpc/grpc/issues/8622): Enable this after the |
||||
# Beta API is removed. |
||||
duplicate-code, |
||||
# TODO(https://github.com/grpc/grpc/issues/261): Doesn't seem to |
||||
# understand enum and concurrent.futures; look into this later with the |
||||
# latest pylint version. |
||||
import-error, |
||||
# TODO(https://github.com/grpc/grpc/issues/261): Enable this one. |
||||
# Should take a little configuration but not much. |
||||
invalid-name, |
||||
# TODO(https://github.com/grpc/grpc/issues/261): This doesn't seem to |
||||
# work for now? Try with a later pylint? |
||||
locally-disabled, |
||||
# NOTE(nathaniel): What even is this? *Enabling* an inspection results |
||||
# in a warning? How does that encourage more analysis and coverage? |
||||
locally-enabled, |
||||
# NOTE(nathaniel): We don't write doc strings for most private code |
||||
# elements. |
||||
missing-docstring, |
||||
# NOTE(nathaniel): In numeric comparisons it is better to have the |
||||
# lesser (or lesser-or-equal-to) quantity on the left when the |
||||
# expression is true than it is to worry about which is an identifier |
||||
# and which a literal value. |
||||
misplaced-comparison-constant, |
||||
# NOTE(nathaniel): Our completely abstract interface classes don't have |
||||
# constructors. |
||||
no-init, |
||||
# TODO(https://github.com/grpc/grpc/issues/261): Doesn't yet play |
||||
# nicely with some of our code being implemented in Cython. Maybe in a |
||||
# later version? |
||||
no-name-in-module, |
||||
# TODO(https://github.com/grpc/grpc/issues/261): Suppress these where |
||||
# the odd shape of the authentication portion of the API forces them on |
||||
# us and enable everywhere else. |
||||
protected-access, |
||||
# NOTE(nathaniel): Pylint and I will probably never agree on this. |
||||
too-few-public-methods, |
||||
# NOTE(nathaniel): Pylint and I will probably never agree on this for
||||
# private classes. For public classes maybe? |
||||
too-many-instance-attributes, |
||||
# NOTE(nathaniel): Some of our modules have a lot of lines... of |
||||
# specification and documentation. Maybe if this were |
||||
# lines-of-code-based we would use it. |
||||
too-many-lines, |
||||
# TODO(https://github.com/grpc/grpc/issues/261): Maybe we could have |
||||
# this one if we extracted just a few more helper functions... |
||||
too-many-nested-blocks, |
||||
# TODO(https://github.com/grpc/grpc/issues/261): Disable unnecessary |
||||
# super-init requirement for abstract class implementations for now. |
||||
super-init-not-called, |
||||
# NOTE(nathaniel): A single statement that always returns program |
||||
# control is better than two statements the first of which sometimes |
||||
# returns program control and the second of which always returns |
||||
# program control. Probably generally, but definitely in the cases of |
||||
# if:/else: and for:/else:. |
||||
useless-else-on-loop, |
||||
no-else-return, |
||||
# NOTE(lidiz): Python 3 make object inheritance default, but not PY2 |
||||
useless-object-inheritance, |
@ -1,30 +0,0 @@ |
||||
{ |
||||
"version": "0.2.0", |
||||
"configurations": [ |
||||
{ |
||||
"type": "node", |
||||
"request": "launch", |
||||
"name": "Mocha Tests", |
||||
"cwd": "${workspaceRoot}", |
||||
"runtimeExecutable": "${workspaceRoot}/node_modules/.bin/mocha", |
||||
"windows": { |
||||
"runtimeExecutable": "${workspaceRoot}/node_modules/.bin/mocha.cmd" |
||||
}, |
||||
"runtimeArgs": [ |
||||
"-u", |
||||
"tdd", |
||||
"--timeout", |
||||
"999999", |
||||
"--colors", |
||||
"${workspaceRoot}/src/node/test" |
||||
], |
||||
"internalConsoleOptions": "openOnSessionStart" |
||||
}, |
||||
{ |
||||
"type": "node", |
||||
"request": "attach", |
||||
"name": "Attach to Process", |
||||
"port": 5858 |
||||
} |
||||
] |
||||
} |
@ -0,0 +1,63 @@ |
||||
# gRPC Concepts Overview |
||||
|
||||
Remote Procedure Calls (RPCs) provide a useful abstraction for building |
||||
distributed applications and services. The libraries in this repository |
||||
provide a concrete implementation of the gRPC protocol, layered over HTTP/2. |
||||
These libraries enable communication between clients and servers using any |
||||
combination of the supported languages. |
||||
|
||||
|
||||
## Interface |
||||
|
||||
Developers using gRPC start with a language agnostic description of an RPC service (a collection |
||||
of methods). From this description, gRPC will generate client and server side interfaces |
||||
in any of the supported languages. The server implements |
||||
the service interface, which can be remotely invoked by the client interface. |
||||
|
||||
By default, gRPC uses [Protocol Buffers](https://github.com/google/protobuf) as the |
||||
Interface Definition Language (IDL) for describing both the service interface |
||||
and the structure of the payload messages. It is possible to use other |
||||
alternatives if desired. |
||||
|
||||
### Invoking & handling remote calls |
||||
Starting from an interface definition in a .proto file, gRPC provides |
||||
Protocol Compiler plugins that generate Client- and Server-side APIs. |
||||
gRPC users call into these APIs on the Client side and implement |
||||
the corresponding API on the server side. |
||||
|
||||
#### Synchronous vs. asynchronous |
||||
Synchronous RPC calls, that block until a response arrives from the server, are |
||||
the closest approximation to the abstraction of a procedure call that RPC |
||||
aspires to. |
||||
|
||||
On the other hand, networks are inherently asynchronous and in many scenarios, |
||||
it is desirable to have the ability to start RPCs without blocking the current |
||||
thread. |
||||
|
||||
The gRPC programming surface in most languages comes in both synchronous and |
||||
asynchronous flavors. |
||||
|
||||
|
||||
## Streaming |
||||
|
||||
gRPC supports streaming semantics, where either the client or the server (or both) |
||||
send a stream of messages on a single RPC call. The most general case is |
||||
Bidirectional Streaming where a single gRPC call establishes a stream in which both |
||||
the client and the server can send a stream of messages to each other. The streamed |
||||
messages are delivered in the order they were sent. |
||||
|
||||
|
||||
# Protocol |
||||
|
||||
The [gRPC protocol](doc/PROTOCOL-HTTP2.md) specifies the abstract requirements for communication between |
||||
clients and servers. A concrete embedding over HTTP/2 completes the picture by |
||||
fleshing out the details of each of the required operations. |
||||
|
||||
## Abstract gRPC protocol |
||||
A gRPC call comprises a bidirectional stream of messages, initiated by the client. In the client-to-server direction, this stream begins with a mandatory `Call Header`, followed by optional `Initial-Metadata`, followed by zero or more `Payload Messages`. The server-to-client direction contains an optional `Initial-Metadata`, followed by zero or more `Payload Messages` terminated with a mandatory `Status` and optional `Status-Metadata` (a.k.a., `Trailing-Metadata`).
||||
|
||||
## Implementation over HTTP/2 |
||||
The abstract protocol defined above is implemented over [HTTP/2](https://http2.github.io/). gRPC bidirectional streams are mapped to HTTP/2 streams. The contents of `Call Header` and `Initial Metadata` are sent as HTTP/2 headers and subject to HPACK compression. `Payload Messages` are serialized into a byte stream of length prefixed gRPC frames which are then fragmented into HTTP/2 frames at the sender and reassembled at the receiver. `Status` and `Trailing-Metadata` are sent as HTTP/2 trailing headers (a.k.a., trailers). |
||||
|
||||
## Flow Control |
||||
gRPC uses the flow control mechanism in HTTP/2. This enables fine-grained control of memory used for buffering in-flight messages. |
@ -0,0 +1,43 @@ |
||||
# Troubleshooting gRPC |
||||
|
||||
This guide is for troubleshooting gRPC implementations based on C core library (sources for most of them are living in the `grpc/grpc` repository). |
||||
|
||||
## Enabling extra logging and tracing |
||||
|
||||
Extra logging can be very useful for diagnosing problems. All gRPC implementations based on C core library support |
||||
the `GRPC_VERBOSITY` and `GRPC_TRACE` environment variables that can be used to increase the amount of information |
||||
that gets printed to stderr. |
||||
|
||||
## GRPC_VERBOSITY |
||||
|
||||
`GRPC_VERBOSITY` is used to set the minimum level of log messages printed by gRPC (supported values are `DEBUG`, `INFO` and `ERROR`). If this environment variable is unset, only `ERROR` logs will be printed. |
||||
|
||||
## GRPC_TRACE |
||||
|
||||
`GRPC_TRACE` can be used to enable extra logging for some internal gRPC components. Enabling the right traces can be invaluable |
||||
for diagnosing what is going wrong when things aren't working as intended. Possible values for `GRPC_TRACE` are listed in [Environment Variables Overview](doc/environment_variables.md).
||||
Multiple traces can be enabled at once (use a comma as the separator).
||||
|
||||
``` |
||||
# Enable debug logs for an application |
||||
GRPC_VERBOSITY=debug ./helloworld_application_using_grpc |
||||
``` |
||||
|
||||
``` |
||||
# Print information about invocations of low-level C core API. |
||||
# Note that trace logs of log level DEBUG won't be displayed. |
||||
# Also note that most tracers use log level INFO, so without setting
||||
# GRPC_VERBOSITY accordingly, no traces will be printed.
||||
GRPC_VERBOSITY=info GRPC_TRACE=api ./helloworld_application_using_grpc |
||||
``` |
||||
|
||||
``` |
||||
# Print info from 3 different tracers, including tracing logs with log level DEBUG |
||||
GRPC_VERBOSITY=debug GRPC_TRACE=tcp,http,api ./helloworld_application_using_grpc |
||||
``` |
||||
|
||||
Known limitations: `GRPC_TRACE=tcp` is currently not implemented for Windows (you won't see any tcp traces).
||||
|
||||
Please note that the `GRPC_TRACE` environment variable has nothing to do with gRPC's "tracing" feature (= tracing RPCs in |
||||
microservice environment to gain insight about how requests are processed by deployment), it is merely used to enable printing |
||||
of extra logs. |
@ -1,5 +1,67 @@ |
||||
workspace(name = "com_github_grpc_grpc") |
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") |
||||
load("//bazel:grpc_deps.bzl", "grpc_deps", "grpc_test_only_deps") |
||||
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") |
||||
|
||||
grpc_deps() |
||||
|
||||
grpc_test_only_deps() |
||||
|
||||
register_execution_platforms( |
||||
"//third_party/toolchains:local", |
||||
"//third_party/toolchains:local_large", |
||||
"//third_party/toolchains:rbe_windows", |
||||
) |
||||
|
||||
register_toolchains( |
||||
"//third_party/toolchains/bazel_0.23.2_rbe_windows:cc-toolchain-x64_windows", |
||||
) |
||||
|
||||
git_repository( |
||||
name = "io_bazel_rules_python", |
||||
commit = "8b5d0683a7d878b28fffe464779c8a53659fc645", |
||||
remote = "https://github.com/bazelbuild/rules_python.git", |
||||
) |
||||
|
||||
load("@io_bazel_rules_python//python:pip.bzl", "pip_repositories", "pip_import") |
||||
|
||||
pip_import( |
||||
name = "grpc_python_dependencies", |
||||
requirements = "//:requirements.bazel.txt", |
||||
) |
||||
|
||||
http_archive( |
||||
name = "cython", |
||||
build_file = "//third_party:cython.BUILD", |
||||
sha256 = "d68138a2381afbdd0876c3cb2a22389043fa01c4badede1228ee073032b07a27", |
||||
strip_prefix = "cython-c2b80d87658a8525ce091cbe146cb7eaa29fed5c", |
||||
urls = [ |
||||
"https://github.com/cython/cython/archive/c2b80d87658a8525ce091cbe146cb7eaa29fed5c.tar.gz", |
||||
], |
||||
) |
||||
|
||||
load("//bazel:grpc_python_deps.bzl", "grpc_python_deps") |
||||
|
||||
grpc_python_deps() |
||||
|
||||
load("@bazel_toolchains//rules:rbe_repo.bzl", "rbe_autoconfig") |
||||
|
||||
# Create toolchain configuration for remote execution. |
||||
rbe_autoconfig( |
||||
name = "rbe_default", |
||||
) |
||||
|
||||
load("@bazel_toolchains//rules:environments.bzl", "clang_env") |
||||
load("@bazel_skylib//lib:dicts.bzl", "dicts") |
||||
|
||||
# Create msan toolchain configuration for remote execution. |
||||
rbe_autoconfig( |
||||
name = "rbe_msan", |
||||
env = dicts.add( |
||||
clang_env(), |
||||
{ |
||||
"BAZEL_LINKOPTS": "-lc++:-lc++abi:-lm", |
||||
}, |
||||
), |
||||
) |
||||
|
@ -1,6 +1,6 @@ |
||||
set noparent |
||||
@nicolasnoble |
||||
@dgquintas |
||||
@jtattermusch |
||||
@a11r |
||||
@vjpai |
||||
|
||||
|
@ -1,71 +1,105 @@ |
||||
"""Generates and compiles C++ grpc stubs from proto_library rules.""" |
||||
|
||||
load("//:bazel/generate_cc.bzl", "generate_cc") |
||||
load("//bazel:generate_cc.bzl", "generate_cc") |
||||
load("//bazel:protobuf.bzl", "well_known_proto_libs") |
||||
|
||||
def cc_grpc_library(
        name,
        srcs,
        deps,
        proto_only = False,
        well_known_protos = False,
        generate_mocks = False,
        use_external = False,
        grpc_only = False,
        **kwargs):
    """Generates C++ grpc classes for services defined in a proto file.

    If grpc_only is True, this rule is compatible with proto_library and
    cc_proto_library native rules such that it expects proto_library target
    as srcs argument and generates only grpc library classes, expecting
    protobuf messages classes library (cc_proto_library target) to be passed in
    deps argument. By default grpc_only is False which makes this rule to behave
    in a backwards-compatible mode (trying to generate both proto and grpc
    classes).

    Assumes the generated classes will be used in cc_api_version = 2.

    Args:
      name (str): Name of rule.
      srcs (list): A single .proto file which contains services definitions,
        or if grpc_only parameter is True, a single proto_library which
        contains services descriptors.
      deps (list): A list of C++ proto_library (or cc_proto_library) which
        provides the compiled code of any message that the services depend on.
      proto_only (bool): If True, create only C++ proto classes library,
        avoid creating C++ grpc classes library (expect it in deps).
        Deprecated, use native cc_proto_library instead. False by default.
      well_known_protos (bool): Should this library additionally depend on
        well known protos. Deprecated, the well known protos should be
        specified as explicit dependencies of the proto_library target
        (passed in srcs parameter) instead. False by default.
      generate_mocks (bool): when True, Google Mock code for client stub is
        generated. False by default.
      use_external (bool): Not used.
      grpc_only (bool): if True, generate only grpc library, expecting
        protobuf messages library (cc_proto_library target) to be passed as
        deps. False by default (will become True by default eventually).
      **kwargs: rest of arguments, e.g., compatible_with and visibility
    """
    if len(srcs) > 1:
        fail("Only one srcs value supported", "srcs")
    if grpc_only and proto_only:
        fail("A mutually exclusive configuration is specified: grpc_only = True and proto_only = True")

    extra_deps = []
    proto_targets = []

    if not grpc_only:
        proto_target = "_" + name + "_only"
        cc_proto_target = name if proto_only else "_" + name + "_cc_proto"

        # Map each dep to the implicitly-created "_<dep>_only" proto_library,
        # preserving any package prefix before the ":".
        proto_deps = ["_" + dep + "_only" for dep in deps if dep.find(":") == -1]
        proto_deps += [dep.split(":")[0] + ":" + "_" + dep.split(":")[1] + "_only" for dep in deps if dep.find(":") != -1]
        if well_known_protos:
            proto_deps += well_known_proto_libs()

        native.proto_library(
            name = proto_target,
            srcs = srcs,
            deps = proto_deps,
            **kwargs
        )

        native.cc_proto_library(
            name = cc_proto_target,
            deps = [":" + proto_target],
            **kwargs
        )
        extra_deps.append(":" + cc_proto_target)
        proto_targets.append(proto_target)
    else:
        if not srcs:
            fail("srcs cannot be empty", "srcs")
        proto_targets += srcs

    if not proto_only:
        codegen_grpc_target = "_" + name + "_grpc_codegen"
        generate_cc(
            name = codegen_grpc_target,
            srcs = proto_targets,
            plugin = "@com_github_grpc_grpc//:grpc_cpp_plugin",
            well_known_protos = well_known_protos,
            generate_mocks = generate_mocks,
            **kwargs
        )

        native.cc_library(
            name = name,
            srcs = [":" + codegen_grpc_target],
            hdrs = [":" + codegen_grpc_target],
            deps = deps +
                   extra_deps +
                   ["@com_github_grpc_grpc//:grpc++_codegen_proto"],
            **kwargs
        )
||||
|
@ -0,0 +1,74 @@ |
||||
"""Custom rules for gRPC Python""" |
||||
|
||||
|
||||
# Adapted with modifications from |
||||
# tensorflow/tensorflow/core/platform/default/build_config.bzl |
||||
# Native Bazel rules don't exist yet to compile Cython code, but rules have |
||||
# been written at cython/cython and tensorflow/tensorflow. We branch from |
||||
# Tensorflow's version as it is more actively maintained and works for gRPC |
||||
# Python's needs. |
||||
def pyx_library(name, deps=[], py_deps=[], srcs=[], **kwargs):
    """Compiles a group of .pyx / .pxd / .py files.

    First runs Cython to create .cpp files for each input .pyx or .py + .pxd
    pair. Then builds a shared object for each, passing "deps" to each cc_binary
    rule (includes Python headers by default). Finally, creates a py_library rule
    with the shared objects and any pure Python "srcs", with py_deps as its
    dependencies; the shared objects can be imported like normal Python files.

    Args:
      name: Name for the rule.
      deps: C/C++ dependencies of the Cython (e.g. Numpy headers).
      py_deps: Pure Python dependencies of the final library.
      srcs: .py, .pyx, or .pxd files to either compile or pass through.
      **kwargs: Extra keyword arguments passed to the py_library.
    """

    # Partition the sources: .pyx files (and .py files that have a matching
    # .pxd sibling) get cythonized, plain .py files pass straight through,
    # and anything else is treated as a .pxd-style support file. __init__.py
    # is additionally made available to the cython invocations below.
    py_srcs = []
    pyx_srcs = []
    pxd_srcs = []
    for src in srcs:
        has_pxd_sibling = src.endswith(".py") and src[:-3] + ".pxd" in srcs
        if src.endswith(".pyx") or has_pxd_sibling:
            pyx_srcs.append(src)
        elif src.endswith(".py"):
            py_srcs.append(src)
        else:
            pxd_srcs.append(src)
        if src.endswith("__init__.py"):
            pxd_srcs.append(src)

    # Run Cython once per source to translate it into C++.
    for filename in pyx_srcs:
        native.genrule(
            name = filename + "_cython_translation",
            srcs = [filename],
            outs = [filename.split(".")[0] + ".cpp"],
            # Optionally use PYTHON_BIN_PATH on Linux platforms so that python 3
            # works. Windows has issues with cython_binary so skip PYTHON_BIN_PATH.
            cmd = "PYTHONHASHSEED=0 $(location @cython//:cython_binary) --cplus $(SRCS) --output-file $(OUTS)",
            tools = ["@cython//:cython_binary"] + pxd_srcs,
        )

    # Link each translated source into its own shared object.
    shared_objects = []
    for src in pyx_srcs:
        stem = src.split(".")[0]
        shared_object_name = stem + ".so"
        native.cc_binary(
            name = shared_object_name,
            srcs = [stem + ".cpp"],
            deps = deps + ["@local_config_python//:python_headers"],
            linkshared = 1,
        )
        shared_objects.append(shared_object_name)

    # Expose the pure-Python sources plus the shared objects as a py_library.
    native.py_library(
        name = name,
        srcs = py_srcs,
        deps = py_deps,
        srcs_version = "PY2AND3",
        data = shared_objects,
        **kwargs
    )
||||
|
@ -0,0 +1,8 @@ |
||||
load("//third_party/py:python_configure.bzl", "python_configure") |
||||
load("@io_bazel_rules_python//python:pip.bzl", "pip_repositories") |
||||
load("@grpc_python_dependencies//:requirements.bzl", "pip_install") |
||||
|
||||
def grpc_python_deps():
    """Loads the Python-specific dependencies of gRPC's Bazel build.

    Configures the local Python toolchain as @local_config_python, then
    registers the pip repositories and installs the pinned pip packages.
    """
    python_configure(name = "local_config_python")
    pip_repositories()
    pip_install()
@ -0,0 +1,104 @@ |
||||
"""Utility functions for generating protobuf code.""" |
||||
|
||||
_PROTO_EXTENSION = ".proto" |
||||
|
||||
def well_known_proto_libs():
    """Returns labels of the well-known-type proto_library targets in @com_google_protobuf."""
    short_names = [
        "any",
        "api",
        "compiler_plugin",
        "descriptor",
        "duration",
        "empty",
        "field_mask",
        "source_context",
        "struct",
        "timestamp",
        "type",
        "wrappers",
    ]
    return [
        "@com_google_protobuf//:{}_proto".format(short_name)
        for short_name in short_names
    ]
||||
|
||||
def get_proto_root(workspace_root):
    """Gets the root protobuf directory.

    Args:
      workspace_root: context.label.workspace_root

    Returns:
      The directory relative to which generated include paths should be.
    """
    # An empty workspace_root means the main workspace: no prefix needed.
    if not workspace_root:
        return ""
    return "/" + workspace_root
||||
|
||||
def _strip_proto_extension(proto_filename):
    """Drops the trailing ".proto" from proto_filename, failing if it is absent."""
    if not proto_filename.endswith(_PROTO_EXTENSION):
        fail('"{}" does not end with "{}"'.format(
            proto_filename,
            _PROTO_EXTENSION,
        ))
    stem_length = len(proto_filename) - len(_PROTO_EXTENSION)
    return proto_filename[:stem_length]
||||
|
||||
def proto_path_to_generated_filename(proto_path, fmt_str):
    """Calculates the name of a generated file for a protobuf path.

    For example, "examples/protos/helloworld.proto" might map to
    "helloworld.pb.h".

    Args:
      proto_path: The path to the .proto file.
      fmt_str: A format string used to calculate the generated filename. For
        example, "{}.pb.h" might be used to calculate a C++ header filename.

    Returns:
      The generated filename.
    """
    stem = _strip_proto_extension(proto_path)
    return fmt_str.format(stem)
||||
|
||||
def _get_include_directory(include):
    """Returns the import root directory for a proto file.

    Generated files carry a configuration prefix (include.root.path) which is
    skipped before inspecting the path; protos under an "external/" directory
    resolve to their external repository's root.
    """
    directory = include.path

    # Generated (non-source) files live under the configuration root; compute
    # the offset past "<root>/" so the checks below see the workspace path.
    prefix_len = 0
    if not include.is_source and directory.startswith(include.root.path):
        prefix_len = len(include.root.path) + 1

    if directory.startswith("external", prefix_len):
        # "[root/]external/<repo>/..." -> "[root/]external/<repo>"
        external_separator = directory.find("/", prefix_len)
        repository_separator = directory.find("/", external_separator + 1)
        return directory[:repository_separator]
    return include.root.path if include.root.path else "."
||||
|
||||
def get_include_protoc_args(includes):
    """Returns protoc args that import protos relative to their import root.

    Args:
      includes: A list of included proto files.

    Returns:
      A list of arguments to be passed to protoc. For example, ["--proto_path=."].
    """
    args = []
    for include in includes:
        args.append("--proto_path=" + _get_include_directory(include))
    return args
||||
|
||||
def get_plugin_args(plugin, flags, dir_out, generate_mocks):
    """Returns arguments configuring protoc to use a plugin for a language.

    Args:
      plugin: An executable file to run as the protoc plugin.
      flags: The plugin flags to be passed to protoc.
      dir_out: The output directory for the plugin.
      generate_mocks: A bool indicating whether to generate mocks.

    Returns:
      A list of protoc arguments configuring the plugin.
    """
    # Copy the caller's flags; append the mock flag without mutating the input.
    augmented_flags = list(flags)
    if generate_mocks:
        augmented_flags = augmented_flags + ["generate_mock_code=true"]
    joined_flags = ",".join(augmented_flags)
    return [
        "--plugin=protoc-gen-PLUGIN=" + plugin.path,
        "--PLUGIN_out={}:{}".format(joined_flags, dir_out),
    ]
@ -0,0 +1,186 @@ |
||||
"""Generates and compiles Python gRPC stubs from proto_library rules.""" |
||||
|
||||
load("@grpc_python_dependencies//:requirements.bzl", "requirement") |
||||
load( |
||||
"//bazel:protobuf.bzl", |
||||
"get_include_protoc_args", |
||||
"get_plugin_args", |
||||
"get_proto_root", |
||||
"proto_path_to_generated_filename", |
||||
) |
||||
|
||||
_GENERATED_PROTO_FORMAT = "{}_pb2.py" |
||||
_GENERATED_GRPC_PROTO_FORMAT = "{}_pb2_grpc.py" |
||||
|
||||
def _get_staged_proto_file(context, source_file):
    """Returns source_file, copying it into the rule's package first if needed.

    A proto that already lives in this package is returned unchanged;
    otherwise a staged copy is declared and produced via a cp action.
    """
    if source_file.dirname == context.label.package:
        return source_file

    # The proto lives in another package: stage a copy next to this rule.
    copied_proto = context.actions.declare_file(source_file.basename)
    context.actions.run_shell(
        inputs = [source_file],
        outputs = [copied_proto],
        command = "cp {} {}".format(source_file.path, copied_proto.path),
        mnemonic = "CopySourceProto",
    )
    return copied_proto
||||
|
||||
def _generate_py_impl(context):
    """Implementation of the __generate_py rule.

    Runs protoc once over the direct proto sources of every dep, emitting
    either *_pb2.py modules (no plugin) or *_pb2_grpc.py stubs (plugin set).
    """
    # Stage each direct proto source into this package (protoc requires it).
    protos = []
    for src in context.attr.deps:
        for file in src.proto.direct_sources:
            protos.append(_get_staged_proto_file(context, file))

    # All transitively imported protos must be visible to protoc as inputs.
    includes = [
        file
        for src in context.attr.deps
        for file in src.proto.transitive_imports
    ]

    # NOTE(review): proto_root is computed but never used below -- confirm
    # whether it can be removed.
    proto_root = get_proto_root(context.label.workspace_root)

    # A configured plugin means we are generating gRPC stubs, not plain pb2.
    format_str = (_GENERATED_GRPC_PROTO_FORMAT if context.executable.plugin else _GENERATED_PROTO_FORMAT)
    out_files = [
        context.actions.declare_file(
            proto_path_to_generated_filename(
                proto.basename,
                format_str,
            ),
        )
        for proto in protos
    ]

    arguments = []
    tools = [context.executable._protoc]
    if context.executable.plugin:
        # Route output through the plugin (mocks disabled for Python).
        arguments += get_plugin_args(
            context.executable.plugin,
            context.attr.flags,
            context.genfiles_dir.path,
            False,
        )
        tools += [context.executable.plugin]
    else:
        # No plugin: use protoc's built-in Python generator.
        arguments += [
            "--python_out={}:{}".format(
                ",".join(context.attr.flags),
                context.genfiles_dir.path,
            ),
        ]

    arguments += get_include_protoc_args(includes)
    # NOTE(review): this appends one identical --proto_path per proto;
    # presumably a single occurrence would suffice -- confirm before changing.
    arguments += [
        "--proto_path={}".format(context.genfiles_dir.path)
        for proto in protos
    ]
    # Pass proto paths relative to genfiles so protoc resolves staged copies.
    for proto in protos:
        massaged_path = proto.path
        if massaged_path.startswith(context.genfiles_dir.path):
            massaged_path = proto.path[len(context.genfiles_dir.path) + 1:]
        arguments.append(massaged_path)

    # Make the well-known protos importable by their canonical
    # "google/protobuf/..." paths when requested.
    well_known_proto_files = []
    if context.attr.well_known_protos:
        well_known_proto_directory = context.attr.well_known_protos.files.to_list(
        )[0].dirname

        arguments += ["-I{}".format(well_known_proto_directory + "/../..")]
        well_known_proto_files = context.attr.well_known_protos.files.to_list()

    context.actions.run(
        inputs = protos + includes + well_known_proto_files,
        tools = tools,
        outputs = out_files,
        executable = context.executable._protoc,
        arguments = arguments,
        mnemonic = "ProtocInvocation",
    )
    return struct(files = depset(out_files))
||||
|
||||
# Private rule backing py_proto_library: runs protoc (optionally through a
# gRPC plugin) over the proto sources of `deps`, writing into genfiles.
__generate_py = rule(
    attrs = {
        # proto_library targets whose direct sources are code-generated.
        "deps": attr.label_list(
            mandatory = True,
            allow_empty = False,
            providers = ["proto"],
        ),
        # Optional protoc plugin; when set, gRPC stubs are generated instead
        # of plain pb2 modules.
        "plugin": attr.label(
            executable = True,
            providers = ["files_to_run"],
            cfg = "host",
        ),
        # Extra flags forwarded to the code generator.
        "flags": attr.string_list(
            mandatory = False,
            allow_empty = True,
        ),
        # Filegroup of protobuf's well-known .proto files, or unset.
        "well_known_protos": attr.label(mandatory = False),
        # The protocol compiler itself (implicit dependency).
        "_protoc": attr.label(
            default = Label("//external:protocol_compiler"),
            executable = True,
            cfg = "host",
        ),
    },
    output_to_genfiles = True,
    implementation = _generate_py_impl,
)
||||
|
||||
def _generate_py(well_known_protos, **kwargs):
    """Instantiate the code-generation rule, wiring in well-known protos on demand.

    Args:
      well_known_protos: Truthy to point the underlying rule's
        'well_known_protos' attribute at protobuf's well-known protos.
      **kwargs: Forwarded verbatim to __generate_py.
    """
    if not well_known_protos:
        __generate_py(**kwargs)
        return
    __generate_py(
        well_known_protos = "@com_google_protobuf//:well_known_protos",
        **kwargs
    )
||||
|
||||
def py_proto_library(
        name,
        deps,
        well_known_protos = True,
        proto_only = False,
        **kwargs):
    """Generate python code for a protobuf.

    Args:
      name: The name of the target.
      deps: A list of dependencies. Must contain a single element.
      well_known_protos: A bool indicating whether or not to include well-known
        protos.
      proto_only: A bool indicating whether to generate vanilla protobuf code
        or to also generate gRPC code.
      **kwargs: Forwarded to the codegen rules and the resulting py_library.
    """
    # The codegen machinery supports exactly one proto library input; reject
    # empty lists too (the original check only rejected lists longer than 1).
    if len(deps) != 1:
        fail("The supported length of 'deps' is 1.")

    codegen_target = "_{}_codegen".format(name)
    codegen_grpc_target = "_{}_grpc_codegen".format(name)

    # Plain protobuf message code is generated in every mode.
    _generate_py(
        name = codegen_target,
        deps = deps,
        well_known_protos = well_known_protos,
        **kwargs
    )

    if proto_only:
        native.py_library(
            name = name,
            # Fix: the original listed codegen_target twice in this branch.
            srcs = [":{}".format(codegen_target)],
            deps = [requirement("protobuf")],
            **kwargs
        )
    else:
        # Additionally run the gRPC plugin to generate service stub code.
        _generate_py(
            name = codegen_grpc_target,
            deps = deps,
            plugin = "//:grpc_python_plugin",
            well_known_protos = well_known_protos,
            **kwargs
        )
        native.py_library(
            name = name,
            srcs = [
                ":{}".format(codegen_grpc_target),
                ":{}".format(codegen_target),
            ],
            deps = [requirement("protobuf")],
            **kwargs
        )
@ -1,4 +1,4 @@ |
||||
set noparent |
||||
@jtattermusch |
||||
@nicolasnoble |
||||
@matt-kwong |
||||
@apolcyn |
||||
|
@ -0,0 +1,15 @@ |
||||
# Copyright 2018 gRPC authors. |
||||
# |
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
|
||||
set(_gRPC_NANOPB_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/nanopb") |
@ -0,0 +1,32 @@ |
||||
# Polling Engine Usage on gRPC client and Server |
||||
|
||||
_Author: Sree Kuchibhotla (@sreecha) - Sep 2018_ |
||||
|
||||
|
||||
This document talks about how polling engine is used in gRPC core (both on client and server code paths). |
||||
|
||||
## gRPC client |
||||
|
||||
### Relation between Call, Channel (sub-channels), Completion queue, `grpc_pollset` |
||||
- A gRPC Call is tied to a channel (more specifically a sub-channel) and a completion queue for the lifetime of the call. |
||||
- Once a _sub-channel_ is picked for the call, the file-descriptor (socket fd in case of TCP channels) is added to the pollset corresponding to call's completion queue. (Recall that as per [grpc-cq](grpc-cq.md), a completion queue has a pollset by default) |
||||
|
||||
 |
||||
|
||||
|
||||
### Making progress on Async `connect()` on sub-channels (`grpc_pollset_set` usecase) |
||||
- A gRPC channel is created between a client and a 'target'. The 'target' may resolve in to one or more backend servers. |
||||
- A sub-channel is the 'connection' from a client to the backend server |
||||
- While establishing sub-channels (i.e. connections) to the backends, gRPC issues async [`connect()`](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/tcp_client_posix.cc#L296) calls which may not complete right away. When the `connect()` eventually succeeds, the socket fd is made 'writable'
||||
- This means that the polling engine must be monitoring all these sub-channel `fd`s for writable events and we need to make sure there is a polling thread that monitors all these fds |
||||
- To accomplish this, the `grpc_pollset_set` is used the following way (see picture below) |
||||
|
||||
 |
||||
|
||||
## gRPC server |
||||
|
||||
- The listening fd (i.e., the socket fd corresponding to the server listening port) is added to each of the server completion queues. Note that in gRPC we use SO_REUSEPORT option and create multiple listening fds but all of them map to the same listening port |
||||
- A new incoming channel is assigned to some server completion queue picked randomly (note that we currently [round-robin](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/tcp_server_posix.cc#L231) over the server completion queues) |
||||
|
||||
 |
||||
|
@ -0,0 +1,64 @@ |
||||
# gRPC Completion Queue |
||||
|
||||
_Author: Sree Kuchibhotla (@sreecha) - Sep 2018_ |
||||
|
||||
Code: [completion_queue.cc](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/surface/completion_queue.cc) |
||||
|
||||
This document gives an overview of completion queue architecture and focuses mainly on the interaction between completion queue and the Polling engine layer. |
||||
|
||||
## Completion queue attributes |
||||
Completion queue has two attributes |
||||
|
||||
- Completion_type: |
||||
- GRPC_CQ_NEXT: grpc_completion_queue_next() can be called (but not grpc_completion_queue_pluck()) |
||||
- GRPC_CQ_PLUCK: grpc_completion_queue_pluck() can be called (but not grpc_completion_queue_next()) |
||||
- GRPC_CQ_CALLBACK: The tags in the queue are function pointers to callbacks. Also, neither next() nor pluck() can be called on this |
||||
|
||||
- Polling_type: |
||||
- GRPC_CQ_NON_POLLING: Threads calling completion_queue_next/pluck do not do any polling |
||||
- GRPC_CQ_DEFAULT_POLLING: Threads calling completion_queue_next/pluck do polling |
||||
- GRPC_CQ_NON_LISTENING: Functionally similar to default polling except for a boolean attribute that states that the cq is non-listening. This is used by the grpc-server code to not associate any listening sockets with this completion-queue’s pollset |
||||
|
||||
|
||||
## Details |
||||
|
||||
 |
||||
|
||||
|
||||
### **grpc\_completion\_queue\_next()** & **grpc_completion_queue_pluck()** APIS |
||||
|
||||
|
||||
``` C++
grpc_completion_queue_next(cq, deadline)/pluck(cq, deadline, tag) {
  while(true) {
    // 1. If an event is queued in the completion queue, dequeue and return
    //    (in case of pluck() dequeue only if the tag is the one we are interested in)

    // 2. If completion queue shutdown return

    // 3. In case of pluck, add (tag, worker) pair to the tag<->worker map on the cq

    // 4. Call grpc_pollset_work(cq's-pollset, deadline) to do polling
    //    Note that if this function found some fds to be readable/writable/error,
    //    it would have scheduled those closures (which may queue completion events
    //    on SOME completion queue - not necessarily this one)
  }
}
```
||||
|
||||
### Queuing a completion event (i.e., "tag") |
||||
|
||||
``` C++
grpc_cq_end_op(cq, tag) {
  // 1. Queue the tag in the event queue

  // 2. Find the pollset corresponding to the completion queue
  //    (i) If the cq is of type GRPC_CQ_NEXT, then KICK ANY worker
  //        i.e., call grpc_pollset_kick(pollset, nullptr)
  //    (ii) If the cq is of type GRPC_CQ_PLUCK, then search the tag<->worker
  //         map on the completion queue to find the worker. Then specifically
  //         kick that worker i.e call grpc_pollset_kick(pollset, worker)
}

```
||||
|
@ -0,0 +1,152 @@ |
||||
# Polling Engines |
||||
|
||||
_Author: Sree Kuchibhotla (@sreecha) - Sep 2018_ |
||||
|
||||
|
||||
## Why do we need a 'polling engine' ? |
||||
|
||||
Polling engine component was created for the following reasons: |
||||
|
||||
- gRPC code deals with a bunch of file descriptors on which events like descriptor being readable/writable/error have to be monitored |
||||
- gRPC code knows the actions to perform when such events happen |
||||
- For example: |
||||
- `grpc_endpoint` code calls `recvmsg` call when the fd is readable and `sendmsg` call when the fd is writable |
||||
- `tcp_client` connect code issues async `connect` and finishes creating the client once the fd is writable (i.e. when the `connect` actually finished)
||||
- gRPC needed some component that can "efficiently" do the above operations __using the threads provided by the applications (i.e., not create any new threads)__. Also by "efficiently" we mean optimized for latency and throughput |
||||
|
||||
|
||||
## Polling Engine Implementations in gRPC |
||||
There are multiple polling engine implementations depending on the OS and the OS version. Fortunately all of them expose the same interface |
||||
|
||||
- Linux: |
||||
|
||||
- **`epollex`** (default but requires kernel version >= 4.5), |
||||
- `epoll1` (If `epollex` is not available and glibc version >= 2.9) |
||||
- `poll` (If kernel does not have epoll support) |
||||
- Mac: **`poll`** (default) |
||||
- Windows: (no name) |
||||
- One-off polling engines: |
||||
- NodeJS : `libuv` polling engine implementation (requires different compile `#define`s) |
||||
|
||||
## Polling Engine Interface |
||||
|
||||
### Opaque Structures exposed by the polling engine |
||||
The following are the **Opaque** structures exposed by Polling Engine interface (NOTE: Different polling engine implementations have different definitions of these structures) |
||||
|
||||
- **grpc_fd:** Structure representing a file descriptor |
||||
- **grpc_pollset:** A set of one or more grpc_fds that are ‘polled’ for readable/writable/error events. One grpc_fd can be in multiple `grpc_pollset`s |
||||
- **grpc_pollset_worker:** Structure representing a ‘polling thread’ - more specifically, the thread that calls `grpc_pollset_work()` API |
||||
- **grpc_pollset_set:** A group of `grpc_fds`, `grpc_pollsets` and `grpc_pollset_sets` (yes, a `grpc_pollset_set` can contain other `grpc_pollset_sets`) |
||||
|
||||
### Polling engine API |
||||
|
||||
#### grpc_fd |
||||
- **grpc\_fd\_notify\_on\_[read|write|error]** |
||||
- Signature: `grpc_fd_notify_on_(grpc_fd* fd, grpc_closure* closure)` |
||||
- Register a [closure](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/closure.h#L67) to be called when the fd becomes readable/writable or has an error (In grpc parlance, we refer to this act as “arming the fd”) |
||||
- The closure is called exactly once per event. I.e once the fd becomes readable (or writable or error), the closure is fired and the fd is ‘unarmed’. To be notified again, the fd has to be armed again. |
||||
|
||||
- **grpc_fd_shutdown** |
||||
- Signature: `grpc_fd_shutdown(grpc_fd* fd)` |
||||
- Any current (or future) closures registered for readable/writable/error events are scheduled immediately with an error |
||||
|
||||
- **grpc_fd_orphan** |
||||
- Signature: `grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, char* reason)` |
||||
- Release the `grpc_fd` structure and call `on_done` closure when the operation is complete |
||||
- If `release_fd` is set to `nullptr`, then `close()` the underlying fd as well. If not, put the underlying fd in `release_fd` (and do not call `close()`) |
||||
- `release_fd` set to non-null in cases where the underlying fd is NOT owned by grpc core (like for example the fds used by C-Ares DNS resolver ) |
||||
|
||||
#### grpc_pollset |
||||
|
||||
- **grpc_pollset_add_fd**
||||
- Signature: `grpc_pollset_add_fd(grpc_pollset* ps, grpc_fd *fd)` |
||||
- Add fd to pollset |
||||
> **NOTE**: There is no `grpc_pollset_remove_fd`. This is because calling `grpc_fd_orphan()` will effectively remove the fd from all the pollsets it’s a part of |
||||
|
||||
- **grpc_pollset_work**
||||
- Signature: `grpc_pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker, grpc_millis deadline)` |
||||
> **NOTE**: `grpc_pollset_work()` requires the pollset mutex to be locked before calling it. Shortly after calling `grpc_pollset_work()`, the function populates the `*worker` pointer (among other things) and releases the mutex. Once `grpc_pollset_work()` returns, the `*worker` pointer is **invalid** and should not be used anymore. See the code in `completion_queue.cc` to see how this is used. |
||||
- Poll the fds in the pollset for events AND return when ANY of the following is true: |
||||
- Deadline expired |
||||
- Some fds in the pollset were found to be readable/writable/error and those associated closures were ‘scheduled’ (but not necessarily executed) |
||||
- worker is “kicked” (see `grpc_pollset_kick` for more details) |
||||
|
||||
- **grpc_pollset_kick** |
||||
- Signature: `grpc_pollset_kick(grpc_pollset* ps, grpc_pollset_worker* worker)` |
||||
- “Kick the worker” i.e Force the worker to return from grpc_pollset_work() |
||||
- If `worker == nullptr`, kick ANY worker active on that pollset |
||||
|
||||
#### grpc_pollset_set |
||||
|
||||
- **grpc\_pollset\_set\_[add|del]\_fd** |
||||
- Signature: `grpc_pollset_set_[add|del]_fd(grpc_pollset_set* pss, grpc_fd *fd)` |
||||
- Add/remove an fd to/from the `grpc_pollset_set`
||||
|
||||
- **grpc\_pollset\_set_[add|del]\_pollset** |
||||
- Signature: `grpc_pollset_set_[add|del]_pollset(grpc_pollset_set* pss, grpc_pollset* ps)` |
||||
- What does adding a pollset to a pollset_set mean ? |
||||
- It means that calling `grpc_pollset_work()` on the pollset will also poll all the fds in the pollset_set i.e semantically, it is similar to adding all the fds inside pollset_set to the pollset. |
||||
- This guarantee is no longer true once the pollset is removed from the pollset_set |
||||
|
||||
- **grpc\_pollset\_set_[add|del]\_pollset\_set** |
||||
- Signature: `grpc_pollset_set_[add|del]_pollset_set(grpc_pollset_set* bag, grpc_pollset_set* item)` |
||||
- Semantically, this is similar to adding all the fds in the ‘bag’ pollset_set to the ‘item’ pollset_set |
||||
|
||||
|
||||
#### Recap: |
||||
|
||||
__Relation between grpc_pollset_worker, grpc_pollset and grpc_fd:__ |
||||
|
||||
 |
||||
|
||||
__grpc_pollset_set__ |
||||
|
||||
 |
||||
|
||||
|
||||
## Polling Engine Implementations |
||||
|
||||
### epoll1 |
||||
|
||||
 |
||||
|
||||
Code at `src/core/lib/iomgr/ev_epoll1_posix.cc` |
||||
|
||||
- The logic to choose a designated poller is quite complicated. Pollsets are internally sharded into what are called `pollset_neighborhood` (a structure internal to `epoll1` polling engine implementation). `grpc_pollset_workers` that call `grpc_pollset_work` on a given pollset are all queued in a linked-list against the `grpc_pollset`. The head of the linked list is called "root worker" |
||||
|
||||
- There are as many neighborhoods as the number of cores. A pollset is put in a neighborhood based on the CPU core of the root worker thread. When picking the next designated poller, we always try to find another worker on the current pollset. If there are no more workers in the current pollset, a `pollset_neighborhood` listed is scanned to pick the next pollset and worker that could be the new designated poller. |
||||
- NOTE: There is room to tune this implementation. All we really need is good way to maintain a list of `grpc_pollset_workers` with a way to group them per-pollset (needed to implement `grpc_pollset_kick` semantics) and a way randomly select a new designated poller |
||||
|
||||
- See the [`begin_worker()`](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/ev_epoll1_linux.cc#L729) function to see how a designated poller is chosen. Similarly, the [`end_worker()`](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/ev_epoll1_linux.cc#L916) function is called by the worker that has just come out of `epoll_wait()` and will have to choose a new designated poller.
||||
|
||||
|
||||
### epollex |
||||
|
||||
 |
||||
|
||||
Code at `src/core/lib/iomgr/ev_epollex_posix.cc` |
||||
|
||||
- FDs are added to multiple epollsets with EPOLLEXCLUSIVE flag. This prevents multiple worker threads from waking up from polling whenever the fd is readable/writable |
||||
|
||||
- A few observations: |
||||
|
||||
- If multiple pollsets are pointing to the same `Pollable`, then the `pollable` MUST be either empty or of type `PO_FD` (i.e single-fd) |
||||
- A multi-pollable has one-and-only-one incoming link from a pollset |
||||
- The same FD can be in multiple `Pollable`s (even if one of the `Pollable`s is of type PO_FD) |
||||
- There cannot be two `Pollable`s of type PO_FD for the same fd |
||||
|
||||
- Why do we need `Pollable` of type PO_FD and PO_EMPTY ? |
||||
- The main reason is the Sync client API |
||||
- We create one new completion queue per call. If we didn’t have PO_EMPTY and PO_FD type pollables, then every call on a given channel will effectively have to create a `Pollable` and hence an epollset. This is because every completion queue automatically creates a pollset and the channel fd will have to be put in that pollset. This clearly requires an epollset to put that fd. Creating an epollset per call (even if we delete the epollset once the call is completed) would mean a lot of sys calls to create/delete epoll fds. This is clearly not a good idea. |
||||
- With these new types of `Pollable`s, all pollsets (corresponding to the new per-call completion queue) will initially point to PO_EMPTY global epollset. Then once the channel fd is added to the pollset, the pollset will point to the `Pollable` of type PO_FD containing just that fd (i.e it will reuse the existing `Pollable`). This way, the epoll fd creation/deletion churn is avoided. |
||||
|
||||
|
||||
### Other polling engine implementations (poll and windows polling engine) |
||||
- **poll** polling engine: gRPC's `poll` polling engine is quite complicated. It uses the `poll()` function to do the polling (and hence it is for platforms like osx where epoll is not available) |
||||
- The implementation is further complicated by the fact that poll() is level triggered (just keep this in mind in case you wonder why the code at `src/core/lib/iomgr/ev_poll_posix.cc` is written a certain/seemingly complicated way :)) |
||||
|
||||
- **Polling engine on Windows**: Windows polling engine looks nothing like other polling engines |
||||
- Unlike the grpc polling engines for Unix systems (epollex, epoll1 and poll) Windows endpoint implementation and polling engine implementations are very closely tied together |
||||
- Windows endpoint read/write API implementations use the Windows IO API which require specifying an [I/O completion port](https://docs.microsoft.com/en-us/windows/desktop/fileio/i-o-completion-ports) |
||||
- In Windows polling engine’s grpc_pollset_work() implementation, ONE of the threads is chosen to wait on the I/O completion port while other threads wait on a condition variable (much like the turnstile polling in epollex/epoll1) |
||||
|
Before Width: | Height: | Size: 52 KiB After Width: | Height: | Size: 52 KiB |
Before Width: | Height: | Size: 44 KiB After Width: | Height: | Size: 44 KiB |
@ -0,0 +1,54 @@ |
||||
# gRPC C# Server Reflection |
||||
|
||||
This document shows how to use gRPC Server Reflection in gRPC C#. |
||||
Please see [C++ Server Reflection Tutorial](../server_reflection_tutorial.md) |
||||
for general information and more examples how to use server reflection. |
||||
|
||||
## Enable server reflection in C# servers |
||||
|
||||
C# Server Reflection is an add-on library. |
||||
To use it, first install the [Grpc.Reflection](https://www.nuget.org/packages/Grpc.Reflection/) |
||||
Nuget package into your project. |
||||
|
||||
Note that with C# you need to manually register the service |
||||
descriptors with the reflection service implementation when creating a server |
||||
(this isn't necessary with e.g. C++ or Java) |
||||
```csharp |
||||
// the reflection service will be aware of "Greeter" and "ServerReflection" services. |
||||
var reflectionServiceImpl = new ReflectionServiceImpl(Greeter.Descriptor, ServerReflection.Descriptor); |
||||
server = new Server() |
||||
{ |
||||
Services = |
||||
{ |
||||
// the server will serve 2 services, the Greeter and the ServerReflection |
||||
Greeter.BindService(new GreeterImpl()),
||||
ServerReflection.BindService(reflectionServiceImpl) |
||||
}, |
||||
Ports = { { "localhost", 50051, ServerCredentials.Insecure } } |
||||
}; |
||||
server.Start(); |
||||
``` |
||||
|
||||
After starting the server, you can verify that the server reflection |
||||
is working properly by using the [`grpc_cli` command line |
||||
tool](https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md): |
||||
|
||||
```sh |
||||
$ grpc_cli ls localhost:50051 |
||||
``` |
||||
|
||||
output: |
||||
```sh |
||||
helloworld.Greeter |
||||
grpc.reflection.v1alpha.ServerReflection |
||||
``` |
||||
|
||||
For more examples and instructions how to use the `grpc_cli` tool, |
||||
please refer to the [`grpc_cli` documentation](../command_line_tool.md) |
||||
and the [C++ Server Reflection Tutorial](../server_reflection_tutorial.md). |
||||
|
||||
## Additional Resources |
||||
|
||||
The [Server Reflection Protocol](../server-reflection.md) provides detailed |
||||
information about how the server reflection works and describes the server reflection |
||||
protocol in detail. |
@ -0,0 +1,22 @@ |
||||
# gRPC Release Schedule |
||||
|
||||
Below is the release schedule for gRPC [Java](https://github.com/grpc/grpc-java/releases), [Go](https://github.com/grpc/grpc-go/releases) and [Core](https://github.com/grpc/grpc/releases) and its dependent languages C++, C#, Objective-C, PHP, Python and Ruby. |
||||
|
||||
Releases are scheduled every six weeks on Tuesdays on a best effort basis. In some unavoidable situations a release may be delayed or a language may skip a release altogether and do the next release to catch up with other languages. See the past releases in the links above. A six-week cycle gives us a good balance between delivering new features/fixes quickly and keeping the release overhead low. |
||||
|
||||
Releases are cut from release branches. For Core and Java repos, the release branch is cut two weeks before the scheduled release date. For Go, the branch is cut just before the release. An RC (release candidate) is published for Core and its dependent languages just after the branch cut. This RC is later promoted to release version if no further changes are made to the release branch. We do our best to keep head of master branch stable at all times regardless of release schedule. Daily build packages from master branch for C#, PHP, Python, Ruby and Protoc plugins are published on [packages.grpc.io](https://packages.grpc.io/). If you depend on gRPC in production we recommend to set up your CI system to test the RCs and, if possible, the daily builds. |
||||
|
||||
Names of gRPC releases are [here](https://github.com/grpc/grpc/blob/master/doc/g_stands_for.md). |
||||
|
||||
Release |Scheduled Branch Cut|Scheduled Release Date |
||||
--------|--------------------|------------- |
||||
v1.17.0 |Nov 19, 2018 |Dec 4, 2018 |
||||
v1.18.0 |Jan 2, 2019 |Jan 15, 2019 |
||||
v1.19.0 |Feb 12, 2019 |Feb 26, 2019 |
||||
v1.20.0 |Mar 26, 2019 |Apr 9, 2019 |
||||
v1.21.0 |May 7, 2019 |May 21, 2019 |
||||
v1.22.0 |Jun 18, 2019 |Jul 2, 2019 |
||||
v1.23.0 |Jul 30, 2019 |Aug 13, 2019 |
||||
v1.24.0 |Sept 10, 2019 |Sept 24, 2019 |
||||
v1.25.0 |Oct 22, 2019 |Nov 5, 2019 |
||||
v1.26.0 |Dec 3, 2019 |Dec 17, 2019 |
After Width: | Height: | Size: 45 KiB |
After Width: | Height: | Size: 55 KiB |
After Width: | Height: | Size: 41 KiB |
After Width: | Height: | Size: 35 KiB |
After Width: | Height: | Size: 51 KiB |
After Width: | Height: | Size: 24 KiB |
After Width: | Height: | Size: 31 KiB |
After Width: | Height: | Size: 41 KiB |
@ -0,0 +1,52 @@ |
||||
# Keepalive User Guide for gRPC Core (and dependants) |
||||
|
||||
The keepalive ping is a way to check if a channel is currently working by sending HTTP2 pings over the transport. It is sent periodically, and if the ping is not acknowledged by the peer within a certain timeout period, the transport is disconnected. |
||||
|
||||
This guide documents the knobs within gRPC core to control the current behavior of the keepalive ping. |
||||
|
||||
The keepalive ping is controlled by two important channel arguments - |
||||
|
||||
* **GRPC_ARG_KEEPALIVE_TIME_MS** |
||||
* This channel argument controls the period (in milliseconds) after which a keepalive ping is sent on the transport. |
||||
* **GRPC_ARG_KEEPALIVE_TIMEOUT_MS** |
||||
* This channel argument controls the amount of time (in milliseconds) the sender of the keepalive ping waits for an acknowledgement. If it does not receive an acknowledgment within this time, it will close the connection. |
||||
|
||||
The above two channel arguments should be sufficient for most users, but the following arguments can also be useful in certain use cases. |
||||
|
||||
* **GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS** |
||||
* This channel argument if set to 1 (0 : false; 1 : true), allows keepalive pings to be sent even if there are no calls in flight. |
||||
* **GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA** |
||||
* This channel argument controls the maximum number of pings that can be sent when there is no other data (data frame or header frame) to be sent. GRPC Core will not continue sending pings if we run over the limit. Setting it to 0 allows sending pings without sending data. |
||||
* **GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS** |
||||
* If there is no data being sent on the transport, this channel argument controls the minimum time (in milliseconds) gRPC Core will wait between successive pings. |
||||
* **GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS** |
||||
* If there is no data being sent on the transport, this channel argument on the server side controls the minimum time (in milliseconds) that gRPC Core would expect between receiving successive pings. If the time between successive pings is less than this time, then the ping will be considered a bad ping from the peer. Such a ping counts as a ‘ping strike’.
||||
On the client side, this does not have any effect. |
||||
* **GRPC_ARG_HTTP2_MAX_PING_STRIKES** |
||||
* This arg controls the maximum number of bad pings that the server will tolerate before sending an HTTP2 GOAWAY frame and closing the transport. Setting it to 0 allows the server to accept any number of bad pings. |
||||
|
||||
### Default Values
||||
|
||||
Channel Argument| Client|Server |
||||
----------------|-------|------ |
||||
GRPC_ARG_KEEPALIVE_TIME_MS|INT_MAX (disabled)|7200000 (2 hours) |
||||
GRPC_ARG_KEEPALIVE_TIMEOUT_MS|20000 (20 seconds)|20000 (20 seconds) |
||||
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS|0 (false)|0 (false) |
||||
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA|2|2 |
||||
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS|300000 (5 minutes)|300000 (5 minutes) |
||||
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS|N/A|300000 (5 minutes) |
||||
GRPC_ARG_HTTP2_MAX_PING_STRIKES|N/A|2 |
||||
|
||||
### FAQ |
||||
* When is the keepalive timer started? |
||||
* The keepalive timer is started when a transport is done connecting (after handshake). |
||||
* What happens when the keepalive timer fires? |
||||
* When the keepalive timer fires, gRPC Core will try to send a keepalive ping on the transport. This ping can be blocked if - |
||||
* there is no active call on that transport and GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS is false. |
||||
* the number of pings already sent on the transport without any data has already exceeded GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA. |
||||
* the time elapsed since the previous ping is less than GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS. |
||||
* If a keepalive ping is not blocked and is sent on the transport, then the keepalive watchdog timer is started which will close the transport if the ping is not acknowledged before it fires. |
||||
* Why am I receiving a GOAWAY with error code ENHANCE_YOUR_CALM? |
||||
* A server sends a GOAWAY with ENHANCE_YOUR_CALM if the client sends too many misbehaving pings. For example - |
||||
* if a server has GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS set to false and the client sends pings without there being any call in flight. |
||||
* if the client's GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS setting is lower than the server's GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS. |
@ -0,0 +1,61 @@ |
||||
# gRPC Python Server Reflection |
||||
|
||||
This document shows how to use gRPC Server Reflection in gRPC Python. |
||||
Please see [C++ Server Reflection Tutorial](../server_reflection_tutorial.md) |
||||
for general information and more examples how to use server reflection. |
||||
|
||||
## Enable server reflection in Python servers |
||||
|
||||
gRPC Python Server Reflection is an add-on library. |
||||
To use it, first install the [grpcio-reflection](https://pypi.org/project/grpcio-reflection/) |
||||
PyPI package into your project. |
||||
|
||||
Note that with Python you need to manually register the service |
||||
descriptors with the reflection service implementation when creating a server |
||||
(this isn't necessary with e.g. C++ or Java) |
||||
```python |
||||
# add the following import statement to use server reflection |
||||
from grpc_reflection.v1alpha import reflection |
||||
# ... |
||||
def serve(): |
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) |
||||
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) |
||||
# the reflection service will be aware of "Greeter" and "ServerReflection" services. |
||||
SERVICE_NAMES = ( |
||||
helloworld_pb2.DESCRIPTOR.services_by_name['Greeter'].full_name, |
||||
reflection.SERVICE_NAME, |
||||
) |
||||
reflection.enable_server_reflection(SERVICE_NAMES, server) |
||||
server.add_insecure_port('[::]:50051') |
||||
server.start() |
||||
``` |
||||
|
||||
Please see |
||||
[greeter_server_with_reflection.py](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/greeter_server_with_reflection.py) |
||||
in the examples directory for the full example, which extends the gRPC [Python |
||||
`Greeter` example](https://github.com/grpc/grpc/tree/master/examples/python/helloworld) on a
||||
reflection-enabled server. |
||||
|
||||
After starting the server, you can verify that the server reflection |
||||
is working properly by using the [`grpc_cli` command line |
||||
tool](https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md): |
||||
|
||||
```sh |
||||
$ grpc_cli ls localhost:50051 |
||||
``` |
||||
|
||||
output: |
||||
```sh |
||||
grpc.reflection.v1alpha.ServerReflection |
||||
helloworld.Greeter |
||||
``` |
||||
|
||||
For more examples and instructions how to use the `grpc_cli` tool, |
||||
please refer to the [`grpc_cli` documentation](../command_line_tool.md) |
||||
and the [C++ Server Reflection Tutorial](../server_reflection_tutorial.md). |
||||
|
||||
## Additional Resources |
||||
|
||||
The [Server Reflection Protocol](../server-reflection.md) provides detailed |
||||
information about how the server reflection works and describes the server reflection |
||||
protocol in detail. |
@ -0,0 +1,106 @@ |
||||
# Copyright 2018 The gRPC Authors |
||||
# |
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
|
||||
# -- Path setup --------------------------------------------------------------

import os
import sys

# Put the in-tree gRPC Python packages on sys.path so Sphinx autodoc can
# resolve their modules without installing them first.
PYTHON_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '..', '..', '..', 'src', 'python')
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_channelz'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_health_checking'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_reflection'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_status'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_testing'))

# -- Project information -----------------------------------------------------

project = 'gRPC Python'
copyright = '2018, The gRPC Authors'
author = 'The gRPC Authors'

# Import generated grpc_version after the path has been modified above.
import grpc_version

# "version" is the short X.Y.Z identifier; "release" is the full version
# string (which may carry pre-release/dev suffixes).
version = ".".join(grpc_version.VERSION.split(".")[:3])
release = grpc_version.VERSION

# -- General configuration ---------------------------------------------------

templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = 'en'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = None

# --- Extensions Configuration -----------------------------------------------

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.todo',
    'sphinx.ext.napoleon',
    'sphinx.ext.coverage',
]

napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_special_with_doc = True

# Document all members by default.
autodoc_default_options = {
    'members': None,
}

# Mock out modules that require a compiled extension or generated protobuf
# code, so the docs can build without a full native build.
autodoc_mock_imports = [
    'grpc._cython',
    'grpc_channelz.v1.channelz_pb2',
    'grpc_channelz.v1.channelz_pb2_grpc',
    'grpc_health.v1.health_pb2',
    'grpc_health.v1.health_pb2_grpc',
    'grpc_reflection.v1alpha.reflection_pb2',
    'grpc_reflection.v1alpha.reflection_pb2_grpc',
]

# -- HTML Configuration -------------------------------------------------

html_theme = 'alabaster'
html_theme_options = {
    'fixed_sidebar': True,
    'page_width': '1140px',
    'show_related': True,
    'analytics_id': 'UA-60127042-1',
    'description': grpc_version.VERSION,
    'show_powered_by': False,
}

# -- Options for manual page output ------------------------------------------

man_pages = [(master_doc, 'grpcio', 'grpcio Documentation', [author], 1)]

# -- Options for Texinfo output ----------------------------------------------

# Was the Sphinx scaffolding placeholder "One line description of project.";
# replaced with a real one-line description.
texinfo_documents = [
    (master_doc, 'grpcio', 'grpcio Documentation', author, 'grpcio',
     'The official gRPC package for Python.', 'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

epub_title = project
epub_exclude_files = ['search.html']

# -- Options for todo extension ----------------------------------------------

todo_include_todos = True
@ -0,0 +1,16 @@ |
||||
Glossary |
||||
================ |
||||
|
||||
.. glossary:: |
||||
|
||||
metadatum |
||||
A key-value pair included in the HTTP header. It is a |
||||
2-tuple where the first entry is the key and the |
||||
second is the value, i.e. (key, value). The metadata key is an ASCII str, |
||||
and must be a valid HTTP header name. The metadata value can be |
||||
either a valid HTTP ASCII str, or bytes. If bytes are provided, |
||||
the key must end with '-bin', i.e. |
||||
``('binary-metadata-bin', b'\\x00\\xFF')`` |
||||
|
||||
metadata |
||||
A sequence of metadatum. |
@ -0,0 +1,180 @@ |
||||
gRPC |
||||
============= |
||||
|
||||
.. module:: grpc |
||||
|
||||
Tutorial |
||||
-------- |
||||
|
||||
If you want to see gRPC in action first, visit the `Python Quickstart <https://grpc.io/docs/quickstart/python.html>`_. |
||||
Or, if you would like to dive into more extensive usage of gRPC Python, check out `gRPC Basics - Python <https://grpc.io/docs/tutorials/basic/python.html>`_. |
||||
|
||||
|
||||
Example |
||||
------- |
||||
|
||||
Go to `gRPC Python Examples <https://github.com/grpc/grpc/tree/master/examples/python>`_ |
||||
|
||||
|
||||
Module Contents |
||||
--------------- |
||||
|
||||
Version |
||||
^^^^^^^ |
||||
|
||||
The version string is available as :code:`grpc.__version__`. |
||||
|
||||
Create Client |
||||
^^^^^^^^^^^^^ |
||||
|
||||
.. autofunction:: insecure_channel |
||||
.. autofunction:: secure_channel |
||||
.. autofunction:: intercept_channel |
||||
|
||||
|
||||
Create Client Credentials |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autofunction:: ssl_channel_credentials |
||||
.. autofunction:: metadata_call_credentials |
||||
.. autofunction:: access_token_call_credentials |
||||
.. autofunction:: composite_call_credentials |
||||
.. autofunction:: composite_channel_credentials |
||||
|
||||
|
||||
Create Server |
||||
^^^^^^^^^^^^^ |
||||
|
||||
.. autofunction:: server |
||||
|
||||
|
||||
Create Server Credentials |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autofunction:: ssl_server_credentials |
||||
.. autofunction:: ssl_server_certificate_configuration |
||||
.. autofunction:: dynamic_ssl_server_credentials |
||||
|
||||
|
||||
RPC Method Handlers |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autofunction:: unary_unary_rpc_method_handler |
||||
.. autofunction:: unary_stream_rpc_method_handler |
||||
.. autofunction:: stream_unary_rpc_method_handler |
||||
.. autofunction:: stream_stream_rpc_method_handler |
||||
.. autofunction:: method_handlers_generic_handler |
||||
|
||||
|
||||
Channel Ready Future |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autofunction:: channel_ready_future |
||||
|
||||
|
||||
Channel Connectivity |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: ChannelConnectivity |
||||
|
||||
|
||||
gRPC Status Code |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: StatusCode |
||||
|
||||
|
||||
Channel Object |
||||
^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: Channel |
||||
|
||||
|
||||
Server Object |
||||
^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: Server |
||||
|
||||
|
||||
Authentication & Authorization Objects |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: ChannelCredentials |
||||
.. autoclass:: CallCredentials |
||||
.. autoclass:: AuthMetadataContext |
||||
.. autoclass:: AuthMetadataPluginCallback |
||||
.. autoclass:: AuthMetadataPlugin |
||||
.. autoclass:: ServerCredentials |
||||
.. autoclass:: ServerCertificateConfiguration |
||||
|
||||
|
||||
gRPC Exceptions |
||||
^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoexception:: RpcError |
||||
|
||||
|
||||
Shared Context |
||||
^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: RpcContext |
||||
|
||||
|
||||
Client-Side Context |
||||
^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: Call |
||||
|
||||
|
||||
Client-Side Interceptor |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: ClientCallDetails |
||||
.. autoclass:: UnaryUnaryClientInterceptor |
||||
.. autoclass:: UnaryStreamClientInterceptor |
||||
.. autoclass:: StreamUnaryClientInterceptor |
||||
.. autoclass:: StreamStreamClientInterceptor |
||||
|
||||
|
||||
Service-Side Context |
||||
^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: ServicerContext |
||||
|
||||
|
||||
Service-Side Handler |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: RpcMethodHandler |
||||
.. autoclass:: HandlerCallDetails |
||||
.. autoclass:: GenericRpcHandler |
||||
.. autoclass:: ServiceRpcHandler |
||||
|
||||
|
||||
Service-Side Interceptor |
||||
^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: ServerInterceptor |
||||
|
||||
|
||||
Multi-Callable Interfaces |
||||
^^^^^^^^^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: UnaryUnaryMultiCallable |
||||
.. autoclass:: UnaryStreamMultiCallable |
||||
.. autoclass:: StreamUnaryMultiCallable |
||||
.. autoclass:: StreamStreamMultiCallable |
||||
|
||||
|
||||
Future Interfaces |
||||
^^^^^^^^^^^^^^^^^ |
||||
|
||||
.. autoexception:: FutureTimeoutError |
||||
.. autoexception:: FutureCancelledError |
||||
.. autoclass:: Future |
||||
|
||||
|
||||
Compression |
||||
^^^^^^^^^^^ |
||||
|
||||
.. autoclass:: Compression |
@ -0,0 +1,12 @@ |
||||
gRPC Channelz |
||||
==================== |
||||
|
||||
What is gRPC Channelz? |
||||
--------------------------------------------- |
||||
|
||||
Design Document `gRPC Channelz <https://github.com/grpc/proposal/blob/master/A14-channelz.md>`_ |
||||
|
||||
Module Contents |
||||
--------------- |
||||
|
||||
.. automodule:: grpc_channelz.v1.channelz |
@ -0,0 +1,7 @@ |
||||
gRPC Health Checking |
||||
==================== |
||||
|
||||
Module Contents |
||||
--------------- |
||||
|
||||
.. autoclass:: grpc_health.v1.health.HealthServicer |
@ -0,0 +1,19 @@ |
||||
gRPC Reflection |
||||
==================== |
||||
|
||||
What is gRPC reflection? |
||||
--------------------------------------------- |
||||
|
||||
Check out `gRPC Python Server Reflection <https://github.com/grpc/grpc/blob/master/doc/python/server_reflection.md>`_ |
||||
|
||||
|
||||
Example |
||||
------- |
||||
|
||||
Refer to the GitHub `reflection example <https://github.com/grpc/grpc/blob/master/examples/python/helloworld/greeter_server_with_reflection.py>`_ |
||||
|
||||
|
||||
Module Contents |
||||
--------------- |
||||
|
||||
.. automodule:: grpc_reflection.v1alpha.reflection |
@ -0,0 +1,7 @@ |
||||
gRPC Status |
||||
==================== |
||||
|
||||
Module Contents |
||||
--------------- |
||||
|
||||
.. automodule:: grpc_status.rpc_status |
@ -0,0 +1,7 @@ |
||||
gRPC Testing |
||||
==================== |
||||
|
||||
Module Contents |
||||
--------------- |
||||
|
||||
.. automodule:: grpc_testing |
@ -0,0 +1,26 @@ |
||||
Welcome to gRPC Python's documentation! |
||||
======================================= |
||||
|
||||
Version: |version| Release: |release| |
||||
|
||||
API Reference |
||||
============= |
||||
|
||||
.. toctree:: |
||||
:caption: Contents: |
||||
|
||||
grpc |
||||
grpc_channelz |
||||
grpc_health_checking |
||||
grpc_reflection |
||||
grpc_status |
||||
grpc_testing |
||||
glossary |
||||
|
||||
|
||||
Indices and tables |
||||
================== |
||||
|
||||
* :ref:`genindex` |
||||
* :ref:`modindex` |
||||
* :ref:`search` |
@ -0,0 +1,41 @@ |
||||
# SSL in gRPC and performance |
||||
|
||||
gRPC's SSL requirement doesn't necessarily make it easy to integrate. The HTTP/2 protocol requires ALPN support, which is a fairly new handshake protocol only supported by recent implementations. |
||||
|
||||
As a result, we've tried hard to provide a smooth experience to our users when compiling and distributing gRPC, but this may come at a performance cost. More specifically, we will sometimes build the SSL library by disabling assembly code |
||||
(by setting the `OPENSSL_NO_ASM` option), which can impact performance by an order of magnitude when processing encrypted streams. |
||||
|
||||
## gRPC C++: Building from Source |
||||
|
||||
Build system | Condition | Platform | Uses assembly optimizations |
||||
---|---|---|--- |
||||
Makefile | with OpenSSL 1.0.2 development files | all | :heavy_check_mark: |
||||
Makefile | all other cases | all | :x: |
||||
Bazel | | Linux | :heavy_check_mark: |
||||
Bazel | | MacOS | :heavy_check_mark: |
||||
Bazel | | Windows | :x: |
||||
CMake | boringssl from submodule (default) | Linux or MacOS | :heavy_check_mark: |
||||
CMake | boringssl from submodule (default), generator=Ninja | Windows | :heavy_check_mark: |
||||
CMake | boringssl from submodule (default), generator=Visual Studio | Windows | :x: |
||||
CMake | pre-installed OpenSSL 1.0.2+ (`gRPC_SSL_PROVIDER=package`) | all | :heavy_check_mark: |
||||
|
||||
## Other Languages: Binary/Source Packages |
||||
|
||||
In addition, we are shipping packages for language implementations. These packages are source packages, but also have pre-built binaries being distributed. Building packages from source may give a different result in some cases. |
||||
|
||||
Language | From source | Platform | Uses assembly optimizations |
||||
---|---|---|--- |
||||
C# | n/a | Linux, 64bit | :heavy_check_mark: |
||||
C# | n/a | Linux, 32bit | :x: |
||||
C# | n/a | MacOS | :heavy_check_mark: |
||||
C# | n/a | Windows | :heavy_check_mark: |
||||
Node.JS | n/a | Linux | :heavy_check_mark: |
||||
Node.JS | n/a | MacOS | :heavy_check_mark: |
||||
Node.JS | n/a | Windows | :x: |
||||
Electron | n/a | all | :heavy_check_mark: |
||||
ObjC | Yes | iOS | :x: |
||||
PHP | Yes | all | Same as the `Makefile` case from above |
||||
PHP | No | all | :x: |
||||
Python | n/a | all | :x: |
||||
Ruby | No | all | :x: |
||||
|
@ -0,0 +1,110 @@ |
||||
#
|
||||
# Copyright 2018 gRPC authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Detect the host platform ("Linux", "Darwin", ...); SYSTEM may be
# overridden from the environment, e.g. for cross builds.
HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
SYSTEM ?= $(HOST_SYSTEM)
CXX = g++
CPPFLAGS += `pkg-config --cflags protobuf grpc`
CXXFLAGS += -std=c++11
# macOS's linker does not support --no-as-needed, hence the split LDFLAGS.
ifeq ($(SYSTEM),Darwin)
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
           -lgrpc++_reflection\
           -ldl
else
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
           -Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed\
           -ldl
endif
# Tools used to generate C++ sources from the .proto definitions.
PROTOC = protoc
GRPC_CPP_PLUGIN = grpc_cpp_plugin
GRPC_CPP_PLUGIN_PATH ?= `which $(GRPC_CPP_PLUGIN)`

PROTOS_PATH = ../../protos

# Let make find .proto prerequisites in the shared protos directory.
vpath %.proto $(PROTOS_PATH)

all: system-check greeter_client greeter_server

greeter_client: helloworld.pb.o helloworld.grpc.pb.o greeter_client.o
	$(CXX) $^ $(LDFLAGS) -o $@

greeter_server: helloworld.pb.o helloworld.grpc.pb.o greeter_server.o
	$(CXX) $^ $(LDFLAGS) -o $@

# .PRECIOUS keeps the generated sources when make is interrupted or when
# they would otherwise be treated as intermediate files and deleted.
.PRECIOUS: %.grpc.pb.cc
%.grpc.pb.cc: %.proto
	$(PROTOC) -I $(PROTOS_PATH) --grpc_out=. --plugin=protoc-gen-grpc=$(GRPC_CPP_PLUGIN_PATH) $<

.PRECIOUS: %.pb.cc
%.pb.cc: %.proto
	$(PROTOC) -I $(PROTOS_PATH) --cpp_out=. $<

clean:
	rm -f *.o *.pb.cc *.pb.h greeter_client greeter_server


# The following is to test your system and ensure a smoother experience.
# They are by no means necessary to actually compile a grpc-enabled software.

PROTOC_CMD = which $(PROTOC)
# Accept any protoc from the 3.x series ("libprotoc 3...").
PROTOC_CHECK_CMD = $(PROTOC) --version | grep -q libprotoc.3
PLUGIN_CHECK_CMD = which $(GRPC_CPP_PLUGIN)
HAS_PROTOC = $(shell $(PROTOC_CMD) > /dev/null && echo true || echo false)
ifeq ($(HAS_PROTOC),true)
HAS_VALID_PROTOC = $(shell $(PROTOC_CHECK_CMD) 2> /dev/null && echo true || echo false)
endif
HAS_PLUGIN = $(shell $(PLUGIN_CHECK_CMD) > /dev/null && echo true || echo false)

SYSTEM_OK = false
ifeq ($(HAS_VALID_PROTOC),true)
ifeq ($(HAS_PLUGIN),true)
SYSTEM_OK = true
endif
endif

# Fail early with an actionable message when protoc 3.x or the gRPC C++
# plugin is missing; each ifneq section is emitted only for the failing tool.
system-check:
ifneq ($(HAS_VALID_PROTOC),true)
	@echo " DEPENDENCY ERROR"
	@echo
	@echo "You don't have protoc 3.0.0 installed in your path."
	@echo "Please install Google protocol buffers 3.0.0 and its compiler."
	@echo "You can find it here:"
	@echo
	@echo "   https://github.com/google/protobuf/releases/tag/v3.0.0"
	@echo
	@echo "Here is what I get when trying to evaluate your version of protoc:"
	@echo
	-$(PROTOC) --version
	@echo
	@echo
endif
ifneq ($(HAS_PLUGIN),true)
	@echo " DEPENDENCY ERROR"
	@echo
	@echo "You don't have the grpc c++ protobuf plugin installed in your path."
	@echo "Please install grpc. You can find it here:"
	@echo
	@echo "   https://github.com/grpc/grpc"
	@echo
	@echo "Here is what I get when trying to detect if you have the plugin:"
	@echo
	-which $(GRPC_CPP_PLUGIN)
	@echo
	@echo
endif
ifneq ($(SYSTEM_OK),true)
	@false
endif
@ -0,0 +1,84 @@ |
||||
# gRPC C++ Message Compression Tutorial |
||||
|
||||
### Prerequisite |
||||
Make sure you have run the [hello world example](../helloworld) or understood the basics of gRPC. We will not dive into the details that have been discussed in the hello world example. |
||||
|
||||
### Get the tutorial source code |
||||
|
||||
The example code for this and our other examples lives in the `examples` directory. Clone this repository to your local machine by running the following command: |
||||
|
||||
|
||||
```sh |
||||
$ git clone -b $(curl -L https://grpc.io/release) https://github.com/grpc/grpc |
||||
``` |
||||
|
||||
Change your current directory to examples/cpp/compression |
||||
|
||||
```sh |
||||
$ cd examples/cpp/compression/ |
||||
``` |
||||
|
||||
### Generating gRPC code |
||||
|
||||
To generate the client and server side interfaces: |
||||
|
||||
```sh |
||||
$ make helloworld.grpc.pb.cc helloworld.pb.cc |
||||
``` |
||||
This internally invokes the proto compiler as follows: |
||||
|
||||
```sh |
||||
$ protoc -I ../../protos/ --grpc_out=. --plugin=protoc-gen-grpc=grpc_cpp_plugin ../../protos/helloworld.proto |
||||
$ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto |
||||
``` |
||||
|
||||
### Writing a client and a server |
||||
|
||||
The client and the server can be based on the hello world example. |
||||
|
||||
Additionally, we can configure the compression settings. |
||||
|
||||
In the client, set the default compression algorithm of the channel via the channel arg. |
||||
|
||||
```cpp |
||||
ChannelArguments args; |
||||
// Set the default compression algorithm for the channel. |
||||
args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP); |
||||
GreeterClient greeter(grpc::CreateCustomChannel( |
||||
"localhost:50051", grpc::InsecureChannelCredentials(), args)); |
||||
``` |
||||
|
||||
Each call's compression configuration can be overwritten by client context. |
||||
|
||||
```cpp |
||||
// Overwrite the call's compression algorithm to DEFLATE. |
||||
context.set_compression_algorithm(GRPC_COMPRESS_DEFLATE); |
||||
``` |
||||
|
||||
In the server, set the default compression algorithm via the server builder. |
||||
|
||||
```cpp |
||||
ServerBuilder builder; |
||||
// Set the default compression algorithm for the server. |
||||
builder.SetDefaultCompressionAlgorithm(GRPC_COMPRESS_GZIP); |
||||
``` |
||||
|
||||
Each call's compression configuration can be overwritten by server context. |
||||
|
||||
```cpp |
||||
// Overwrite the call's compression algorithm to DEFLATE. |
||||
context->set_compression_algorithm(GRPC_COMPRESS_DEFLATE); |
||||
``` |
||||
|
||||
For a working example, refer to [greeter_client.cc](greeter_client.cc) and [greeter_server.cc](greeter_server.cc). |
||||
|
||||
Build and run the (compressing) client and the server by the following commands. |
||||
|
||||
```sh |
||||
make |
||||
./greeter_server |
||||
``` |
||||
|
||||
```sh |
||||
./greeter_client |
||||
``` |