Fix python typos (#38029)

Continuation of #37541 but focused on Python.

Top typos are:
* resolveable (5)
* dedecated (4)
* everyting (4)
* programatically (3)
* supercede (3)
* wil (3)
* acknowledgment (2)
* corutine (2)
* coversion (2)
* operaton (2)
* wich (2)

Closes #38029

PiperOrigin-RevId: 695759207
pull/37866/head
Authored by Nathan Baulch 2 weeks ago, committed by Copybara-Service
parent be472f1244
commit 8342a109ae
1. .pylintrc (2)
2. .pylintrc-examples (2)
3. .pylintrc-tests (2)
4. examples/python/cancellation/search.py (2)
5. examples/python/debug/README.md (4)
6. examples/python/interceptors/async/README.md (4)
7. examples/python/keep_alive/greeter_client.py (6)
8. examples/python/keep_alive/greeter_server.py (2)
9. examples/python/route_guide/asyncio_route_guide_client.py (2)
10. examples/python/wait_for_ready/wait_for_ready_with_client_timeout_example_server.py (2)
11. setup.cfg (2)
12. setup.py (2)
13. src/python/grpcio/_parallel_compile_patch.py (2)
14. src/python/grpcio/commands.py (2)
15. src/python/grpcio/grpc/__init__.py (4)
16. src/python/grpcio/grpc/_cython/_cygrpc/aio/call.pyx.pxi (10)
17. src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi (4)
18. src/python/grpcio/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi (4)
19. src/python/grpcio/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi (2)
20. src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi (10)
21. src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi (2)
22. src/python/grpcio/grpc/_observability.py (4)
23. src/python/grpcio/grpc/_runtime_protos.py (10)
24. src/python/grpcio/grpc/_server.py (2)
25. src/python/grpcio/grpc/aio/_base_call.py (2)
26. src/python/grpcio/grpc/aio/_base_channel.py (4)
27. src/python/grpcio/grpc/aio/_base_server.py (2)
28. src/python/grpcio/grpc/aio/_interceptor.py (16)
29. src/python/grpcio/grpc/framework/interfaces/base/base.py (2)
30. src/python/grpcio/grpc/framework/interfaces/base/utilities.py (2)
31. src/python/grpcio/grpc/framework/interfaces/face/utilities.py (2)
32. src/python/grpcio_csm_observability/grpc_csm_observability/_csm_observability_plugin.py (4)
33. src/python/grpcio_observability/_parallel_compile_patch.py (2)
34. src/python/grpcio_observability/grpc_observability/_cyobservability.pyx (4)
35. src/python/grpcio_observability/grpc_observability/_measures.py (2)
36. src/python/grpcio_observability/grpc_observability/_open_census_exporter.py (2)
37. src/python/grpcio_observability/grpc_observability/_open_telemetry_observability.py (2)
38. src/python/grpcio_observability/grpc_observability/python_observability_context.h (8)
39. src/python/grpcio_tests/tests/_loader.py (2)
40. src/python/grpcio_tests/tests/qps/client_runner.py (2)
41. src/python/grpcio_tests/tests/status/_grpc_status_test.py (2)
42. src/python/grpcio_tests/tests/unit/_compression_test.py (2)
43. src/python/grpcio_tests/tests/unit/_contextvars_propagation_test.py (2)
44. src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py (2)
45. src/python/grpcio_tests/tests/unit/_exit_test.py (2)
46. src/python/grpcio_tests/tests/unit/_grpc_shutdown_test.py (2)
47. src/python/grpcio_tests/tests/unit/_metadata_flags_test.py (2)
48. src/python/grpcio_tests/tests_aio/status/grpc_status_test.py (2)
49. src/python/grpcio_tests/tests_aio/unit/_common.py (2)
50. src/python/grpcio_tests/tests_aio/unit/_metadata_test.py (2)
51. src/python/grpcio_tests/tests_aio/unit/_test_server.py (4)
52. src/python/grpcio_tests/tests_aio/unit/call_test.py (2)
53. src/python/grpcio_tests/tests_aio/unit/client_stream_unary_interceptor_test.py (2)
54. src/python/grpcio_tests/tests_aio/unit/client_unary_unary_interceptor_test.py (2)
55. src/python/grpcio_tests/tests_aio/unit/connectivity_test.py (2)
56. src/python/grpcio_tests/tests_gevent/unit/_test_server.py (2)
57. templates/src/python/_parallel_compile_patch.py.include (2)
58. tools/distrib/python/grpcio_tools/_parallel_compile_patch.py (2)

@@ -73,7 +73,7 @@ disable=
 protected-access,
 # NOTE(nathaniel): Pylint and I will probably never agree on this.
 too-few-public-methods,
-# NOTE(nathaniel): Pylint and I wil probably never agree on this for
+# NOTE(nathaniel): Pylint and I will probably never agree on this for
 # private classes. For public classes maybe?
 too-many-instance-attributes,
 # NOTE(nathaniel): Some of our modules have a lot of lines... of

@@ -76,7 +76,7 @@ disable=
 protected-access,
 # NOTE(nathaniel): Pylint and I will probably never agree on this.
 too-few-public-methods,
-# NOTE(nathaniel): Pylint and I wil probably never agree on this for
+# NOTE(nathaniel): Pylint and I will probably never agree on this for
 # private classes. For public classes maybe?
 too-many-instance-attributes,
 # NOTE(nathaniel): Some of our modules have a lot of lines... of

@@ -102,7 +102,7 @@ disable=
 protected-access,
 # NOTE(nathaniel): Pylint and I will probably never agree on this.
 too-few-public-methods,
-# NOTE(nathaniel): Pylint and I wil probably never agree on this for
+# NOTE(nathaniel): Pylint and I will probably never agree on this for
 # private classes. For public classes maybe?
 too-many-instance-attributes,
 # NOTE(nathaniel): Some of our modules have a lot of lines... of

@@ -39,7 +39,7 @@ def _get_hamming_distance(a, b):
 def _get_substring_hamming_distance(candidate, target):
-"""Calculates the minimum hamming distance between between the target
+"""Calculates the minimum hamming distance between the target
 and any substring of the candidate.
 Args:

@@ -46,11 +46,11 @@ GRPC_TRACE=call_error,connectivity_state,pick_first,round_robin,glb
 ## How to debug your application?
 `pdb` is a debugging tool that is available for Python interpreters natively.
-You can set breakpoint, and execute commands while the application is stopped.
+You can set breakpoints, and execute commands while the application is stopped.
 The simplest usage is add a single line in the place you want to inspect:
 `import pdb; pdb.set_trace()`. When interpreter see this line, it would pop out
-a interactive command line interface for you to inspect the application state.
+an interactive command line interface for you to inspect the application state.
 For more detailed usage, see https://docs.python.org/3/library/pdb.html.
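As a quick illustration of the workflow this README describes (the handler name and body here are hypothetical):

```python
def handle_request(request):
    # Execution pauses here; pdb opens an interactive prompt in the
    # terminal running the application.
    import pdb; pdb.set_trace()
    return request
```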

@@ -15,8 +15,8 @@ This example demonstrate the usage of Async interceptors and context propagation
 This example have the following steps:
 1. Generate RPC ID on client side and propagate to server using `metadata`.
 * `contextvars` can be used here if client and server is running in a same coroutine (or same thead for Sync).
-2. Server interceptor1 intercept the request, it checks `rpc_id_var` and decorate it with it's tag `Interceptor1`.
-3. Server interceptor2 intercept the request, it checks `rpc_id_var` and decorate it with it's tag `Interceptor2`.
+2. Server interceptor1 intercept the request, it checks `rpc_id_var` and decorate it with its tag `Interceptor1`.
+3. Server interceptor2 intercept the request, it checks `rpc_id_var` and decorate it with its tag `Interceptor2`.
 4. Server handler receives the request with `rpc_id_var` decorated by both interceptor1 and interceptor2.
 ## How to run this example
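For reference, a minimal sketch of the `contextvars` pattern these steps describe; the tag-prefixing scheme is illustrative:

```python
import contextvars

# Mirrors the example's rpc_id_var; the default value is illustrative.
rpc_id_var = contextvars.ContextVar("rpc_id", default="default")

def decorate(tag: str) -> None:
    # Each interceptor prepends its own tag, as steps 2 and 3 describe.
    rpc_id_var.set(f"{tag}-{rpc_id_var.get()}")

decorate("Interceptor1")
decorate("Interceptor2")
print(rpc_id_var.get())  # Interceptor2-Interceptor1-default
```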

@@ -37,7 +37,7 @@ def run():
 grpc.keepalive_time_ms: The period (in milliseconds) after which a keepalive ping is
 sent on the transport.
 grpc.keepalive_timeout_ms: The amount of time (in milliseconds) the sender of the keepalive
-ping waits for an acknowledgement. If it does not receive an acknowledgment within this
+ping waits for an acknowledgement. If it does not receive an acknowledgement within this
 time, it will close the connection.
 grpc.keepalive_permit_without_calls: If set to 1 (0 : false; 1 : true), allows keepalive
 pings to be sent even if there are no calls in flight.
@@ -60,9 +60,9 @@ def run():
 unary_call(stub, 1, "you")
 # Run 30s, run this with GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive to observe logs.
-# Client will be closed after receveing GOAWAY from server.
+# Client will be closed after receiving GOAWAY from server.
 for i in range(30):
-print(f"{i} seconds paased.")
+print(f"{i} seconds passed.")
 sleep(1)
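These channel options are passed as (key, value) pairs at channel creation; a minimal sketch, with an illustrative endpoint and values:

```python
import grpc

options = [
    ("grpc.keepalive_time_ms", 8000),
    ("grpc.keepalive_timeout_ms", 5000),
    ("grpc.keepalive_permit_without_calls", 1),
]
with grpc.insecure_channel("localhost:50051", options=options) as channel:
    pass  # build stubs and issue RPCs here
```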

@@ -35,7 +35,7 @@ def serve():
 grpc.keepalive_time_ms: The period (in milliseconds) after which a keepalive ping is
 sent on the transport.
 grpc.keepalive_timeout_ms: The amount of time (in milliseconds) the sender of the keepalive
-ping waits for an acknowledgement. If it does not receive an acknowledgment within
+ping waits for an acknowledgement. If it does not receive an acknowledgement within
 this time, it will close the connection.
 grpc.http2.min_ping_interval_without_data_ms: Minimum allowed time (in milliseconds)
 between a server receiving successive ping frames without sending any data/header frame.
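The same mechanism applies on the server side; a sketch with illustrative values:

```python
from concurrent import futures
import grpc

server = grpc.server(
    futures.ThreadPoolExecutor(max_workers=10),
    options=[
        ("grpc.keepalive_time_ms", 20000),
        ("grpc.keepalive_timeout_ms", 10000),
        ("grpc.http2.min_ping_interval_without_data_ms", 5000),
    ],
)
```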

@@ -33,7 +33,7 @@ def make_route_note(
 )
-# Performs an unary call
+# Performs a unary call
 async def guide_get_one_feature(
 stub: route_guide_pb2_grpc.RouteGuideStub, point: route_guide_pb2.Point
 ) -> None:

@@ -51,7 +51,7 @@ class Greeter(helloworld_pb2_grpc.GreeterServicer):
 # for server to up and running.
 starting_up_server()
-# Initial metadata will be send back immediately after calling send_initial_metadata.
+# Initial metadata will be sent back immediately after calling send_initial_metadata.
 print("sending initial metadata back")
 servicer_context.send_initial_metadata(_INITIAL_METADATA)

@@ -15,7 +15,7 @@ exclude=.*protoc_plugin/protoc_plugin_test\.proto$
 [metadata]
 license_files = LICENSE
-# NOTE(lidiz) Adding examples one by one due to pytype aggressive errer:
+# NOTE(lidiz) Adding examples one by one due to pytype aggressive error:
 # ninja: error: build.ninja:178: multiple rules generate helloworld_pb2.pyi [-w dupbuild=err]
 # TODO(xuanwn): include all files in src/python/grpcio/grpc
 [pytype]

@@ -125,7 +125,7 @@ BUILD_WITH_BORING_SSL_ASM = _env_bool_value(
 # Export this environment variable to override the platform variant that will
 # be chosen for boringssl assembly optimizations. This option is useful when
 # crosscompiling and the host platform as obtained by sysconfig.get_platform()
-# doesn't match the platform we are targetting.
+# doesn't match the platform we are targeting.
 # Example value: "linux-aarch64"
 BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM = os.environ.get(
 "GRPC_BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM", ""

@@ -18,7 +18,7 @@
 # instead. This file can be regenerated from the template by running
 # `tools/buildgen/generate_projects.sh`.
-"""Patches the compile() to allow enable parallel compilation of C/C++.
+"""Patches the compile() to enable parallel compilation of C/C++.
 build_ext has lots of C/C++ files and normally them one by one.
 Enabling parallel build helps a lot.

@@ -273,7 +273,7 @@ class BuildExt(build_ext.build_ext):
 # behavior in gcc and clang. The clang doesn't take --stdc++11
 # flags but gcc does. Since the setuptools of Python only support
 # all C or all C++ compilation, the mix of C and C++ will crash.
-# *By default*, macOS and FreBSD use clang and Linux use gcc
+# *By default*, macOS and FreeBSD use clang and Linux use gcc
 #
 # If we are not using a permissive compiler that's OK with being
 # passed wrong std flags, swap out compile function by adding a filter

@@ -1231,7 +1231,7 @@ class ServicerContext(RpcContext, metaclass=abc.ABCMeta):
 def abort(self, code, details):
 """Raises an exception to terminate the RPC with a non-OK status.
-The code and details passed as arguments will supercede any existing
+The code and details passed as arguments will supersede any existing
 ones.
 Args:
@@ -1250,7 +1250,7 @@ class ServicerContext(RpcContext, metaclass=abc.ABCMeta):
 def abort_with_status(self, status):
 """Raises an exception to terminate the RPC with a non-OK status.
-The status passed as argument will supercede any existing status code,
+The status passed as argument will supersede any existing status code,
 status message and trailing metadata.
 This is an EXPERIMENTAL API.
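A short sketch of `abort()` in a servicer method; the service and validation logic are hypothetical:

```python
import grpc

class Greeter:  # hypothetical servicer
    def SayHello(self, request, context):
        if not request.name:
            # abort() raises; the code and details supersede any status
            # previously set on the context.
            context.abort(grpc.StatusCode.INVALID_ARGUMENT, "name must not be empty")
```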

@@ -214,7 +214,7 @@ cdef class _AioCall(GrpcCallWrapper):
 """Returns if the RPC call has finished.
 Checks if the status has been provided, either
-because the RPC finished or because was cancelled..
+because the RPC finished or because was cancelled.
 Returns:
 True if the RPC can be considered finished.
@@ -235,7 +235,7 @@ cdef class _AioCall(GrpcCallWrapper):
 async def status(self):
 """Returns the status of the RPC call.
-It returns the finshed status of the RPC. If the RPC
+It returns the finished status of the RPC. If the RPC
 has not finished yet this function will wait until the RPC
 gets finished.
@@ -277,7 +277,7 @@ cdef class _AioCall(GrpcCallWrapper):
 """Returns if the RPC was cancelled locally.
 Returns:
-True when was cancelled locally, False when was cancelled remotelly or
+True when was cancelled locally, False when was cancelled remotely or
 is still ongoing.
 """
 if self._is_locally_cancelled:
@@ -397,7 +397,7 @@ cdef class _AioCall(GrpcCallWrapper):
 tuple outbound_initial_metadata,
 object context = None):
 """Implementation of the start of a unary-stream call."""
-# Peer may prematurely end this RPC at any point. We need a corutine
+# Peer may prematurely end this RPC at any point. We need a coroutine
 # that watches if the server sends the final status.
 status_task = self._loop.create_task(self._handle_status_once_received())
@@ -503,7 +503,7 @@ cdef class _AioCall(GrpcCallWrapper):
 propagate the final status exception, then we have to raise it.
 Othersize, it would end normally and raise `StopAsyncIteration()`.
 """
-# Peer may prematurely end this RPC at any point. We need a corutine
+# Peer may prematurely end this RPC at any point. We need a coroutine
 # that watches if the server sends the final status.
 status_task = self._loop.create_task(self._handle_status_once_received())

@@ -43,7 +43,7 @@ cdef class CallbackWrapper:
 self._reference_of_future = future
 self._reference_of_failure_handler = failure_handler
 # NOTE(lidiz) We need to ensure when Core invokes our callback, the
-# callback function itself is not deallocated. Othersise, we will get
+# callback function itself is not deallocated. Otherwise, we will get
 # a segfault. We can view this as Core holding a ref.
 cpython.Py_INCREF(self)
@@ -114,7 +114,7 @@ cdef prepend_send_initial_metadata_op(tuple ops, tuple metadata):
 async def _receive_message(GrpcCallWrapper grpc_call_wrapper,
 object loop):
-"""Retrives parsed messages from Core.
+"""Retrieves parsed messages from Core.
 The messages maybe already in Core's buffer, so there isn't a 1-to-1
 mapping between this and the underlying "socket.read()". Also, eventually,

@@ -53,7 +53,7 @@ cdef class _BoundEventLoop:
 )
 # NOTE(lidiz) There isn't a way to cleanly pre-check if fd monitoring
 # support is available or not. Checking the event loop policy is not
-# good enough. The application can has its own loop implementation, or
+# good enough. The application can have its own loop implementation, or
 # uses different types of event loops (e.g., 1 Proactor, 3 Selectors).
 if _has_fd_monitoring:
 try:
@@ -117,7 +117,7 @@ cdef class PollerCompletionQueue(BaseCompletionQueue):
 else:
 with gil:
 # Event loops can be paused or killed at any time. So,
-# instead of deligate to any thread, the polling thread
+# instead of delegate to any thread, the polling thread
 # should handle the distribution of the event.
 self._handle_events(None)

@@ -17,7 +17,7 @@
 cdef class AioRpcStatus(Exception):
 # The final status of gRPC is represented by three trailing metadata:
-# `grpc-status`, `grpc-status-message`, abd `grpc-status-details`.
+# `grpc-status`, `grpc-status-message`, and `grpc-status-details`.
 def __cinit__(self,
 grpc_status_code code,
 str details,

@@ -542,7 +542,7 @@ async def _handle_unary_unary_rpc(object method_handler,
 request_raw,
 )
-# Creates a dedecated ServicerContext
+# Creates a dedicated ServicerContext
 cdef _ServicerContext servicer_context = _ServicerContext(
 rpc_state,
 None,
@@ -575,7 +575,7 @@ async def _handle_unary_stream_rpc(object method_handler,
 request_raw,
 )
-# Creates a dedecated ServicerContext
+# Creates a dedicated ServicerContext
 cdef _ServicerContext servicer_context = _ServicerContext(
 rpc_state,
 method_handler.request_deserializer,
@@ -623,7 +623,7 @@ cdef class _MessageReceiver:
 async def _handle_stream_unary_rpc(object method_handler,
 RPCState rpc_state,
 object loop):
-# Creates a dedecated ServicerContext
+# Creates a dedicated ServicerContext
 cdef _ServicerContext servicer_context = _ServicerContext(
 rpc_state,
 method_handler.request_deserializer,
@@ -655,7 +655,7 @@ async def _handle_stream_unary_rpc(object method_handler,
 async def _handle_stream_stream_rpc(object method_handler,
 RPCState rpc_state,
 object loop):
-# Creates a dedecated ServicerContext
+# Creates a dedicated ServicerContext
 cdef _ServicerContext servicer_context = _ServicerContext(
 rpc_state,
 method_handler.request_deserializer,
@@ -871,7 +871,7 @@ cdef class _ConcurrentRpcLimiter:
 def __cinit__(self, int maximum_concurrent_rpcs):
 if maximum_concurrent_rpcs <= 0:
-raise ValueError("maximum_concurrent_rpcs should be a postive integer")
+raise ValueError("maximum_concurrent_rpcs should be a positive integer")
 self._maximum_concurrent_rpcs = maximum_concurrent_rpcs
 self._active_rpcs = 0
 self.limiter_concurrency_exceeded = False
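This limiter backs the public `maximum_concurrent_rpcs` option; a sketch of setting it when building a sync server (values illustrative):

```python
from concurrent import futures
import grpc

# Must be a positive integer, per the validation above; RPCs beyond the
# limit are rejected with RESOURCE_EXHAUSTED rather than queued.
server = grpc.server(
    futures.ThreadPoolExecutor(max_workers=10),
    maximum_concurrent_rpcs=100,
)
```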

@@ -98,7 +98,7 @@ cdef class ServerCredentials:
 cdef grpc_ssl_pem_key_cert_pair *c_ssl_pem_key_cert_pairs
 cdef size_t c_ssl_pem_key_cert_pairs_count
 cdef list references
-# the cert config related state is used only if this credentials is
+# the cert config related state is used only if these credentials are
 # created with cert config/fetcher
 cdef object initial_cert_config
 cdef object cert_config_fetcher

@@ -61,7 +61,7 @@ class ObservabilityPlugin(
 the gRPC team.*
 The ClientCallTracerCapsule and ClientCallTracerCapsule created by this
-plugin should be inject to gRPC core using observability_init at the
+plugin should be injected to gRPC core using observability_init at the
 start of a program, before any channels/servers are built.
 Any future methods added to this interface cannot have the
@@ -93,7 +93,7 @@ class ObservabilityPlugin(
 Args:
 method_name: The method name of the call in byte format.
 target: The channel target of the call in byte format.
-registered_method: Wether this method is pre-registered.
+registered_method: Whether this method is pre-registered.
 Returns:
 A PyCapsule which stores a ClientCallTracer object.

@@ -88,7 +88,7 @@ def protos(protobuf_path): # pylint: disable=unused-argument
 The returned module object corresponds to the _pb2.py file generated
 by protoc. The path is expected to be relative to an entry on sys.path
-and all transitive dependencies of the file should also be resolveable
+and all transitive dependencies of the file should also be resolvable
 from an entry on sys.path.
 To completely disable the machinery behind this function, set the
@@ -96,7 +96,7 @@ def protos(protobuf_path): # pylint: disable=unused-argument
 Args:
 protobuf_path: The path to the .proto file on the filesystem. This path
-must be resolveable from an entry on sys.path and so must all of its
+must be resolvable from an entry on sys.path and so must all of its
 transitive dependencies.
 Returns:
@@ -125,7 +125,7 @@ def services(protobuf_path): # pylint: disable=unused-argument
 The returned module object corresponds to the _pb2_grpc.py file generated
 by protoc. The path is expected to be relative to an entry on sys.path
-and all transitive dependencies of the file should also be resolveable
+and all transitive dependencies of the file should also be resolvable
 from an entry on sys.path.
 To completely disable the machinery behind this function, set the
@@ -133,7 +133,7 @@ def services(protobuf_path): # pylint: disable=unused-argument
 Args:
 protobuf_path: The path to the .proto file on the filesystem. This path
-must be resolveable from an entry on sys.path and so must all of its
+must be resolvable from an entry on sys.path and so must all of its
 transitive dependencies.
 Returns:
@@ -156,7 +156,7 @@ def protos_and_services(protobuf_path): # pylint: disable=unused-argument
 Args:
 protobuf_path: The path to the .proto file on the filesystem. This path
-must be resolveable from an entry on sys.path and so must all of its
+must be resolvable from an entry on sys.path and so must all of its
 transitive dependencies.
 Returns:
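A minimal usage sketch of these runtime loaders; the .proto filename is hypothetical and must be resolvable from an entry on sys.path, along with its imports:

```python
import grpc

# grpc.protos_and_services requires grpcio-tools to be installed.
protos, services = grpc.protos_and_services("helloworld.proto")
request = protos.HelloRequest(name="world")
```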

@@ -1075,7 +1075,7 @@ def _handle_call(
 ) -> Tuple[Optional[_RPCState], Optional[futures.Future]]:
 """Handles RPC based on provided handlers.
-When receiving a call event from Core, registered method will have it's
+When receiving a call event from Core, registered method will have its
 name as tag, we pass the tag as registered_method_name to this method,
 then we can find the handler in registered_method_handlers based on
 the method name.

@@ -138,7 +138,7 @@ class Call(RpcContext, metaclass=ABCMeta):
 class UnaryUnaryCall(
 Generic[RequestType, ResponseType], Call, metaclass=ABCMeta
 ):
-"""The abstract base class of an unary-unary RPC on the client-side."""
+"""The abstract base class of a unary-unary RPC on the client-side."""
 @abstractmethod
 def __await__(self) -> Generator[Any, None, ResponseType]:

@@ -183,7 +183,7 @@ class Channel(abc.ABC):
 """Enables asynchronous RPC invocation as a client.
 Channel objects implement the Asynchronous Context Manager (aka. async
-with) type, although they are not supportted to be entered and exited
+with) type, although they are not supported to be entered and exited
 multiple times.
 """
@@ -312,7 +312,7 @@ class Channel(abc.ABC):
 whether the method is registered.
 Returns:
-A UnarySteramMultiCallable value for the named unary-stream method.
+A UnaryStreamMultiCallable value for the named unary-stream method.
 """
 @abc.abstractmethod

@@ -197,7 +197,7 @@ class ServicerContext(Generic[RequestType, ResponseType], abc.ABC):
 ) -> NoReturn:
 """Raises an exception to terminate the RPC with a non-OK status.
-The code and details passed as arguments will supercede any existing
+The code and details passed as arguments will supersede any existing
 ones.
 Args:

@@ -477,8 +477,8 @@ class _InterceptedStreamResponseMixin:
 _response_aiter: Optional[AsyncIterable[ResponseType]]
 def _init_stream_response_mixin(self) -> None:
-# Is initalized later, otherwise if the iterator is not finally
-# consumed a logging warning is emmited by Asyncio.
+# Is initialized later, otherwise if the iterator is not finally
+# consumed a logging warning is emitted by Asyncio.
 self._response_aiter = None
 async def _wait_for_interceptor_task_response_iterator(
@@ -1143,10 +1143,10 @@ class _StreamCallResponseIterator:
 class UnaryStreamCallResponseIterator(
 _StreamCallResponseIterator, _base_call.UnaryStreamCall
 ):
-"""UnaryStreamCall class wich uses an alternative response iterator."""
+"""UnaryStreamCall class which uses an alternative response iterator."""
 async def read(self) -> Union[EOFType, ResponseType]:
-# Behind the scenes everyting goes through the
+# Behind the scenes everything goes through the
 # async iterator. So this path should not be reached.
 raise NotImplementedError()
@@ -1154,21 +1154,21 @@ class UnaryStreamCallResponseIterator(
 class StreamStreamCallResponseIterator(
 _StreamCallResponseIterator, _base_call.StreamStreamCall
 ):
-"""StreamStreamCall class wich uses an alternative response iterator."""
+"""StreamStreamCall class which uses an alternative response iterator."""
 async def read(self) -> Union[EOFType, ResponseType]:
-# Behind the scenes everyting goes through the
+# Behind the scenes everything goes through the
 # async iterator. So this path should not be reached.
 raise NotImplementedError()
 async def write(self, request: RequestType) -> None:
-# Behind the scenes everyting goes through the
+# Behind the scenes everything goes through the
 # async iterator provided by the InterceptedStreamStreamCall.
 # So this path should not be reached.
 raise NotImplementedError()
 async def done_writing(self) -> None:
-# Behind the scenes everyting goes through the
+# Behind the scenes everything goes through the
 # async iterator provided by the InterceptedStreamStreamCall.
 # So this path should not be reached.
 raise NotImplementedError()

@@ -83,7 +83,7 @@ class Completion(abc.ABC):
 """An aggregate of the values exchanged upon operation completion.
 Attributes:
-terminal_metadata: A terminal metadata value for the operaton.
+terminal_metadata: A terminal metadata value for the operation.
 code: A code value for the operation.
 message: A message value for the operation.
 """

@@ -57,7 +57,7 @@ def completion(terminal_metadata, code, message):
 """Creates a base.Completion aggregating the given operation values.
 Args:
-terminal_metadata: A terminal metadata value for an operaton.
+terminal_metadata: A terminal metadata value for an operation.
 code: A code value for an operation.
 message: A message value for an operation.

@@ -174,7 +174,7 @@ def unary_stream_event(behavior):
 Args:
 behavior: The implementation of a unary-stream RPC method as a callable
 value that takes a request value, a stream.Consumer to which to pass the
-the response values of the RPC, and an face.ServicerContext.
+response values of the RPC, and an face.ServicerContext.
 Returns:
 An face.MethodImplementation derived from the given behavior.

@@ -207,7 +207,7 @@ class CsmOpenTelemetryPluginOption(OpenTelemetryPluginOption):
 target: Required. The target for the RPC.
 Returns:
-True if this this plugin option is active on the channel, false otherwise.
+True if this plugin option is active on the channel, false otherwise.
 """
 # CSM channels should have an "xds" scheme
 if not target.startswith("xds:"):
@@ -237,7 +237,7 @@ class CsmOpenTelemetryPluginOption(OpenTelemetryPluginOption):
 xds: Required. if this server is build for xds.
 Returns:
-True if this this plugin option is active on the server, false otherwise.
+True if this plugin option is active on the server, false otherwise.
 """
 return True

@@ -18,7 +18,7 @@
 # instead. This file can be regenerated from the template by running
 # `tools/buildgen/generate_projects.sh`.
-"""Patches the compile() to allow enable parallel compilation of C/C++.
+"""Patches the compile() to enable parallel compilation of C/C++.
 build_ext has lots of C/C++ files and normally them one by one.
 Enabling parallel build helps a lot.

@@ -179,7 +179,7 @@ def _c_measurement_to_measurement(object measurement
 """Convert Cython Measurement to Python measurement.
 Args:
-measurement: Actual measurement repesented by Cython type Measurement, using object here
+measurement: Actual measurement represented by Cython type Measurement, using object here
 since Cython refuse to automatically convert a union with unsafe type combinations.
 Returns:
@@ -308,7 +308,7 @@ cdef void _export_census_data(object exporter):
 while not GLOBAL_SHUTDOWN_EXPORT_THREAD:
 lk = new unique_lock[mutex](g_census_data_buffer_mutex)
 # Wait for next batch of census data OR timeout at fixed interval.
-# Batch export census data to minimize the time we acquiring the GIL.
+# Batch export census data to minimize the time we acquire the GIL.
 AwaitNextBatchLocked(dereference(lk), export_interval_ms)
 # Break only when buffer have data

@@ -17,7 +17,7 @@ from opencensus.stats import measure
 # These measure definitions should be kept in sync across opencensus implementations.
 # https://github.com/census-instrumentation/opencensus-java/blob/master/contrib/grpc_metrics/src/main/java/io/opencensus/contrib/grpc/metrics/RpcMeasureConstants.java.
-# Unit constatns
+# Unit constants
 UNIT_BYTES = "By"
 UNIT_MILLISECONDS = "ms"
 UNIT_COUNT = "1"

@@ -123,7 +123,7 @@ class OpenCensusExporter(_observability.Exporter):
 if not measure:
 continue
 # Create a measurement map for each metric, otherwise metrics will
-# be override instead of accumulate.
+# be overridden instead of accumulate.
 measurement_map = self.stats_recorder.new_measurement_map()
 # Add data label to default labels.
 labels = data.labels

@@ -383,7 +383,7 @@ class OpenTelemetryObservability(grpc._observability.ObservabilityPlugin):
 try:
 _cyobservability.cyobservability_init(self._exporter)
-# TODO(xuanwn): Use specific exceptons
+# TODO(xuanwn): Use specific exceptions
 except Exception as e: # pylint: disable=broad-except
 _LOGGER.exception("Initiate observability failed with: %s", e)

@@ -208,11 +208,11 @@ class Span final {
 uint64_t child_span_count_ = 0;
 };
-// PythonCensusContext is associated with each clientCallTrcer,
+// PythonCensusContext is associated with each clientCallTracer,
 // clientCallAttemptTracer and ServerCallTracer to help manage the span,
-// spanContext and labels for each tracer. Craete a new PythonCensusContext will
-// always reasult in creating a new span (and a new SpanContext for that span).
-// It's created during callTraceer initialization and will be destroyed after
+// spanContext and labels for each tracer. Create a new PythonCensusContext will
+// always result in creating a new span (and a new SpanContext for that span).
+// It's created during callTracer initialization and will be destroyed after
 // the destruction of each callTracer.
 class PythonCensusContext {
 public:

@@ -55,7 +55,7 @@ class Loader(object):
 Attributes:
 suite (unittest.TestSuite): All tests collected by the loader.
-loader (unittest.TestLoader): Standard Python unittest loader to be ran per
+loader (unittest.TestLoader): Standard Python unittest loader to be run per
 module discovered.
 module_matcher (re.RegexObject): A regular expression object to match
 against module names and determine whether or not the discovered module

@@ -70,7 +70,7 @@ class ClosedLoopClientRunner(ClientRunner):
 super(ClosedLoopClientRunner, self).__init__(client)
 self._is_running = False
 self._request_count = request_count
-# For server-streaming RPC, don't spawn new RPC after each responses.
+# For server-streaming RPC, don't spawn new RPC after each response.
 # This yield at most ~17% for single RPC scenarios.
 if not no_ping_pong:
 # Send a new request on each response for closed loop

@@ -200,7 +200,7 @@ class StatusTest(unittest.TestCase):
 ).with_call(_REQUEST)
 rpc_error = exception_context.exception
 self.assertEqual(rpc_error.code(), grpc.StatusCode.UNKNOWN)
-# Invalid status code exception raised during coversion
+# Invalid status code exception raised during conversion
 self.assertIn("Invalid status code", rpc_error.details())

@@ -296,7 +296,7 @@ class CompressionTest(unittest.TestCase):
 self.assertGreaterEqual(
 compression_ratio,
 -1.0 * _COMPRESSION_RATIO_THRESHOLD,
-msg="Actual compession ratio: {}".format(compression_ratio),
+msg="Actual compression ratio: {}".format(compression_ratio),
 )
 def assertConfigurationCompressed(

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Test of propagation of contextvars to AuthMetadataPlugin threads.."""
+"""Test of propagation of contextvars to AuthMetadataPlugin threads."""
 import contextlib
 import logging

@@ -251,7 +251,7 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase):
 )
 server_call_driver.events()
-client_recieve_initial_metadata_event = (
+client_receive_initial_metadata_event = (
 client_receive_initial_metadata_event_future.result()
 )

@@ -62,7 +62,7 @@ atexit.register(cleanup_processes)
 def _process_wait_with_timeout(process, timeout=WAIT_CHECK_DEFAULT_TIMEOUT):
-"""A funciton to mimic 3.3+ only timeout argument in process.wait."""
+"""A function to mimic 3.3+ only timeout argument in process.wait."""
 deadline = datetime.datetime.now() + timeout
 while (process.poll() is None) and (datetime.datetime.now() < deadline):
 time.sleep(WAIT_CHECK_INTERVAL.total_seconds())

@@ -39,7 +39,7 @@ class GrpcShutdownTest(unittest.TestCase):
 ):
 connection_failed.set()
-# Connects to an void address, and subscribes state changes
+# Connects to a void address, and subscribes state changes
 channel = grpc.insecure_channel("0.1.1.1:12345")
 channel.subscribe(on_state_change, True)

@@ -234,7 +234,7 @@ class MetadataFlagsTest(unittest.TestCase):
 # To test the wait mechanism, Python thread is required to make
 # client set up first without handling them case by case.
 # Also, Python thread don't pass the unhandled exceptions to
-# main thread. So, it need another method to store the
+# main thread. So, it needs another method to store the
 # exceptions and raise them again in main thread.
 unhandled_exceptions = queue.Queue()

@@ -174,7 +174,7 @@ class StatusTest(AioTestBase):
 await self._channel.unary_unary(_INVALID_CODE)(_REQUEST)
 rpc_error = exception_context.exception
 self.assertEqual(rpc_error.code(), grpc.StatusCode.UNKNOWN)
-# Invalid status code exception raised during coversion
+# Invalid status code exception raised during conversion
 self.assertIn("Invalid status code", rpc_error.details())

@@ -51,7 +51,7 @@ def inject_callbacks(call: aio.Call):
 first_callback_ran = asyncio.Event()
 def first_callback(call):
-# Validate that all resopnses have been received
+# Validate that all responses have been received
 # and the call is an end state.
 assert call.done()
 first_callback_ran.set()

@@ -100,7 +100,7 @@ class TestTypeMetadata(unittest.TestCase):
 def test_init_metadata(self):
 test_cases = {
-"emtpy": (),
+"empty": (),
 "with-single-data": self._DEFAULT_DATA,
 "with-multi-data": self._MULTI_ENTRY_DATA,
 }

@@ -102,7 +102,7 @@ class TestServiceServicer(test_pb2_grpc.TestServiceServicer):
 else:
 yield messages_pb2.StreamingOutputCallResponse()
-# Next methods are extra ones that are registred programatically
+# Next methods are extra ones that are registered programmatically
 # when the sever is instantiated. They are not being provided by
 # the proto file.
 async def UnaryCallWithSleep(self, unused_request, unused_context):
@@ -144,7 +144,7 @@ class TestServiceServicer(test_pb2_grpc.TestServiceServicer):
 def _create_extra_generic_handler(servicer: TestServiceServicer):
-# Add programatically extra methods not provided by the proto file
+# Add programmatically extra methods not provided by the proto file
 # that are used during the tests
 rpc_method_handlers = {
 "UnaryCallWithSleep": grpc.unary_unary_rpc_method_handler(

@@ -411,7 +411,7 @@ class TestUnaryStreamCall(_MulticallableTestMixin, AioTestBase):
 Certain classes of error only appear for very specific interleavings of
 coroutines. Rather than inserting semi-private asyncio.Events throughout
-the implementation on which to coordinate and explicilty waiting on those
+the implementation on which to coordinate and explicitly waiting on those
 in tests, we instead search for bugs over the space of interleavings by
 stochastically varying the durations of certain events within the test.
 """

@@ -431,7 +431,7 @@ class TestStreamUnaryClientInterceptor(AioTestBase):
 await channel.close()
 async def test_cancel_while_writing(self):
-# Test cancelation before making any write or after doing at least 1
+# Test cancellation before making any write or after doing at least 1
 for num_writes_before_cancel in (0, 1):
 with self.subTest(
 name="Num writes before cancel: {}".format(

@@ -225,7 +225,7 @@ class TestUnaryUnaryClientInterceptor(AioTestBase):
 self.assertEqual(grpc.StatusCode.OK, await call.code())
 # Check that two calls were made, first one finishing with
-# a deadline and second one finishing ok..
+# a deadline and second one finishing ok.
 self.assertEqual(len(interceptor.calls), 2)
 self.assertEqual(
 await interceptor.calls[0].code(),

@@ -116,7 +116,7 @@ class TestConnectivityState(AioTestBase):
 # Make sure there isn't any exception in the task
 await pending_task
-# It can raise exceptions since it is an usage error, but it should not
+# It can raise exceptions since it is a usage error, but it should not
 # segfault or abort.
 with self.assertRaises(aio.UsageError):
 await channel.wait_for_state_change(

@@ -47,7 +47,7 @@ def start_test_server(port: int = 0) -> Tuple[str, Any]:
 def _create_extra_generic_handler(servicer: TestServiceServicer) -> Any:
-# Add programatically extra methods not provided by the proto file
+# Add programmatically extra methods not provided by the proto file
 # that are used during the tests
 rpc_method_handlers = {
 "UnaryCallWithSleep": grpc.unary_unary_rpc_method_handler(

@@ -1,4 +1,4 @@
-"""Patches the compile() to allow enable parallel compilation of C/C++.
+"""Patches the compile() to enable parallel compilation of C/C++.
 build_ext has lots of C/C++ files and normally them one by one.
 Enabling parallel build helps a lot.

@@ -18,7 +18,7 @@
 # instead. This file can be regenerated from the template by running
 # `tools/buildgen/generate_projects.sh`.
-"""Patches the compile() to allow enable parallel compilation of C/C++.
+"""Patches the compile() to enable parallel compilation of C/C++.
 build_ext has lots of C/C++ files and normally them one by one.
 Enabling parallel build helps a lot.
