diff --git a/.pylintrc b/.pylintrc index 414505fe34b..6cf4b94c88e 100644 --- a/.pylintrc +++ b/.pylintrc @@ -73,7 +73,7 @@ disable= protected-access, # NOTE(nathaniel): Pylint and I will probably never agree on this. too-few-public-methods, - # NOTE(nathaniel): Pylint and I wil probably never agree on this for + # NOTE(nathaniel): Pylint and I will probably never agree on this for # private classes. For public classes maybe? too-many-instance-attributes, # NOTE(nathaniel): Some of our modules have a lot of lines... of diff --git a/.pylintrc-examples b/.pylintrc-examples index 38f91fac505..db73aa8eca7 100644 --- a/.pylintrc-examples +++ b/.pylintrc-examples @@ -76,7 +76,7 @@ disable= protected-access, # NOTE(nathaniel): Pylint and I will probably never agree on this. too-few-public-methods, - # NOTE(nathaniel): Pylint and I wil probably never agree on this for + # NOTE(nathaniel): Pylint and I will probably never agree on this for # private classes. For public classes maybe? too-many-instance-attributes, # NOTE(nathaniel): Some of our modules have a lot of lines... of diff --git a/.pylintrc-tests b/.pylintrc-tests index fffb08783a0..7e34a14d866 100644 --- a/.pylintrc-tests +++ b/.pylintrc-tests @@ -102,7 +102,7 @@ disable= protected-access, # NOTE(nathaniel): Pylint and I will probably never agree on this. too-few-public-methods, - # NOTE(nathaniel): Pylint and I wil probably never agree on this for + # NOTE(nathaniel): Pylint and I will probably never agree on this for # private classes. For public classes maybe? too-many-instance-attributes, # NOTE(nathaniel): Some of our modules have a lot of lines... 
of diff --git a/examples/python/cancellation/search.py b/examples/python/cancellation/search.py index 731b40e25d2..3ceb9fefbc8 100644 --- a/examples/python/cancellation/search.py +++ b/examples/python/cancellation/search.py @@ -39,7 +39,7 @@ def _get_hamming_distance(a, b): def _get_substring_hamming_distance(candidate, target): - """Calculates the minimum hamming distance between between the target + """Calculates the minimum hamming distance between the target and any substring of the candidate. Args: diff --git a/examples/python/debug/README.md b/examples/python/debug/README.md index ceed31ef767..237e3085108 100644 --- a/examples/python/debug/README.md +++ b/examples/python/debug/README.md @@ -46,11 +46,11 @@ GRPC_TRACE=call_error,connectivity_state,pick_first,round_robin,glb ## How to debug your application? `pdb` is a debugging tool that is available for Python interpreters natively. -You can set breakpoint, and execute commands while the application is stopped. +You can set breakpoints, and execute commands while the application is stopped. The simplest usage is add a single line in the place you want to inspect: `import pdb; pdb.set_trace()`. When interpreter see this line, it would pop out -a interactive command line interface for you to inspect the application state. +an interactive command line interface for you to inspect the application state. For more detailed usage, see https://docs.python.org/3/library/pdb.html. diff --git a/examples/python/interceptors/async/README.md b/examples/python/interceptors/async/README.md index bcd4e31a667..83c032e20b6 100644 --- a/examples/python/interceptors/async/README.md +++ b/examples/python/interceptors/async/README.md @@ -15,8 +15,8 @@ This example demonstrate the usage of Async interceptors and context propagation This example have the following steps: 1. Generate RPC ID on client side and propagate to server using `metadata`. 
* `contextvars` can be used here if client and server is running in a same coroutine (or same thead for Sync). -2. Server interceptor1 intercept the request, it checks `rpc_id_var` and decorate it with it's tag `Interceptor1`. -3. Server interceptor2 intercept the request, it checks `rpc_id_var` and decorate it with it's tag `Interceptor2`. +2. Server interceptor1 intercept the request, it checks `rpc_id_var` and decorate it with its tag `Interceptor1`. +3. Server interceptor2 intercept the request, it checks `rpc_id_var` and decorate it with its tag `Interceptor2`. 4. Server handler receives the request with `rpc_id_var` decorated by both interceptor1 and interceptor2. ## How to run this example diff --git a/examples/python/keep_alive/greeter_client.py b/examples/python/keep_alive/greeter_client.py index 387acd8e8b1..858077baef9 100644 --- a/examples/python/keep_alive/greeter_client.py +++ b/examples/python/keep_alive/greeter_client.py @@ -37,7 +37,7 @@ def run(): grpc.keepalive_time_ms: The period (in milliseconds) after which a keepalive ping is sent on the transport. grpc.keepalive_timeout_ms: The amount of time (in milliseconds) the sender of the keepalive - ping waits for an acknowledgement. If it does not receive an acknowledgment within this + ping waits for an acknowledgement. If it does not receive an acknowledgement within this time, it will close the connection. grpc.keepalive_permit_without_calls: If set to 1 (0 : false; 1 : true), allows keepalive pings to be sent even if there are no calls in flight. @@ -60,9 +60,9 @@ def run(): unary_call(stub, 1, "you") # Run 30s, run this with GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive to observe logs. - # Client will be closed after receveing GOAWAY from server. + # Client will be closed after receiving GOAWAY from server. 
for i in range(30): - print(f"{i} seconds paased.") + print(f"{i} seconds passed.") sleep(1) diff --git a/examples/python/keep_alive/greeter_server.py b/examples/python/keep_alive/greeter_server.py index edfa306a90c..60bb328b04b 100644 --- a/examples/python/keep_alive/greeter_server.py +++ b/examples/python/keep_alive/greeter_server.py @@ -35,7 +35,7 @@ def serve(): grpc.keepalive_time_ms: The period (in milliseconds) after which a keepalive ping is sent on the transport. grpc.keepalive_timeout_ms: The amount of time (in milliseconds) the sender of the keepalive - ping waits for an acknowledgement. If it does not receive an acknowledgment within + ping waits for an acknowledgement. If it does not receive an acknowledgement within this time, it will close the connection. grpc.http2.min_ping_interval_without_data_ms: Minimum allowed time (in milliseconds) between a server receiving successive ping frames without sending any data/header frame. diff --git a/examples/python/route_guide/asyncio_route_guide_client.py b/examples/python/route_guide/asyncio_route_guide_client.py index c874cc53695..980835404fc 100644 --- a/examples/python/route_guide/asyncio_route_guide_client.py +++ b/examples/python/route_guide/asyncio_route_guide_client.py @@ -33,7 +33,7 @@ def make_route_note( ) -# Performs an unary call +# Performs a unary call async def guide_get_one_feature( stub: route_guide_pb2_grpc.RouteGuideStub, point: route_guide_pb2.Point ) -> None: diff --git a/examples/python/wait_for_ready/wait_for_ready_with_client_timeout_example_server.py b/examples/python/wait_for_ready/wait_for_ready_with_client_timeout_example_server.py index 5ff21d1ad1f..f24b4c404c7 100644 --- a/examples/python/wait_for_ready/wait_for_ready_with_client_timeout_example_server.py +++ b/examples/python/wait_for_ready/wait_for_ready_with_client_timeout_example_server.py @@ -51,7 +51,7 @@ class Greeter(helloworld_pb2_grpc.GreeterServicer): # for server to up and running. 
starting_up_server() - # Initial metadata will be send back immediately after calling send_initial_metadata. + # Initial metadata will be sent back immediately after calling send_initial_metadata. print("sending initial metadata back") servicer_context.send_initial_metadata(_INITIAL_METADATA) diff --git a/setup.cfg b/setup.cfg index be9e83e43b0..8d2cd56c158 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,7 +15,7 @@ exclude=.*protoc_plugin/protoc_plugin_test\.proto$ [metadata] license_files = LICENSE -# NOTE(lidiz) Adding examples one by one due to pytype aggressive errer: +# NOTE(lidiz) Adding examples one by one due to pytype aggressive error: # ninja: error: build.ninja:178: multiple rules generate helloworld_pb2.pyi [-w dupbuild=err] # TODO(xuanwn): include all files in src/python/grpcio/grpc [pytype] diff --git a/setup.py b/setup.py index 5d50a608bf9..3b6edbe0022 100644 --- a/setup.py +++ b/setup.py @@ -125,7 +125,7 @@ BUILD_WITH_BORING_SSL_ASM = _env_bool_value( # Export this environment variable to override the platform variant that will # be chosen for boringssl assembly optimizations. This option is useful when # crosscompiling and the host platform as obtained by sysconfig.get_platform() -# doesn't match the platform we are targetting. +# doesn't match the platform we are targeting. # Example value: "linux-aarch64" BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM = os.environ.get( "GRPC_BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM", "" diff --git a/src/python/grpcio/_parallel_compile_patch.py b/src/python/grpcio/_parallel_compile_patch.py index 9c98aa92436..e91dee1eb0a 100644 --- a/src/python/grpcio/_parallel_compile_patch.py +++ b/src/python/grpcio/_parallel_compile_patch.py @@ -18,7 +18,7 @@ # instead. This file can be regenerated from the template by running # `tools/buildgen/generate_projects.sh`. -"""Patches the compile() to allow enable parallel compilation of C/C++. +"""Patches the compile() to enable parallel compilation of C/C++. 
build_ext has lots of C/C++ files and normally them one by one. Enabling parallel build helps a lot. diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py index 0ceb0546a99..4673157fc9f 100644 --- a/src/python/grpcio/commands.py +++ b/src/python/grpcio/commands.py @@ -273,7 +273,7 @@ class BuildExt(build_ext.build_ext): # behavior in gcc and clang. The clang doesn't take --stdc++11 # flags but gcc does. Since the setuptools of Python only support # all C or all C++ compilation, the mix of C and C++ will crash. - # *By default*, macOS and FreBSD use clang and Linux use gcc + # *By default*, macOS and FreeBSD use clang and Linux use gcc # # If we are not using a permissive compiler that's OK with being # passed wrong std flags, swap out compile function by adding a filter diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py index 8e1fb2631ad..067f66693f3 100644 --- a/src/python/grpcio/grpc/__init__.py +++ b/src/python/grpcio/grpc/__init__.py @@ -1231,7 +1231,7 @@ class ServicerContext(RpcContext, metaclass=abc.ABCMeta): def abort(self, code, details): """Raises an exception to terminate the RPC with a non-OK status. - The code and details passed as arguments will supercede any existing + The code and details passed as arguments will supersede any existing ones. Args: @@ -1250,7 +1250,7 @@ class ServicerContext(RpcContext, metaclass=abc.ABCMeta): def abort_with_status(self, status): """Raises an exception to terminate the RPC with a non-OK status. - The status passed as argument will supercede any existing status code, + The status passed as argument will supersede any existing status code, status message and trailing metadata. This is an EXPERIMENTAL API. 
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/call.pyx.pxi index 00c0a29c2ab..2ec8a25cc46 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/call.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/call.pyx.pxi @@ -214,7 +214,7 @@ cdef class _AioCall(GrpcCallWrapper): """Returns if the RPC call has finished. Checks if the status has been provided, either - because the RPC finished or because was cancelled.. + because the RPC finished or because was cancelled. Returns: True if the RPC can be considered finished. @@ -235,7 +235,7 @@ cdef class _AioCall(GrpcCallWrapper): async def status(self): """Returns the status of the RPC call. - It returns the finshed status of the RPC. If the RPC + It returns the finished status of the RPC. If the RPC has not finished yet this function will wait until the RPC gets finished. @@ -277,7 +277,7 @@ cdef class _AioCall(GrpcCallWrapper): """Returns if the RPC was cancelled locally. Returns: - True when was cancelled locally, False when was cancelled remotelly or + True when was cancelled locally, False when was cancelled remotely or is still ongoing. """ if self._is_locally_cancelled: @@ -397,7 +397,7 @@ cdef class _AioCall(GrpcCallWrapper): tuple outbound_initial_metadata, object context = None): """Implementation of the start of a unary-stream call.""" - # Peer may prematurely end this RPC at any point. We need a corutine + # Peer may prematurely end this RPC at any point. We need a coroutine # that watches if the server sends the final status. status_task = self._loop.create_task(self._handle_status_once_received()) @@ -503,7 +503,7 @@ cdef class _AioCall(GrpcCallWrapper): propagate the final status exception, then we have to raise it. Othersize, it would end normally and raise `StopAsyncIteration()`. """ - # Peer may prematurely end this RPC at any point. We need a corutine + # Peer may prematurely end this RPC at any point. 
We need a coroutine # that watches if the server sends the final status. status_task = self._loop.create_task(self._handle_status_once_received()) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi index 2b0df0e5ce7..a5b37662aa8 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi @@ -43,7 +43,7 @@ cdef class CallbackWrapper: self._reference_of_future = future self._reference_of_failure_handler = failure_handler # NOTE(lidiz) We need to ensure when Core invokes our callback, the - # callback function itself is not deallocated. Othersise, we will get + # callback function itself is not deallocated. Otherwise, we will get # a segfault. We can view this as Core holding a ref. cpython.Py_INCREF(self) @@ -114,7 +114,7 @@ cdef prepend_send_initial_metadata_op(tuple ops, tuple metadata): async def _receive_message(GrpcCallWrapper grpc_call_wrapper, object loop): - """Retrives parsed messages from Core. + """Retrieves parsed messages from Core. The messages maybe already in Core's buffer, so there isn't a 1-to-1 mapping between this and the underlying "socket.read()". Also, eventually, diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi index 8e16f1b2f88..56c67dcdead 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi @@ -53,7 +53,7 @@ cdef class _BoundEventLoop: ) # NOTE(lidiz) There isn't a way to cleanly pre-check if fd monitoring # support is available or not. Checking the event loop policy is not - # good enough. The application can has its own loop implementation, or + # good enough. 
The application can have its own loop implementation, or # uses different types of event loops (e.g., 1 Proactor, 3 Selectors). if _has_fd_monitoring: try: @@ -117,7 +117,7 @@ cdef class PollerCompletionQueue(BaseCompletionQueue): else: with gil: # Event loops can be paused or killed at any time. So, - # instead of deligate to any thread, the polling thread + # instead of delegating to any thread, the polling thread should handle the distribution of the event. self._handle_events(None) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi index 07669fc1575..70ffb94e582 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi @@ -17,7 +17,7 @@ cdef class AioRpcStatus(Exception): # The final status of gRPC is represented by three trailing metadata: - # `grpc-status`, `grpc-status-message`, abd `grpc-status-details`. + # `grpc-status`, `grpc-status-message`, and `grpc-status-details`. 
def __cinit__(self, grpc_status_code code, str details, diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi index 8332c5a0d5b..f6469fb4b81 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi @@ -542,7 +542,7 @@ async def _handle_unary_unary_rpc(object method_handler, request_raw, ) - # Creates a dedecated ServicerContext + # Creates a dedicated ServicerContext cdef _ServicerContext servicer_context = _ServicerContext( rpc_state, None, @@ -575,7 +575,7 @@ async def _handle_unary_stream_rpc(object method_handler, request_raw, ) - # Creates a dedecated ServicerContext + # Creates a dedicated ServicerContext cdef _ServicerContext servicer_context = _ServicerContext( rpc_state, method_handler.request_deserializer, @@ -623,7 +623,7 @@ cdef class _MessageReceiver: async def _handle_stream_unary_rpc(object method_handler, RPCState rpc_state, object loop): - # Creates a dedecated ServicerContext + # Creates a dedicated ServicerContext cdef _ServicerContext servicer_context = _ServicerContext( rpc_state, method_handler.request_deserializer, @@ -655,7 +655,7 @@ async def _handle_stream_unary_rpc(object method_handler, async def _handle_stream_stream_rpc(object method_handler, RPCState rpc_state, object loop): - # Creates a dedecated ServicerContext + # Creates a dedicated ServicerContext cdef _ServicerContext servicer_context = _ServicerContext( rpc_state, method_handler.request_deserializer, @@ -871,7 +871,7 @@ cdef class _ConcurrentRpcLimiter: def __cinit__(self, int maximum_concurrent_rpcs): if maximum_concurrent_rpcs <= 0: - raise ValueError("maximum_concurrent_rpcs should be a postive integer") + raise ValueError("maximum_concurrent_rpcs should be a positive integer") self._maximum_concurrent_rpcs = maximum_concurrent_rpcs self._active_rpcs = 0 self.limiter_concurrency_exceeded = False diff --git 
a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi index fafbc5475e6..0d071cb52cc 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi @@ -98,7 +98,7 @@ cdef class ServerCredentials: cdef grpc_ssl_pem_key_cert_pair *c_ssl_pem_key_cert_pairs cdef size_t c_ssl_pem_key_cert_pairs_count cdef list references - # the cert config related state is used only if this credentials is + # the cert config related state is used only if these credentials are # created with cert config/fetcher cdef object initial_cert_config cdef object cert_config_fetcher diff --git a/src/python/grpcio/grpc/_observability.py b/src/python/grpcio/grpc/_observability.py index 274bd829f66..be82d269e86 100644 --- a/src/python/grpcio/grpc/_observability.py +++ b/src/python/grpcio/grpc/_observability.py @@ -61,7 +61,7 @@ class ObservabilityPlugin( the gRPC team.* The ClientCallTracerCapsule and ClientCallTracerCapsule created by this - plugin should be inject to gRPC core using observability_init at the + plugin should be injected to gRPC core using observability_init at the start of a program, before any channels/servers are built. Any future methods added to this interface cannot have the @@ -93,7 +93,7 @@ class ObservabilityPlugin( Args: method_name: The method name of the call in byte format. target: The channel target of the call in byte format. - registered_method: Wether this method is pre-registered. + registered_method: Whether this method is pre-registered. Returns: A PyCapsule which stores a ClientCallTracer object. 
diff --git a/src/python/grpcio/grpc/_runtime_protos.py b/src/python/grpcio/grpc/_runtime_protos.py index 7ff887e6854..d0195551dfa 100644 --- a/src/python/grpcio/grpc/_runtime_protos.py +++ b/src/python/grpcio/grpc/_runtime_protos.py @@ -88,7 +88,7 @@ def protos(protobuf_path): # pylint: disable=unused-argument The returned module object corresponds to the _pb2.py file generated by protoc. The path is expected to be relative to an entry on sys.path - and all transitive dependencies of the file should also be resolveable + and all transitive dependencies of the file should also be resolvable from an entry on sys.path. To completely disable the machinery behind this function, set the @@ -96,7 +96,7 @@ def protos(protobuf_path): # pylint: disable=unused-argument Args: protobuf_path: The path to the .proto file on the filesystem. This path - must be resolveable from an entry on sys.path and so must all of its + must be resolvable from an entry on sys.path and so must all of its transitive dependencies. Returns: @@ -125,7 +125,7 @@ def services(protobuf_path): # pylint: disable=unused-argument The returned module object corresponds to the _pb2_grpc.py file generated by protoc. The path is expected to be relative to an entry on sys.path - and all transitive dependencies of the file should also be resolveable + and all transitive dependencies of the file should also be resolvable from an entry on sys.path. To completely disable the machinery behind this function, set the @@ -133,7 +133,7 @@ def services(protobuf_path): # pylint: disable=unused-argument Args: protobuf_path: The path to the .proto file on the filesystem. This path - must be resolveable from an entry on sys.path and so must all of its + must be resolvable from an entry on sys.path and so must all of its transitive dependencies. Returns: @@ -156,7 +156,7 @@ def protos_and_services(protobuf_path): # pylint: disable=unused-argument Args: protobuf_path: The path to the .proto file on the filesystem. 
This path - must be resolveable from an entry on sys.path and so must all of its + must be resolvable from an entry on sys.path and so must all of its transitive dependencies. Returns: diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py index c8af57c0806..4080201dd0b 100644 --- a/src/python/grpcio/grpc/_server.py +++ b/src/python/grpcio/grpc/_server.py @@ -1075,7 +1075,7 @@ def _handle_call( ) -> Tuple[Optional[_RPCState], Optional[futures.Future]]: """Handles RPC based on provided handlers. - When receiving a call event from Core, registered method will have it's + When receiving a call event from Core, registered method will have its name as tag, we pass the tag as registered_method_name to this method, then we can find the handler in registered_method_handlers based on the method name. diff --git a/src/python/grpcio/grpc/aio/_base_call.py b/src/python/grpcio/grpc/aio/_base_call.py index f92de4474e3..69b89c0afdb 100644 --- a/src/python/grpcio/grpc/aio/_base_call.py +++ b/src/python/grpcio/grpc/aio/_base_call.py @@ -138,7 +138,7 @@ class Call(RpcContext, metaclass=ABCMeta): class UnaryUnaryCall( Generic[RequestType, ResponseType], Call, metaclass=ABCMeta ): - """The abstract base class of an unary-unary RPC on the client-side.""" + """The abstract base class of a unary-unary RPC on the client-side.""" @abstractmethod def __await__(self) -> Generator[Any, None, ResponseType]: diff --git a/src/python/grpcio/grpc/aio/_base_channel.py b/src/python/grpcio/grpc/aio/_base_channel.py index 715cd4f8a74..0616f243494 100644 --- a/src/python/grpcio/grpc/aio/_base_channel.py +++ b/src/python/grpcio/grpc/aio/_base_channel.py @@ -183,7 +183,7 @@ class Channel(abc.ABC): """Enables asynchronous RPC invocation as a client. Channel objects implement the Asynchronous Context Manager (aka. async - with) type, although they are not supportted to be entered and exited + with) type, although they are not supported to be entered and exited multiple times. 
""" @@ -312,7 +312,7 @@ class Channel(abc.ABC): whether the method is registered. Returns: - A UnarySteramMultiCallable value for the named unary-stream method. + A UnaryStreamMultiCallable value for the named unary-stream method. """ @abc.abstractmethod diff --git a/src/python/grpcio/grpc/aio/_base_server.py b/src/python/grpcio/grpc/aio/_base_server.py index 237417b8fcd..526e21aeb5e 100644 --- a/src/python/grpcio/grpc/aio/_base_server.py +++ b/src/python/grpcio/grpc/aio/_base_server.py @@ -197,7 +197,7 @@ class ServicerContext(Generic[RequestType, ResponseType], abc.ABC): ) -> NoReturn: """Raises an exception to terminate the RPC with a non-OK status. - The code and details passed as arguments will supercede any existing + The code and details passed as arguments will supersede any existing ones. Args: diff --git a/src/python/grpcio/grpc/aio/_interceptor.py b/src/python/grpcio/grpc/aio/_interceptor.py index 1401a08ee4c..1d609534108 100644 --- a/src/python/grpcio/grpc/aio/_interceptor.py +++ b/src/python/grpcio/grpc/aio/_interceptor.py @@ -477,8 +477,8 @@ class _InterceptedStreamResponseMixin: _response_aiter: Optional[AsyncIterable[ResponseType]] def _init_stream_response_mixin(self) -> None: - # Is initalized later, otherwise if the iterator is not finally - # consumed a logging warning is emmited by Asyncio. + # Is initialized later, otherwise if the iterator is not finally + # consumed a logging warning is emitted by Asyncio. 
self._response_aiter = None async def _wait_for_interceptor_task_response_iterator( @@ -1143,10 +1143,10 @@ class _StreamCallResponseIterator: class UnaryStreamCallResponseIterator( _StreamCallResponseIterator, _base_call.UnaryStreamCall ): - """UnaryStreamCall class wich uses an alternative response iterator.""" + """UnaryStreamCall class which uses an alternative response iterator.""" async def read(self) -> Union[EOFType, ResponseType]: - # Behind the scenes everyting goes through the + # Behind the scenes everything goes through the # async iterator. So this path should not be reached. raise NotImplementedError() @@ -1154,21 +1154,21 @@ class UnaryStreamCallResponseIterator( class StreamStreamCallResponseIterator( _StreamCallResponseIterator, _base_call.StreamStreamCall ): - """StreamStreamCall class wich uses an alternative response iterator.""" + """StreamStreamCall class which uses an alternative response iterator.""" async def read(self) -> Union[EOFType, ResponseType]: - # Behind the scenes everyting goes through the + # Behind the scenes everything goes through the # async iterator. So this path should not be reached. raise NotImplementedError() async def write(self, request: RequestType) -> None: - # Behind the scenes everyting goes through the + # Behind the scenes everything goes through the # async iterator provided by the InterceptedStreamStreamCall. # So this path should not be reached. raise NotImplementedError() async def done_writing(self) -> None: - # Behind the scenes everyting goes through the + # Behind the scenes everything goes through the # async iterator provided by the InterceptedStreamStreamCall. # So this path should not be reached. 
raise NotImplementedError() diff --git a/src/python/grpcio/grpc/framework/interfaces/base/base.py b/src/python/grpcio/grpc/framework/interfaces/base/base.py index d1c0b079116..ea71ff6a181 100644 --- a/src/python/grpcio/grpc/framework/interfaces/base/base.py +++ b/src/python/grpcio/grpc/framework/interfaces/base/base.py @@ -83,7 +83,7 @@ class Completion(abc.ABC): """An aggregate of the values exchanged upon operation completion. Attributes: - terminal_metadata: A terminal metadata value for the operaton. + terminal_metadata: A terminal metadata value for the operation. code: A code value for the operation. message: A message value for the operation. """ diff --git a/src/python/grpcio/grpc/framework/interfaces/base/utilities.py b/src/python/grpcio/grpc/framework/interfaces/base/utilities.py index d188339b1eb..5b601f3bde8 100644 --- a/src/python/grpcio/grpc/framework/interfaces/base/utilities.py +++ b/src/python/grpcio/grpc/framework/interfaces/base/utilities.py @@ -57,7 +57,7 @@ def completion(terminal_metadata, code, message): """Creates a base.Completion aggregating the given operation values. Args: - terminal_metadata: A terminal metadata value for an operaton. + terminal_metadata: A terminal metadata value for an operation. code: A code value for an operation. message: A message value for an operation. diff --git a/src/python/grpcio/grpc/framework/interfaces/face/utilities.py b/src/python/grpcio/grpc/framework/interfaces/face/utilities.py index 01807a16026..b02ea530963 100644 --- a/src/python/grpcio/grpc/framework/interfaces/face/utilities.py +++ b/src/python/grpcio/grpc/framework/interfaces/face/utilities.py @@ -174,7 +174,7 @@ def unary_stream_event(behavior): Args: behavior: The implementation of a unary-stream RPC method as a callable value that takes a request value, a stream.Consumer to which to pass the - the response values of the RPC, and an face.ServicerContext. + response values of the RPC, and an face.ServicerContext. 
Returns: An face.MethodImplementation derived from the given behavior. diff --git a/src/python/grpcio_csm_observability/grpc_csm_observability/_csm_observability_plugin.py b/src/python/grpcio_csm_observability/grpc_csm_observability/_csm_observability_plugin.py index 0dd4246dcd9..ce597557075 100644 --- a/src/python/grpcio_csm_observability/grpc_csm_observability/_csm_observability_plugin.py +++ b/src/python/grpcio_csm_observability/grpc_csm_observability/_csm_observability_plugin.py @@ -207,7 +207,7 @@ class CsmOpenTelemetryPluginOption(OpenTelemetryPluginOption): target: Required. The target for the RPC. Returns: - True if this this plugin option is active on the channel, false otherwise. + True if this plugin option is active on the channel, false otherwise. """ # CSM channels should have an "xds" scheme if not target.startswith("xds:"): @@ -237,7 +237,7 @@ class CsmOpenTelemetryPluginOption(OpenTelemetryPluginOption): xds: Required. if this server is build for xds. Returns: - True if this this plugin option is active on the server, false otherwise. + True if this plugin option is active on the server, false otherwise. """ return True diff --git a/src/python/grpcio_observability/_parallel_compile_patch.py b/src/python/grpcio_observability/_parallel_compile_patch.py index 9c98aa92436..e91dee1eb0a 100644 --- a/src/python/grpcio_observability/_parallel_compile_patch.py +++ b/src/python/grpcio_observability/_parallel_compile_patch.py @@ -18,7 +18,7 @@ # instead. This file can be regenerated from the template by running # `tools/buildgen/generate_projects.sh`. -"""Patches the compile() to allow enable parallel compilation of C/C++. +"""Patches the compile() to enable parallel compilation of C/C++. build_ext has lots of C/C++ files and normally them one by one. Enabling parallel build helps a lot. 
diff --git a/src/python/grpcio_observability/grpc_observability/_cyobservability.pyx b/src/python/grpcio_observability/grpc_observability/_cyobservability.pyx index c90ee1927a2..876b9799ecf 100644 --- a/src/python/grpcio_observability/grpc_observability/_cyobservability.pyx +++ b/src/python/grpcio_observability/grpc_observability/_cyobservability.pyx @@ -179,7 +179,7 @@ def _c_measurement_to_measurement(object measurement """Convert Cython Measurement to Python measurement. Args: - measurement: Actual measurement repesented by Cython type Measurement, using object here + measurement: Actual measurement represented by Cython type Measurement, using object here since Cython refuse to automatically convert a union with unsafe type combinations. Returns: @@ -308,7 +308,7 @@ cdef void _export_census_data(object exporter): while not GLOBAL_SHUTDOWN_EXPORT_THREAD: lk = new unique_lock[mutex](g_census_data_buffer_mutex) # Wait for next batch of census data OR timeout at fixed interval. - # Batch export census data to minimize the time we acquiring the GIL. + # Batch export census data to minimize the time we acquire the GIL. AwaitNextBatchLocked(dereference(lk), export_interval_ms) # Break only when buffer have data diff --git a/src/python/grpcio_observability/grpc_observability/_measures.py b/src/python/grpcio_observability/grpc_observability/_measures.py index 122e1eb5a59..fc9e0a86c8b 100644 --- a/src/python/grpcio_observability/grpc_observability/_measures.py +++ b/src/python/grpcio_observability/grpc_observability/_measures.py @@ -17,7 +17,7 @@ from opencensus.stats import measure # These measure definitions should be kept in sync across opencensus implementations. # https://github.com/census-instrumentation/opencensus-java/blob/master/contrib/grpc_metrics/src/main/java/io/opencensus/contrib/grpc/metrics/RpcMeasureConstants.java. 
-# Unit constatns +# Unit constants UNIT_BYTES = "By" UNIT_MILLISECONDS = "ms" UNIT_COUNT = "1" diff --git a/src/python/grpcio_observability/grpc_observability/_open_census_exporter.py b/src/python/grpcio_observability/grpc_observability/_open_census_exporter.py index 8dbccfdd156..2ec0b25d779 100644 --- a/src/python/grpcio_observability/grpc_observability/_open_census_exporter.py +++ b/src/python/grpcio_observability/grpc_observability/_open_census_exporter.py @@ -123,7 +123,7 @@ class OpenCensusExporter(_observability.Exporter): if not measure: continue # Create a measurement map for each metric, otherwise metrics will - # be override instead of accumulate. + # be overridden instead of accumulated. measurement_map = self.stats_recorder.new_measurement_map() # Add data label to default labels. labels = data.labels diff --git a/src/python/grpcio_observability/grpc_observability/_open_telemetry_observability.py b/src/python/grpcio_observability/grpc_observability/_open_telemetry_observability.py index dd3a3a563d4..4cb6eb9f8c3 100644 --- a/src/python/grpcio_observability/grpc_observability/_open_telemetry_observability.py +++ b/src/python/grpcio_observability/grpc_observability/_open_telemetry_observability.py @@ -383,7 +383,7 @@ class OpenTelemetryObservability(grpc._observability.ObservabilityPlugin): try: _cyobservability.cyobservability_init(self._exporter) - # TODO(xuanwn): Use specific exceptons + # TODO(xuanwn): Use specific exceptions except Exception as e: # pylint: disable=broad-except _LOGGER.exception("Initiate observability failed with: %s", e) diff --git a/src/python/grpcio_observability/grpc_observability/python_observability_context.h b/src/python/grpcio_observability/grpc_observability/python_observability_context.h index 9c347098933..9955172bf2e 100644 --- a/src/python/grpcio_observability/grpc_observability/python_observability_context.h +++ b/src/python/grpcio_observability/grpc_observability/python_observability_context.h @@ -208,11 +208,11 @@ 
class Span final { uint64_t child_span_count_ = 0; }; -// PythonCensusContext is associated with each clientCallTrcer, +// PythonCensusContext is associated with each clientCallTracer, // clientCallAttemptTracer and ServerCallTracer to help manage the span, -// spanContext and labels for each tracer. Craete a new PythonCensusContext will -// always reasult in creating a new span (and a new SpanContext for that span). -// It's created during callTraceer initialization and will be destroyed after +// spanContext and labels for each tracer. Creating a new PythonCensusContext will +// always result in creating a new span (and a new SpanContext for that span). +// It's created during callTracer initialization and will be destroyed after // the destruction of each callTracer. class PythonCensusContext { public: diff --git a/src/python/grpcio_tests/tests/_loader.py b/src/python/grpcio_tests/tests/_loader.py index b9fc3ccf0f6..c3b2694060a 100644 --- a/src/python/grpcio_tests/tests/_loader.py +++ b/src/python/grpcio_tests/tests/_loader.py @@ -55,7 +55,7 @@ class Loader(object): Attributes: suite (unittest.TestSuite): All tests collected by the loader. - loader (unittest.TestLoader): Standard Python unittest loader to be ran per + loader (unittest.TestLoader): Standard Python unittest loader to be run per module discovered. module_matcher (re.RegexObject): A regular expression object to match against module names and determine whether or not the discovered module diff --git a/src/python/grpcio_tests/tests/qps/client_runner.py b/src/python/grpcio_tests/tests/qps/client_runner.py index eca0155483c..6a67905ccd1 100644 --- a/src/python/grpcio_tests/tests/qps/client_runner.py +++ b/src/python/grpcio_tests/tests/qps/client_runner.py @@ -70,7 +70,7 @@ class ClosedLoopClientRunner(ClientRunner): super(ClosedLoopClientRunner, self).__init__(client) self._is_running = False self._request_count = request_count - # For server-streaming RPC, don't spawn new RPC after each responses.
+ # For server-streaming RPC, don't spawn new RPC after each response. # This yield at most ~17% for single RPC scenarios. if not no_ping_pong: # Send a new request on each response for closed loop diff --git a/src/python/grpcio_tests/tests/status/_grpc_status_test.py b/src/python/grpcio_tests/tests/status/_grpc_status_test.py index 031bdbe4d53..27c2e23a160 100644 --- a/src/python/grpcio_tests/tests/status/_grpc_status_test.py +++ b/src/python/grpcio_tests/tests/status/_grpc_status_test.py @@ -200,7 +200,7 @@ class StatusTest(unittest.TestCase): ).with_call(_REQUEST) rpc_error = exception_context.exception self.assertEqual(rpc_error.code(), grpc.StatusCode.UNKNOWN) - # Invalid status code exception raised during coversion + # Invalid status code exception raised during conversion self.assertIn("Invalid status code", rpc_error.details()) diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py index ac373e86328..4c2d6ed56b9 100644 --- a/src/python/grpcio_tests/tests/unit/_compression_test.py +++ b/src/python/grpcio_tests/tests/unit/_compression_test.py @@ -296,7 +296,7 @@ class CompressionTest(unittest.TestCase): self.assertGreaterEqual( compression_ratio, -1.0 * _COMPRESSION_RATIO_THRESHOLD, - msg="Actual compession ratio: {}".format(compression_ratio), + msg="Actual compression ratio: {}".format(compression_ratio), ) def assertConfigurationCompressed( diff --git a/src/python/grpcio_tests/tests/unit/_contextvars_propagation_test.py b/src/python/grpcio_tests/tests/unit/_contextvars_propagation_test.py index 3793d20d28e..5235530e865 100644 --- a/src/python/grpcio_tests/tests/unit/_contextvars_propagation_test.py +++ b/src/python/grpcio_tests/tests/unit/_contextvars_propagation_test.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-"""Test of propagation of contextvars to AuthMetadataPlugin threads..""" +"""Test of propagation of contextvars to AuthMetadataPlugin threads.""" import contextlib import logging diff --git a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py index 7f87b2b8a8c..31a63b2371d 100644 --- a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py @@ -251,7 +251,7 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase): ) server_call_driver.events() - client_recieve_initial_metadata_event = ( + client_receive_initial_metadata_event = ( client_receive_initial_metadata_event_future.result() ) diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py b/src/python/grpcio_tests/tests/unit/_exit_test.py index 8ff46107625..98a2611c97a 100644 --- a/src/python/grpcio_tests/tests/unit/_exit_test.py +++ b/src/python/grpcio_tests/tests/unit/_exit_test.py @@ -62,7 +62,7 @@ atexit.register(cleanup_processes) def _process_wait_with_timeout(process, timeout=WAIT_CHECK_DEFAULT_TIMEOUT): - """A funciton to mimic 3.3+ only timeout argument in process.wait.""" + """A function to mimic 3.3+ only timeout argument in process.wait.""" deadline = datetime.datetime.now() + timeout while (process.poll() is None) and (datetime.datetime.now() < deadline): time.sleep(WAIT_CHECK_INTERVAL.total_seconds()) diff --git a/src/python/grpcio_tests/tests/unit/_grpc_shutdown_test.py b/src/python/grpcio_tests/tests/unit/_grpc_shutdown_test.py index 75716d32a91..93e1db9b31d 100644 --- a/src/python/grpcio_tests/tests/unit/_grpc_shutdown_test.py +++ b/src/python/grpcio_tests/tests/unit/_grpc_shutdown_test.py @@ -39,7 +39,7 @@ class GrpcShutdownTest(unittest.TestCase): ): connection_failed.set() - # Connects to an void address, and subscribes state changes + # Connects to a 
void address, and subscribes state changes channel = grpc.insecure_channel("0.1.1.1:12345") channel.subscribe(on_state_change, True) diff --git a/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py b/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py index 40dfc85e5cb..ec46ba27542 100644 --- a/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py +++ b/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py @@ -234,7 +234,7 @@ class MetadataFlagsTest(unittest.TestCase): # To test the wait mechanism, Python thread is required to make # client set up first without handling them case by case. # Also, Python thread don't pass the unhandled exceptions to - # main thread. So, it need another method to store the + # main thread. So, it needs another method to store the # exceptions and raise them again in main thread. unhandled_exceptions = queue.Queue() diff --git a/src/python/grpcio_tests/tests_aio/status/grpc_status_test.py b/src/python/grpcio_tests/tests_aio/status/grpc_status_test.py index 4e7dbb12ba1..b7eaf49b26b 100644 --- a/src/python/grpcio_tests/tests_aio/status/grpc_status_test.py +++ b/src/python/grpcio_tests/tests_aio/status/grpc_status_test.py @@ -174,7 +174,7 @@ class StatusTest(AioTestBase): await self._channel.unary_unary(_INVALID_CODE)(_REQUEST) rpc_error = exception_context.exception self.assertEqual(rpc_error.code(), grpc.StatusCode.UNKNOWN) - # Invalid status code exception raised during coversion + # Invalid status code exception raised during conversion self.assertIn("Invalid status code", rpc_error.details()) diff --git a/src/python/grpcio_tests/tests_aio/unit/_common.py b/src/python/grpcio_tests/tests_aio/unit/_common.py index b29dfb4889e..da294af3bfe 100644 --- a/src/python/grpcio_tests/tests_aio/unit/_common.py +++ b/src/python/grpcio_tests/tests_aio/unit/_common.py @@ -51,7 +51,7 @@ def inject_callbacks(call: aio.Call): first_callback_ran = asyncio.Event() def first_callback(call): - # Validate that all resopnses have 
been received + # Validate that all responses have been received # and the call is an end state. assert call.done() first_callback_ran.set() diff --git a/src/python/grpcio_tests/tests_aio/unit/_metadata_test.py b/src/python/grpcio_tests/tests_aio/unit/_metadata_test.py index 17478fce89d..0c0cc8a77d8 100644 --- a/src/python/grpcio_tests/tests_aio/unit/_metadata_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/_metadata_test.py @@ -100,7 +100,7 @@ class TestTypeMetadata(unittest.TestCase): def test_init_metadata(self): test_cases = { - "emtpy": (), + "empty": (), "with-single-data": self._DEFAULT_DATA, "with-multi-data": self._MULTI_ENTRY_DATA, } diff --git a/src/python/grpcio_tests/tests_aio/unit/_test_server.py b/src/python/grpcio_tests/tests_aio/unit/_test_server.py index 5d25272f300..662b5b3b98e 100644 --- a/src/python/grpcio_tests/tests_aio/unit/_test_server.py +++ b/src/python/grpcio_tests/tests_aio/unit/_test_server.py @@ -102,7 +102,7 @@ class TestServiceServicer(test_pb2_grpc.TestServiceServicer): else: yield messages_pb2.StreamingOutputCallResponse() - # Next methods are extra ones that are registred programatically + # Next methods are extra ones that are registered programmatically # when the sever is instantiated. They are not being provided by # the proto file. 
async def UnaryCallWithSleep(self, unused_request, unused_context): @@ -144,7 +144,7 @@ class TestServiceServicer(test_pb2_grpc.TestServiceServicer): def _create_extra_generic_handler(servicer: TestServiceServicer): - # Add programatically extra methods not provided by the proto file + # Add programmatically extra methods not provided by the proto file # that are used during the tests rpc_method_handlers = { "UnaryCallWithSleep": grpc.unary_unary_rpc_method_handler( diff --git a/src/python/grpcio_tests/tests_aio/unit/call_test.py b/src/python/grpcio_tests/tests_aio/unit/call_test.py index fba0a0840bf..bcbcbf555fa 100644 --- a/src/python/grpcio_tests/tests_aio/unit/call_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/call_test.py @@ -411,7 +411,7 @@ class TestUnaryStreamCall(_MulticallableTestMixin, AioTestBase): Certain classes of error only appear for very specific interleavings of coroutines. Rather than inserting semi-private asyncio.Events throughout - the implementation on which to coordinate and explicilty waiting on those + the implementation on which to coordinate and explicitly waiting on those in tests, we instead search for bugs over the space of interleavings by stochastically varying the durations of certain events within the test. 
""" diff --git a/src/python/grpcio_tests/tests_aio/unit/client_stream_unary_interceptor_test.py b/src/python/grpcio_tests/tests_aio/unit/client_stream_unary_interceptor_test.py index 106be6cc349..42f49d04595 100644 --- a/src/python/grpcio_tests/tests_aio/unit/client_stream_unary_interceptor_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/client_stream_unary_interceptor_test.py @@ -431,7 +431,7 @@ class TestStreamUnaryClientInterceptor(AioTestBase): await channel.close() async def test_cancel_while_writing(self): - # Test cancelation before making any write or after doing at least 1 + # Test cancellation before making any write or after doing at least 1 for num_writes_before_cancel in (0, 1): with self.subTest( name="Num writes before cancel: {}".format( diff --git a/src/python/grpcio_tests/tests_aio/unit/client_unary_unary_interceptor_test.py b/src/python/grpcio_tests/tests_aio/unit/client_unary_unary_interceptor_test.py index 4789b309504..2617da9dd97 100644 --- a/src/python/grpcio_tests/tests_aio/unit/client_unary_unary_interceptor_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/client_unary_unary_interceptor_test.py @@ -225,7 +225,7 @@ class TestUnaryUnaryClientInterceptor(AioTestBase): self.assertEqual(grpc.StatusCode.OK, await call.code()) # Check that two calls were made, first one finishing with - # a deadline and second one finishing ok.. + # a deadline and second one finishing ok. 
self.assertEqual(len(interceptor.calls), 2) self.assertEqual( await interceptor.calls[0].code(), diff --git a/src/python/grpcio_tests/tests_aio/unit/connectivity_test.py b/src/python/grpcio_tests/tests_aio/unit/connectivity_test.py index 9e3ae3af7b4..058ed23c493 100644 --- a/src/python/grpcio_tests/tests_aio/unit/connectivity_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/connectivity_test.py @@ -116,7 +116,7 @@ class TestConnectivityState(AioTestBase): # Make sure there isn't any exception in the task await pending_task - # It can raise exceptions since it is an usage error, but it should not + # It can raise exceptions since it is a usage error, but it should not # segfault or abort. with self.assertRaises(aio.UsageError): await channel.wait_for_state_change( diff --git a/src/python/grpcio_tests/tests_gevent/unit/_test_server.py b/src/python/grpcio_tests/tests_gevent/unit/_test_server.py index 68c36093151..daa1a2a0698 100644 --- a/src/python/grpcio_tests/tests_gevent/unit/_test_server.py +++ b/src/python/grpcio_tests/tests_gevent/unit/_test_server.py @@ -47,7 +47,7 @@ def start_test_server(port: int = 0) -> Tuple[str, Any]: def _create_extra_generic_handler(servicer: TestServiceServicer) -> Any: - # Add programatically extra methods not provided by the proto file + # Add programmatically extra methods not provided by the proto file # that are used during the tests rpc_method_handlers = { "UnaryCallWithSleep": grpc.unary_unary_rpc_method_handler( diff --git a/templates/src/python/_parallel_compile_patch.py.include b/templates/src/python/_parallel_compile_patch.py.include index 0f778643373..aced9bce238 100644 --- a/templates/src/python/_parallel_compile_patch.py.include +++ b/templates/src/python/_parallel_compile_patch.py.include @@ -1,4 +1,4 @@ -"""Patches the compile() to allow enable parallel compilation of C/C++. +"""Patches the compile() to enable parallel compilation of C/C++. build_ext has lots of C/C++ files and normally them one by one. 
Enabling parallel build helps a lot. diff --git a/tools/distrib/python/grpcio_tools/_parallel_compile_patch.py b/tools/distrib/python/grpcio_tools/_parallel_compile_patch.py index 8852f54e944..4d333b318d9 100644 --- a/tools/distrib/python/grpcio_tools/_parallel_compile_patch.py +++ b/tools/distrib/python/grpcio_tools/_parallel_compile_patch.py @@ -18,7 +18,7 @@ # instead. This file can be regenerated from the template by running # `tools/buildgen/generate_projects.sh`. -"""Patches the compile() to allow enable parallel compilation of C/C++. +"""Patches the compile() to enable parallel compilation of C/C++. build_ext has lots of C/C++ files and normally them one by one. Enabling parallel build helps a lot.