Merge pull request #25380 from yashykt/removedisrespecfulterm3

Replace disrespectful words
pull/25405/head
Yash Tibrewal 4 years ago committed by GitHub
commit 15779f7b91
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 2
      package.xml
  2. 2
      src/core/lib/iomgr/timer_manager.cc
  3. 2
      src/csharp/Grpc.Core.Tests/ShutdownTest.cs
  4. 2
      src/csharp/Grpc.Core/GrpcEnvironment.cs
  5. 2
      src/objective-c/NetworkTransitionBehavior.md
  6. 2
      src/php/docker/README.md
  7. 2
      src/python/grpcio/grpc/_channel.py
  8. 2
      src/python/grpcio_tests/tests/fork/_fork_interop_test.py
  9. 2
      src/python/grpcio_tests/tests/unit/_exit_test.py
  10. 2
      src/python/grpcio_tests/tests/unit/_server_shutdown_test.py
  11. 4
      src/python/grpcio_tests/tests/unit/_signal_client.py
  12. 2
      src/python/grpcio_tests/tests/unit/framework/common/test_control.py
  13. 2
      src/ruby/end2end/channel_closing_client.rb
  14. 2
      src/ruby/end2end/channel_closing_test.rb
  15. 2
      src/ruby/end2end/channel_state_test.rb
  16. 2
      src/ruby/end2end/killed_client_thread_test.rb
  17. 8
      src/ruby/spec/generic/client_stub_spec.rb
  18. 2
      templates/package.xml.template
  19. 2
      test/core/end2end/multiple_server_queues_test.cc
  20. 2
      test/core/end2end/tests/filter_init_fails.cc
  21. 2
      test/cpp/client/destroy_grpclb_channel_with_active_connect_stress_test.cc
  22. 4
      test/cpp/end2end/cfstream_test.cc
  23. 2
      tools/internal_ci/linux/grpc_microbenchmark_diff.sh
  24. 2
      tools/internal_ci/linux/grpc_performance_profile_daily.sh
  25. 2
      tools/internal_ci/linux/grpc_performance_profile_master.sh
  26. 2
      tools/internal_ci/linux/grpc_trickle_diff.sh
  27. 2
      tools/internal_ci/macos/grpc_run_bazel_isolated_tests.sh
  28. 2
      tools/internal_ci/macos/grpc_run_tests_matrix.sh
  29. 2
      tools/run_tests/artifacts/artifact_targets.py

@ -2191,7 +2191,7 @@ Update to wrap gRPC C Core version 0.10.0
<license>BSD</license>
<notes>
- GA release
-- Fix shutdown hang problem #4017
+- Fix shutdown freeze problem #4017
</notes>
</release>
<release>

@ -133,7 +133,7 @@ static void run_some_timers() {
}
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(&g_mu);
-// garbage collect any threads hanging out that are dead
+// garbage collect any threads that are dead
gc_completed_threads();
// get ready to wait again
++g_waiter_count;

@ -59,7 +59,7 @@ namespace Grpc.Core.Tests
await readyToShutdown.Task; // make sure handler is running
await channel.ShutdownAsync(); // channel.ShutdownAsync() works even if there's a pending call.
-await server.KillAsync(); // server.ShutdownAsync() would hang waiting for the call to finish.
+await server.KillAsync(); // server.ShutdownAsync() would freeze waiting for the call to finish.
}
}
}

@ -465,7 +465,7 @@ namespace Grpc.Core
// when the framework attempts to run the finalizers for SafeHandle object representing the native
// grpc objects. The finalizers calls the native grpc_*_destroy methods (e.g. grpc_server_destroy)
// in a random order, which is not supported by gRPC.
-// - On Mono, the process would hang as the GrpcThreadPool threads are sleeping
+// - On Mono, the process would freeze as the GrpcThreadPool threads are sleeping
// in grpc_completion_queue_next P/Invoke invocation and mono won't let the
// process shutdown until the P/Invoke calls return. We achieve that by shutting down
// the completion queue(s) which associated with the GrpcThreadPool, which will

@ -50,7 +50,7 @@ using TCP sockets:
background. When a TCP connection breaks in the background for the reason
like WiFi connects to another hotspot, the socket neither return an error nor
continue sending/receiving data on it, but still accepts write on it.
-In both situations, the user will see the call hang for an extended period of
+In both situations, the user will see the call freeze for an extended period of
time before the TCP socket times out.
#### gRPC iOS library's resolution to TCP socket issues

@ -168,7 +168,7 @@ NOTE: PHP 5.x has reached the end-of-life state and is no longer supported.
### `fork-support`
This image tests `pcntl_fork()` support and makes sure scripts using
-`pcntl_fork()` don't hang or crash.
+`pcntl_fork()` don't freeze or crash.
Build `grpc-ext` docker image:
```sh

@ -1546,7 +1546,7 @@ class Channel(grpc.Channel):
# here (or more likely, call self._close() here). We don't do this today
# because many valid use cases today allow the channel to be deleted
# immediately after stubs are created. After a sufficient period of time
-# has passed for all users to be trusted to hang out to their channels
+# has passed for all users to be trusted to freeze out to their channels
# for as long as they are in use and to close them after using them,
# then deletion of this grpc._channel.Channel instance can be made to
# effect closure of the underlying cygrpc.Channel instance.

@ -23,7 +23,7 @@ from grpc._cython import cygrpc
from tests.fork import methods
# New instance of multiprocessing.Process using fork without exec can and will
-# hang if the Python process has any other threads running. This includes the
+# freeze if the Python process has any other threads running. This includes the
# additional thread spawned by our _runner.py class. So in order to test our
# compatibility with multiprocessing, we first fork+exec a new process to ensure
# we don't have any conflicting background threads.

@ -14,7 +14,7 @@
"""Tests clean exit of server/client on Python Interpreter exit/sigint.
The tests in this module spawn a subprocess for each test case, the
-test is considered successful if it doesn't hang/timeout.
+test is considered successful if it doesn't freeze/timeout.
"""
import atexit

@ -14,7 +14,7 @@
"""Tests clean shutdown of server on various interpreter exit conditions.
The tests in this module spawn a subprocess for each test case, the
-test is considered successful if it doesn't hang/timeout.
+test is considered successful if it doesn't freeze/timeout.
"""
import atexit

@ -82,7 +82,7 @@ def main_unary_with_exception(server_target):
sys.stderr.write("Running signal handler.\n")
sys.stderr.flush()
-# This call should not hang.
+# This call should not freeze.
channel.close()
@ -97,7 +97,7 @@ def main_streaming_with_exception(server_target):
sys.stderr.write("Running signal handler.\n")
sys.stderr.flush()
-# This call should not hang.
+# This call should not freeze.
channel.close()

@ -34,7 +34,7 @@ class Control(six.with_metaclass(abc.ABCMeta)):
Systems under test passed a Control should call its control() method
frequently during execution. The control() method may block, raise an
exception, or do nothing, all according to the enclosing test's desire for
-the system under test to simulate hanging, failing, or functioning.
+the system under test to simulate freezing, failing, or functioning.
"""
@abc.abstractmethod

@ -17,7 +17,7 @@
require_relative './end2end_common'
# Calls '#close' on a Channel when "shutdown" called. This tries to
-# trigger a hang or crash bug by closing a channel actively being watched
+# trigger a freeze or crash bug by closing a channel actively being watched
class ChannelClosingClientController < ClientControl::ClientController::Service
def initialize(ch)
@ch = ch

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make sure that the client doesn't hang when channel is closed
+# make sure that the client doesn't freeze when channel is closed
# explicitly while it's used
require_relative './end2end_common'

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make sure that the client doesn't hang when process ended abruptly
+# make sure that the client doesn't freeze when process ended abruptly
require_relative './end2end_common'

@ -46,7 +46,7 @@ def main
end
service_impl = SleepingEchoServerImpl.new(received_rpc_callback)
-# RPCs against the server will all be hanging, so kill thread
+# RPCs against the server will all be freezing, so kill thread
# pool workers immediately rather than after waiting for a second.
rpc_server_args = { poll_period: 0, pool_keep_alive: 0 }
server_runner = ServerRunner.new(service_impl, rpc_server_args: rpc_server_args)

@ -304,7 +304,7 @@ describe 'ClientStub' do # rubocop:disable Metrics/BlockLength
describe 'via a call operation' do
after(:each) do
-# make sure op.wait doesn't hang, even if there's a bad status
+# make sure op.wait doesn't freeze, even if there's a bad status
@op.wait
end
def get_response(stub, run_start_call_first: false, credentials: nil)
@ -406,7 +406,7 @@ describe 'ClientStub' do # rubocop:disable Metrics/BlockLength
describe 'via a call operation' do
after(:each) do
-# make sure op.wait doesn't hang, even if there's a bad status
+# make sure op.wait doesn't freeze, even if there's a bad status
@op.wait
end
def get_response(stub, run_start_call_first: false)
@ -547,7 +547,7 @@ describe 'ClientStub' do # rubocop:disable Metrics/BlockLength
describe 'via a call operation' do
after(:each) do
-@op.wait # make sure wait doesn't hang
+@op.wait # make sure wait doesn't freeze
end
def get_responses(stub, run_start_call_first: false, unmarshal: noop)
@op = stub.server_streamer(@method, @sent_msg, noop, unmarshal,
@ -865,7 +865,7 @@ describe 'ClientStub' do # rubocop:disable Metrics/BlockLength
describe 'via a call operation' do
after(:each) do
-@op.wait # make sure wait doesn't hang
+@op.wait # make sure wait doesn't freeze
end
def get_responses(stub, run_start_call_first: false, deadline: nil,
marshal_proc: noop)

@ -221,7 +221,7 @@
<license>BSD</license>
<notes>
- GA release
-- Fix shutdown hang problem #4017
+- Fix shutdown freeze problem #4017
</notes>
</release>
<release>

@ -53,7 +53,7 @@ int main(int argc, char** argv) {
grpc_server_start(server);
grpc_server_shutdown_and_notify(server, cq2, nullptr);
grpc_completion_queue_next(cq2, gpr_inf_future(GPR_CLOCK_REALTIME),
-nullptr); /* cue queue hang */
+nullptr); /* cue queue freeze */
grpc_completion_queue_shutdown(cq1);
grpc_completion_queue_shutdown(cq2);
grpc_completion_queue_shutdown(cq3);

@ -538,7 +538,7 @@ static void filter_init_fails_internal(grpc_end2end_test_config config) {
// If the client handshake completes before the server handshake and the
// client is able to send application data before the server handshake
// completes, then testing the CLIENT_SUBCHANNEL filter will cause the server
-// to hang waiting for the final handshake message from the client. This
+// to freeze waiting for the final handshake message from the client. This
// handshake message will never arrive because it would have been sent with
// the first application data message, which failed because of the filter.
if ((config.feature_mask & FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL) &&

@ -55,7 +55,7 @@ void TryConnectAndDestroy() {
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
// Return a grpclb address with an IP address on the IPv6 discard prefix
// (https://tools.ietf.org/html/rfc6666). This is important because
-// the behavior we want in this test is for a TCP connect attempt to "hang",
+// the behavior we want in this test is for a TCP connect attempt to "freeze",
// i.e. we want to send SYN, and then *not* receive SYN-ACK or RST.
// The precise behavior is dependant on the test runtime environment though,
// since connect() attempts on this address may unfortunately result in

@ -190,9 +190,9 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
} else {
GPR_ASSERT(ret == grpc::CompletionQueue::TIMEOUT);
// This can happen if we hit the Apple CFStream bug which results in the
-// read stream hanging. We are ignoring hangs and timeouts, but these
+// read stream freezing. We are ignoring hangs and timeouts, but these
// tests are still useful as they can catch memory memory corruptions,
-// crashes and other bugs that don't result in test hang/timeout.
+// crashes and other bugs that don't result in test freeze/timeout.
return false;
}
}

@ -31,7 +31,7 @@ tools/internal_ci/linux/run_if_c_cpp_modified.sh tools/profiling/microbenchmarks
-d "origin/$KOKORO_GITHUB_PULL_REQUEST_TARGET_BRANCH" \
-b $BENCHMARKS_TO_RUN || FAILED="true"
-# kill port_server.py to prevent the build from hanging
+# kill port_server.py to prevent the build from freezing
ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9
if [ "$FAILED" != "" ]

@ -26,7 +26,7 @@ CPUS=`python -c 'import multiprocessing; print multiprocessing.cpu_count()'`
tools/run_tests/run_microbenchmark.py --collect summary --bigquery_upload || FAILED="true"
-# kill port_server.py to prevent the build from hanging
+# kill port_server.py to prevent the build from freezing
ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9
if [ "$FAILED" != "" ]

@ -22,7 +22,7 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_perf_rc
tools/internal_ci/linux/run_performance_profile_hourly.sh || FAILED="true"
-# kill port_server.py to prevent the build from hanging
+# kill port_server.py to prevent the build from freezing
ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9
if [ "$FAILED" != "" ]

@ -33,7 +33,7 @@ tools/internal_ci/linux/run_if_c_cpp_modified.sh tools/profiling/microbenchmarks
--no-counters \
--pr_comment_name trickle || FAILED="true"
-# kill port_server.py to prevent the build from hanging
+# kill port_server.py to prevent the build from freezing
ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9
if [ "$FAILED" != "" ]

@ -38,6 +38,6 @@ sudo sntp -sS pool.ntp.org
# The "local" execution strategy is required because the test runs sudo and that doesn't work in a sandboxed environment (the default on mac)
tools/bazel test $RUN_TESTS_FLAGS --genrule_strategy=local --test_output=all //test/cpp/common:time_jump_test
-# kill port_server.py to prevent the build from hanging
+# kill port_server.py to prevent the build from freezing
ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9

@ -22,7 +22,7 @@ source tools/internal_ci/helper_scripts/prepare_build_macos_rc
tools/run_tests/run_tests_matrix.py $RUN_TESTS_FLAGS || FAILED="true"
-# kill port_server.py to prevent the build from hanging
+# kill port_server.py to prevent the build from freezing
ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9
tools/internal_ci/helper_scripts/delete_nonartifacts.sh || true

@ -123,7 +123,7 @@ class PythonArtifact:
self.py_version)
environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
# https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
-# A QEMU bug causes submodule update to hang, so we copy directly
+# A QEMU bug causes submodule update to freeze, so we copy directly
environ['RELATIVE_COPY_PATH'] = '.'
# Parallel builds are counterproductive in emulated environment
environ['GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS'] = '1'

Loading…
Cancel
Save