Merge branch 'master' into callback-rpc-lock

pull/19040/head
Karthik Ravi Shankar 6 years ago
commit d243f3dbe3
  1. BUILD (14)
  2. BUILD.gn (5)
  3. CMakeLists.txt (5)
  4. Makefile (5)
  5. build.yaml (8)
  6. doc/environment_variables.md (10)
  7. examples/python/wait_for_ready/BUILD.bazel (32)
  8. examples/python/wait_for_ready/README.md (32)
  9. examples/python/wait_for_ready/test/_wait_for_ready_example_test.py (31)
  10. examples/python/wait_for_ready/wait_for_ready_example.py (114)
  11. gRPC-C++.podspec (7)
  12. gRPC-Core.podspec (4)
  13. grpc.gemspec (2)
  14. include/grpc/grpc_security.h (2)
  15. include/grpc/impl/codegen/port_platform.h (10)
  16. include/grpc/slice.h (2)
  17. include/grpcpp/channel.h (3)
  18. include/grpcpp/impl/codegen/client_context.h (3)
  19. include/grpcpp/impl/codegen/completion_queue.h (10)
  20. include/grpcpp/impl/codegen/server_interface.h (2)
  21. include/grpcpp/impl/codegen/sync.h (151)
  22. include/grpcpp/impl/codegen/sync_stream.h (6)
  23. include/grpcpp/server.h (8)
  24. include/grpcpp/server_impl.h (8)
  25. package.xml (2)
  26. src/core/ext/filters/client_channel/client_channel.cc (2197)
  27. src/core/ext/filters/client_channel/health/health_check_client.cc (4)
  28. src/core/ext/filters/client_channel/health/health_check_client.h (3)
  29. src/core/ext/filters/client_channel/http_connect_handshaker.cc (2)
  30. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (1)
  31. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc (2)
  32. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h (6)
  33. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (6)
  34. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (6)
  35. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (178)
  36. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (8)
  37. src/core/ext/filters/client_channel/resolving_lb_policy.cc (2)
  38. src/core/ext/filters/client_channel/subchannel.cc (58)
  39. src/core/ext/filters/client_channel/subchannel.h (3)
  40. src/core/lib/channel/channelz_registry.cc (2)
  41. src/core/lib/channel/handshaker.h (2)
  42. src/core/lib/compression/compression_args.h (2)
  43. src/core/lib/gpr/arena.cc (50)
  44. src/core/lib/gpr/arena.h (2)
  45. src/core/lib/gprpp/mutex_lock.h (42)
  46. src/core/lib/gprpp/sync.h (126)
  47. src/core/lib/iomgr/ev_epollex_linux.cc (2)
  48. src/core/lib/iomgr/iomgr_custom.cc (3)
  49. src/core/lib/iomgr/iomgr_custom.h (2)
  50. src/core/lib/surface/init.cc (3)
  51. src/core/tsi/ssl/session_cache/ssl_session_cache.cc (2)
  52. src/cpp/client/channel_cc.cc (2)
  53. src/cpp/client/client_context.cc (5)
  54. src/cpp/server/dynamic_thread_pool.cc (23)
  55. src/cpp/server/dynamic_thread_pool.h (7)
  56. src/cpp/server/health/default_health_check_service.cc (28)
  57. src/cpp/server/health/default_health_check_service.h (7)
  58. src/cpp/server/load_reporter/load_reporter.cc (18)
  59. src/cpp/server/load_reporter/load_reporter.h (5)
  60. src/cpp/server/load_reporter/load_reporter_async_service_impl.cc (24)
  61. src/cpp/server/load_reporter/load_reporter_async_service_impl.h (3)
  62. src/cpp/server/server_cc.cc (24)
  63. src/cpp/server/server_context.cc (17)
  64. src/cpp/thread_manager/thread_manager.cc (34)
  65. src/cpp/thread_manager/thread_manager.h (7)
  66. src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi (2)
  67. src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi (2)
  68. src/python/grpcio_tests/commands.py (8)
  69. src/python/grpcio_tests/tests/tests.json (1)
  70. src/python/grpcio_tests/tests/unit/BUILD.bazel (1)
  71. src/python/grpcio_tests/tests/unit/_dns_resolver_test.py (63)
  72. test/cpp/client/client_channel_stress_test.cc (17)
  73. test/cpp/end2end/client_lb_end2end_test.cc (37)
  74. test/cpp/end2end/grpclb_end2end_test.cc (67)
  75. test/cpp/end2end/thread_stress_test.cc (21)
  76. test/cpp/end2end/xds_end2end_test.cc (66)
  77. tools/distrib/python/docgen.py (2)
  78. tools/distrib/sanitize.sh (4)
  79. tools/doxygen/Doxyfile.c++ (1)
  80. tools/doxygen/Doxyfile.c++.internal (3)
  81. tools/doxygen/Doxyfile.core.internal (2)
  82. tools/run_tests/generated/sources_and_headers.json (20)

BUILD (14)

@ -525,6 +525,17 @@ grpc_cc_library(
], ],
) )
grpc_cc_library(
name = "grpc++_internal_hdrs_only",
hdrs = [
"include/grpcpp/impl/codegen/sync.h",
],
language = "c++",
deps = [
"gpr_codegen",
],
)
grpc_cc_library( grpc_cc_library(
name = "gpr_base", name = "gpr_base",
srcs = [ srcs = [
@ -590,8 +601,8 @@ grpc_cc_library(
"src/core/lib/gprpp/manual_constructor.h", "src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h", "src/core/lib/gprpp/map.h",
"src/core/lib/gprpp/memory.h", "src/core/lib/gprpp/memory.h",
"src/core/lib/gprpp/mutex_lock.h",
"src/core/lib/gprpp/pair.h", "src/core/lib/gprpp/pair.h",
"src/core/lib/gprpp/sync.h",
"src/core/lib/gprpp/thd.h", "src/core/lib/gprpp/thd.h",
"src/core/lib/profiling/timers.h", "src/core/lib/profiling/timers.h",
], ],
@ -2147,6 +2158,7 @@ grpc_cc_library(
"include/grpcpp/impl/codegen/time.h", "include/grpcpp/impl/codegen/time.h",
], ],
deps = [ deps = [
"grpc++_internal_hdrs_only",
"grpc_codegen", "grpc_codegen",
], ],
) )

@ -186,8 +186,8 @@ config("grpc_config") {
"src/core/lib/gprpp/manual_constructor.h", "src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h", "src/core/lib/gprpp/map.h",
"src/core/lib/gprpp/memory.h", "src/core/lib/gprpp/memory.h",
"src/core/lib/gprpp/mutex_lock.h",
"src/core/lib/gprpp/pair.h", "src/core/lib/gprpp/pair.h",
"src/core/lib/gprpp/sync.h",
"src/core/lib/gprpp/thd.h", "src/core/lib/gprpp/thd.h",
"src/core/lib/gprpp/thd_posix.cc", "src/core/lib/gprpp/thd_posix.cc",
"src/core/lib/gprpp/thd_windows.cc", "src/core/lib/gprpp/thd_windows.cc",
@ -1066,6 +1066,7 @@ config("grpc_config") {
"include/grpcpp/impl/codegen/status_code_enum.h", "include/grpcpp/impl/codegen/status_code_enum.h",
"include/grpcpp/impl/codegen/string_ref.h", "include/grpcpp/impl/codegen/string_ref.h",
"include/grpcpp/impl/codegen/stub_options.h", "include/grpcpp/impl/codegen/stub_options.h",
"include/grpcpp/impl/codegen/sync.h",
"include/grpcpp/impl/codegen/sync_stream.h", "include/grpcpp/impl/codegen/sync_stream.h",
"include/grpcpp/impl/codegen/time.h", "include/grpcpp/impl/codegen/time.h",
"include/grpcpp/impl/grpc_library.h", "include/grpcpp/impl/grpc_library.h",
@ -1161,12 +1162,12 @@ config("grpc_config") {
"src/core/lib/gprpp/manual_constructor.h", "src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h", "src/core/lib/gprpp/map.h",
"src/core/lib/gprpp/memory.h", "src/core/lib/gprpp/memory.h",
"src/core/lib/gprpp/mutex_lock.h",
"src/core/lib/gprpp/optional.h", "src/core/lib/gprpp/optional.h",
"src/core/lib/gprpp/orphanable.h", "src/core/lib/gprpp/orphanable.h",
"src/core/lib/gprpp/pair.h", "src/core/lib/gprpp/pair.h",
"src/core/lib/gprpp/ref_counted.h", "src/core/lib/gprpp/ref_counted.h",
"src/core/lib/gprpp/ref_counted_ptr.h", "src/core/lib/gprpp/ref_counted_ptr.h",
"src/core/lib/gprpp/sync.h",
"src/core/lib/gprpp/thd.h", "src/core/lib/gprpp/thd.h",
"src/core/lib/http/format_request.h", "src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h", "src/core/lib/http/httpcli.h",

@ -3187,6 +3187,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/stub_options.h include/grpcpp/impl/codegen/stub_options.h
include/grpcpp/impl/codegen/sync_stream.h include/grpcpp/impl/codegen/sync_stream.h
include/grpcpp/impl/codegen/time.h include/grpcpp/impl/codegen/time.h
include/grpcpp/impl/codegen/sync.h
include/grpc++/impl/codegen/proto_utils.h include/grpc++/impl/codegen/proto_utils.h
include/grpcpp/impl/codegen/proto_buffer_reader.h include/grpcpp/impl/codegen/proto_buffer_reader.h
include/grpcpp/impl/codegen/proto_buffer_writer.h include/grpcpp/impl/codegen/proto_buffer_writer.h
@ -3790,6 +3791,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/stub_options.h include/grpcpp/impl/codegen/stub_options.h
include/grpcpp/impl/codegen/sync_stream.h include/grpcpp/impl/codegen/sync_stream.h
include/grpcpp/impl/codegen/time.h include/grpcpp/impl/codegen/time.h
include/grpcpp/impl/codegen/sync.h
include/grpc/census.h include/grpc/census.h
) )
string(REPLACE "include/" "" _path ${_hdr}) string(REPLACE "include/" "" _path ${_hdr})
@ -4244,6 +4246,7 @@ foreach(_hdr
include/grpc/impl/codegen/sync_generic.h include/grpc/impl/codegen/sync_generic.h
include/grpc/impl/codegen/sync_posix.h include/grpc/impl/codegen/sync_posix.h
include/grpc/impl/codegen/sync_windows.h include/grpc/impl/codegen/sync_windows.h
include/grpcpp/impl/codegen/sync.h
include/grpc++/impl/codegen/proto_utils.h include/grpc++/impl/codegen/proto_utils.h
include/grpcpp/impl/codegen/proto_buffer_reader.h include/grpcpp/impl/codegen/proto_buffer_reader.h
include/grpcpp/impl/codegen/proto_buffer_writer.h include/grpcpp/impl/codegen/proto_buffer_writer.h
@ -4440,6 +4443,7 @@ foreach(_hdr
include/grpc/impl/codegen/sync_generic.h include/grpc/impl/codegen/sync_generic.h
include/grpc/impl/codegen/sync_posix.h include/grpc/impl/codegen/sync_posix.h
include/grpc/impl/codegen/sync_windows.h include/grpc/impl/codegen/sync_windows.h
include/grpcpp/impl/codegen/sync.h
include/grpc++/impl/codegen/proto_utils.h include/grpc++/impl/codegen/proto_utils.h
include/grpcpp/impl/codegen/proto_buffer_reader.h include/grpcpp/impl/codegen/proto_buffer_reader.h
include/grpcpp/impl/codegen/proto_buffer_writer.h include/grpcpp/impl/codegen/proto_buffer_writer.h
@ -4766,6 +4770,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/stub_options.h include/grpcpp/impl/codegen/stub_options.h
include/grpcpp/impl/codegen/sync_stream.h include/grpcpp/impl/codegen/sync_stream.h
include/grpcpp/impl/codegen/time.h include/grpcpp/impl/codegen/time.h
include/grpcpp/impl/codegen/sync.h
) )
string(REPLACE "include/" "" _path ${_hdr}) string(REPLACE "include/" "" _path ${_hdr})
get_filename_component(_path ${_path} PATH) get_filename_component(_path ${_path} PATH)

@ -5523,6 +5523,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/stub_options.h \ include/grpcpp/impl/codegen/stub_options.h \
include/grpcpp/impl/codegen/sync_stream.h \ include/grpcpp/impl/codegen/sync_stream.h \
include/grpcpp/impl/codegen/time.h \ include/grpcpp/impl/codegen/time.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc++/impl/codegen/proto_utils.h \ include/grpc++/impl/codegen/proto_utils.h \
include/grpcpp/impl/codegen/proto_buffer_reader.h \ include/grpcpp/impl/codegen/proto_buffer_reader.h \
include/grpcpp/impl/codegen/proto_buffer_writer.h \ include/grpcpp/impl/codegen/proto_buffer_writer.h \
@ -6134,6 +6135,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/stub_options.h \ include/grpcpp/impl/codegen/stub_options.h \
include/grpcpp/impl/codegen/sync_stream.h \ include/grpcpp/impl/codegen/sync_stream.h \
include/grpcpp/impl/codegen/time.h \ include/grpcpp/impl/codegen/time.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc/census.h \ include/grpc/census.h \
LIBGRPC++_CRONET_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_CRONET_SRC)))) LIBGRPC++_CRONET_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_CRONET_SRC))))
@ -6560,6 +6562,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/sync_generic.h \ include/grpc/impl/codegen/sync_generic.h \
include/grpc/impl/codegen/sync_posix.h \ include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \ include/grpc/impl/codegen/sync_windows.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc++/impl/codegen/proto_utils.h \ include/grpc++/impl/codegen/proto_utils.h \
include/grpcpp/impl/codegen/proto_buffer_reader.h \ include/grpcpp/impl/codegen/proto_buffer_reader.h \
include/grpcpp/impl/codegen/proto_buffer_writer.h \ include/grpcpp/impl/codegen/proto_buffer_writer.h \
@ -6727,6 +6730,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/sync_generic.h \ include/grpc/impl/codegen/sync_generic.h \
include/grpc/impl/codegen/sync_posix.h \ include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \ include/grpc/impl/codegen/sync_windows.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc++/impl/codegen/proto_utils.h \ include/grpc++/impl/codegen/proto_utils.h \
include/grpcpp/impl/codegen/proto_buffer_reader.h \ include/grpcpp/impl/codegen/proto_buffer_reader.h \
include/grpcpp/impl/codegen/proto_buffer_writer.h \ include/grpcpp/impl/codegen/proto_buffer_writer.h \
@ -7059,6 +7063,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/stub_options.h \ include/grpcpp/impl/codegen/stub_options.h \
include/grpcpp/impl/codegen/sync_stream.h \ include/grpcpp/impl/codegen/sync_stream.h \
include/grpcpp/impl/codegen/time.h \ include/grpcpp/impl/codegen/time.h \
include/grpcpp/impl/codegen/sync.h \
LIBGRPC++_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_UNSECURE_SRC)))) LIBGRPC++_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_UNSECURE_SRC))))

@ -196,8 +196,8 @@ filegroups:
- src/core/lib/gprpp/manual_constructor.h - src/core/lib/gprpp/manual_constructor.h
- src/core/lib/gprpp/map.h - src/core/lib/gprpp/map.h
- src/core/lib/gprpp/memory.h - src/core/lib/gprpp/memory.h
- src/core/lib/gprpp/mutex_lock.h
- src/core/lib/gprpp/pair.h - src/core/lib/gprpp/pair.h
- src/core/lib/gprpp/sync.h
- src/core/lib/gprpp/thd.h - src/core/lib/gprpp/thd.h
- src/core/lib/profiling/timers.h - src/core/lib/profiling/timers.h
uses: uses:
@ -1278,6 +1278,7 @@ filegroups:
- include/grpcpp/impl/codegen/time.h - include/grpcpp/impl/codegen/time.h
uses: uses:
- grpc_codegen - grpc_codegen
- grpc++_internal_hdrs_only
- name: grpc++_codegen_base_src - name: grpc++_codegen_base_src
language: c++ language: c++
src: src:
@ -1452,6 +1453,7 @@ filegroups:
- grpc_base_headers - grpc_base_headers
- grpc_transport_inproc_headers - grpc_transport_inproc_headers
- grpc++_codegen_base - grpc++_codegen_base
- grpc++_internal_hdrs_only
- nanopb_headers - nanopb_headers
- health_proto - health_proto
- name: grpc++_config_proto - name: grpc++_config_proto
@ -1459,6 +1461,10 @@ filegroups:
public_headers: public_headers:
- include/grpc++/impl/codegen/config_protobuf.h - include/grpc++/impl/codegen/config_protobuf.h
- include/grpcpp/impl/codegen/config_protobuf.h - include/grpcpp/impl/codegen/config_protobuf.h
- name: grpc++_internal_hdrs_only
language: c++
public_headers:
- include/grpcpp/impl/codegen/sync.h
- name: grpc++_reflection_proto - name: grpc++_reflection_proto
language: c++ language: c++
src: src:

@ -145,13 +145,3 @@ some configuration as environment variables that can be set.
* grpc_cfstream * grpc_cfstream
set to 1 to turn on CFStream experiment. With this experiment gRPC uses CFStream API to make TCP set to 1 to turn on CFStream experiment. With this experiment gRPC uses CFStream API to make TCP
connections. The option is only available on iOS platform and when macro GRPC_CFSTREAM is defined. connections. The option is only available on iOS platform and when macro GRPC_CFSTREAM is defined.
* GRPC_ARENA_INIT_STRATEGY
Selects the initialization strategy for blocks allocated in the arena. Valid
values are:
- no_init (default): Do not initialize the arena block.
- zero_init: Initialize the arena blocks with 0.
- non_zero_init: Initialize the arena blocks with a non-zero value.
NOTE: This environment variable is experimental and will be removed. Thus, it
should not be relied upon.

@ -0,0 +1,32 @@
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@grpc_python_dependencies//:requirements.bzl", "requirement")
py_library(
name = "wait_for_ready_example",
testonly = 1,
srcs = ["wait_for_ready_example.py"],
deps = [
"//src/python/grpcio/grpc:grpcio",
"//examples:py_helloworld",
],
)
py_test(
name = "test/_wait_for_ready_example_test",
srcs = ["test/_wait_for_ready_example_test.py"],
deps = [":wait_for_ready_example",],
size = "small",
)

@ -0,0 +1,32 @@
# gRPC Python Example for Wait-for-ready
The default behavior of an RPC is to fail instantly if the server is not ready yet. This example demonstrates how to change that behavior.
### Definition of 'wait-for-ready' semantics
> If an RPC is issued but the channel is in TRANSIENT_FAILURE or SHUTDOWN states, the RPC is unable to be transmitted promptly. By default, gRPC implementations SHOULD fail such RPCs immediately. This is known as "fail fast," but the usage of the term is historical. RPCs SHOULD NOT fail as a result of the channel being in other states (CONNECTING, READY, or IDLE).
>
> gRPC implementations MAY provide a per-RPC option to not fail RPCs as a result of the channel being in TRANSIENT_FAILURE state. Instead, the implementation queues the RPCs until the channel is READY. This is known as "wait for ready." The RPCs SHOULD still fail before READY if there are unrelated reasons, such as the channel is SHUTDOWN or the RPC's deadline is reached.
>
> From https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md
### Use cases for 'wait-for-ready'
When developers spin up gRPC clients and servers at the same time, the first couple of RPC calls are very likely to fail because the server is not yet available. If developers fail to prepare for this situation, the result can be catastrophic. But with 'wait-for-ready' semantics, developers can initialize the client and server in any order, which is especially useful in testing.
Also, developers may ensure the server is up before starting the client, but transient network failures can still make the server temporarily unavailable. With 'wait-for-ready' semantics, those RPC calls will automatically wait until the server is ready to accept incoming requests.
### DEMO Snippets
```Python
# Per RPC level
stub = ...Stub(...)
stub.important_transaction_1(..., wait_for_ready=True)
stub.unimportant_transaction_2(...)
stub.important_transaction_3(..., wait_for_ready=True)
stub.unimportant_transaction_4(...)
# The unimportant transactions can be status reports, health checks, etc.
```
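For comparison with the C++ surface touched elsewhere in this commit, the same per-RPC option is exposed on `grpc::ClientContext`. A minimal sketch (illustrative only, not part of this change, and assuming stubs generated from `examples/protos/helloworld.proto`):
```C++
// Illustrative only: a C++ analogue of the per-RPC wait_for_ready flag shown
// above. Assumes the generated helloworld stubs are available.
#include <grpcpp/grpcpp.h>
#include "helloworld.grpc.pb.h"

grpc::Status SayHelloWaitForReady(helloworld::Greeter::Stub* stub) {
  grpc::ClientContext context;
  // Queue the RPC until the channel is READY instead of failing fast while
  // the channel is in TRANSIENT_FAILURE.
  context.set_wait_for_ready(true);

  helloworld::HelloRequest request;
  request.set_name("you");
  helloworld::HelloReply reply;
  return stub->SayHello(&context, request, &reply);
}
```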

@ -0,0 +1,31 @@
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the wait-for-ready example."""
import unittest
import logging
from examples.python.wait_for_ready import wait_for_ready_example
class WaitForReadyExampleTest(unittest.TestCase):
def test_wait_for_ready_example(self):
wait_for_ready_example.main()
# No unhandled exception raised, no deadlock, test passed!
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)

@ -0,0 +1,114 @@
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python example of utilizing wait-for-ready flag."""
from __future__ import print_function
import logging
from concurrent import futures
from contextlib import contextmanager
import socket
import threading
import grpc
from examples.protos import helloworld_pb2
from examples.protos import helloworld_pb2_grpc
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.INFO)
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
@contextmanager
def get_free_loopback_tcp_port():
tcp_socket = socket.socket(socket.AF_INET6)
tcp_socket.bind(('', 0))
address_tuple = tcp_socket.getsockname()
yield "[::1]:%s" % (address_tuple[1])
tcp_socket.close()
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, unused_context):
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
def create_server(server_address):
server = grpc.server(futures.ThreadPoolExecutor())
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
bound_port = server.add_insecure_port(server_address)
assert bound_port == int(server_address.split(':')[-1])
return server
def process(stub, wait_for_ready=None):
try:
response = stub.SayHello(
helloworld_pb2.HelloRequest(name='you'),
wait_for_ready=wait_for_ready)
message = response.message
except grpc.RpcError as rpc_error:
assert rpc_error.code() == grpc.StatusCode.UNAVAILABLE
assert not wait_for_ready
message = rpc_error
else:
assert wait_for_ready
_LOGGER.info("Wait-for-ready %s, client received: %s", "enabled"
if wait_for_ready else "disabled", message)
def main():
# Pick a random free port
with get_free_loopback_tcp_port() as server_address:
# Register connectivity event to notify main thread
transient_failure_event = threading.Event()
def wait_for_transient_failure(channel_connectivity):
if channel_connectivity == grpc.ChannelConnectivity.TRANSIENT_FAILURE:
transient_failure_event.set()
# Create gRPC channel
channel = grpc.insecure_channel(server_address)
channel.subscribe(wait_for_transient_failure)
stub = helloworld_pb2_grpc.GreeterStub(channel)
# Fire an RPC without wait_for_ready
thread_disabled_wait_for_ready = threading.Thread(
target=process, args=(stub, False))
thread_disabled_wait_for_ready.start()
# Fire an RPC with wait_for_ready
thread_enabled_wait_for_ready = threading.Thread(
target=process, args=(stub, True))
thread_enabled_wait_for_ready.start()
# Wait for the channel entering TRANSIENT FAILURE state.
transient_failure_event.wait()
server = create_server(server_address)
server.start()
# Expected to fail with StatusCode.UNAVAILABLE.
thread_disabled_wait_for_ready.join()
# Expected to succeed.
thread_enabled_wait_for_ready.join()
server.stop(None)
channel.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()

@ -183,7 +183,8 @@ Pod::Spec.new do |s|
'include/grpcpp/impl/codegen/string_ref.h', 'include/grpcpp/impl/codegen/string_ref.h',
'include/grpcpp/impl/codegen/stub_options.h', 'include/grpcpp/impl/codegen/stub_options.h',
'include/grpcpp/impl/codegen/sync_stream.h', 'include/grpcpp/impl/codegen/sync_stream.h',
'include/grpcpp/impl/codegen/time.h' 'include/grpcpp/impl/codegen/time.h',
'include/grpcpp/impl/codegen/sync.h'
end end
s.subspec 'Implementation' do |ss| s.subspec 'Implementation' do |ss|
@ -266,8 +267,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h', 'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h', 'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h', 'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h', 'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h', 'src/core/lib/profiling/timers.h',
'src/core/ext/transport/chttp2/transport/bin_decoder.h', 'src/core/ext/transport/chttp2/transport/bin_decoder.h',
@ -584,8 +585,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h', 'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h', 'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h', 'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h', 'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h', 'src/core/lib/profiling/timers.h',
'src/core/lib/avl/avl.h', 'src/core/lib/avl/avl.h',

@ -210,8 +210,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h', 'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h', 'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h', 'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h', 'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h', 'src/core/lib/profiling/timers.h',
'src/core/lib/gpr/alloc.cc', 'src/core/lib/gpr/alloc.cc',
@ -891,8 +891,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h', 'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h', 'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h', 'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h', 'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h', 'src/core/lib/profiling/timers.h',
'src/core/ext/transport/chttp2/transport/bin_decoder.h', 'src/core/ext/transport/chttp2/transport/bin_decoder.h',

@ -104,8 +104,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gprpp/manual_constructor.h ) s.files += %w( src/core/lib/gprpp/manual_constructor.h )
s.files += %w( src/core/lib/gprpp/map.h ) s.files += %w( src/core/lib/gprpp/map.h )
s.files += %w( src/core/lib/gprpp/memory.h ) s.files += %w( src/core/lib/gprpp/memory.h )
s.files += %w( src/core/lib/gprpp/mutex_lock.h )
s.files += %w( src/core/lib/gprpp/pair.h ) s.files += %w( src/core/lib/gprpp/pair.h )
s.files += %w( src/core/lib/gprpp/sync.h )
s.files += %w( src/core/lib/gprpp/thd.h ) s.files += %w( src/core/lib/gprpp/thd.h )
s.files += %w( src/core/lib/profiling/timers.h ) s.files += %w( src/core/lib/profiling/timers.h )
s.files += %w( src/core/lib/gpr/alloc.cc ) s.files += %w( src/core/lib/gpr/alloc.cc )

@ -264,7 +264,7 @@ GRPCAPI grpc_call_credentials* grpc_google_refresh_token_credentials_create(
const char* json_refresh_token, void* reserved); const char* json_refresh_token, void* reserved);
/** Creates an Oauth2 Access Token credentials with an access token that was /** Creates an Oauth2 Access Token credentials with an access token that was
aquired by an out of band mechanism. */ acquired by an out of band mechanism. */
GRPCAPI grpc_call_credentials* grpc_access_token_credentials_create( GRPCAPI grpc_call_credentials* grpc_access_token_credentials_create(
const char* access_token, void* reserved); const char* access_token, void* reserved);

@ -115,6 +115,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#ifdef _LP64 #ifdef _LP64
#define GPR_ARCH_64 1 #define GPR_ARCH_64 1
@ -144,6 +145,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#define GPR_SUPPORT_CHANNELS_FROM_FD 1 #define GPR_SUPPORT_CHANNELS_FROM_FD 1
#elif defined(__linux__) #elif defined(__linux__)
@ -170,6 +172,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#ifdef _LP64 #ifdef _LP64
#define GPR_ARCH_64 1 #define GPR_ARCH_64 1
@ -235,6 +238,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#ifndef GRPC_CFSTREAM #ifndef GRPC_CFSTREAM
#define GPR_SUPPORT_CHANNELS_FROM_FD 1 #define GPR_SUPPORT_CHANNELS_FROM_FD 1
@ -260,6 +264,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#define GPR_SUPPORT_CHANNELS_FROM_FD 1 #define GPR_SUPPORT_CHANNELS_FROM_FD 1
#ifdef _LP64 #ifdef _LP64
@ -283,6 +288,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#define GPR_SUPPORT_CHANNELS_FROM_FD 1 #define GPR_SUPPORT_CHANNELS_FROM_FD 1
#ifdef _LP64 #ifdef _LP64
@ -303,6 +309,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#ifdef _LP64 #ifdef _LP64
#define GPR_ARCH_64 1 #define GPR_ARCH_64 1
@ -325,6 +332,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#ifdef _LP64 #ifdef _LP64
#define GPR_ARCH_64 1 #define GPR_ARCH_64 1
@ -353,6 +361,7 @@
#define GPR_POSIX_SUBPROCESS 1 #define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#ifdef _LP64 #ifdef _LP64
#define GPR_ARCH_64 1 #define GPR_ARCH_64 1
@ -378,6 +387,7 @@
#define GPR_POSIX_SYNC 1 #define GPR_POSIX_SYNC 1
#define GPR_POSIX_STRING 1 #define GPR_POSIX_STRING 1
#define GPR_POSIX_TIME 1 #define GPR_POSIX_TIME 1
#define GPR_HAS_PTHREAD_H 1
#define GPR_GETPID_IN_UNISTD_H 1 #define GPR_GETPID_IN_UNISTD_H 1
#else #else
#error "Could not auto-detect platform" #error "Could not auto-detect platform"

@ -147,7 +147,7 @@ GPRAPI int grpc_slice_buf_start_eq(grpc_slice a, const void* b, size_t blen);
GPRAPI int grpc_slice_rchr(grpc_slice s, char c); GPRAPI int grpc_slice_rchr(grpc_slice s, char c);
GPRAPI int grpc_slice_chr(grpc_slice s, char c); GPRAPI int grpc_slice_chr(grpc_slice s, char c);
/** return the index of the first occurance of \a needle in \a haystack, or -1 /** return the index of the first occurrence of \a needle in \a haystack, or -1
if it's not found */ if it's not found */
GPRAPI int grpc_slice_slice(grpc_slice haystack, grpc_slice needle); GPRAPI int grpc_slice_slice(grpc_slice haystack, grpc_slice needle);

@ -28,6 +28,7 @@
#include <grpcpp/impl/codegen/client_interceptor.h> #include <grpcpp/impl/codegen/client_interceptor.h>
#include <grpcpp/impl/codegen/config.h> #include <grpcpp/impl/codegen/config.h>
#include <grpcpp/impl/codegen/grpc_library.h> #include <grpcpp/impl/codegen/grpc_library.h>
#include <grpcpp/impl/codegen/sync.h>
struct grpc_channel; struct grpc_channel;
@ -97,7 +98,7 @@ class Channel final : public ChannelInterface,
grpc_channel* const c_channel_; // owned grpc_channel* const c_channel_; // owned
// mu_ protects callback_cq_ (the per-channel callbackable completion queue) // mu_ protects callback_cq_ (the per-channel callbackable completion queue)
std::mutex mu_; grpc::internal::Mutex mu_;
// callback_cq_ references the callbackable completion queue associated // callback_cq_ references the callbackable completion queue associated
// with this channel (if any). It is set on the first call to CallbackCQ(). // with this channel (if any). It is set on the first call to CallbackCQ().

@ -51,6 +51,7 @@
#include <grpcpp/impl/codegen/slice.h> #include <grpcpp/impl/codegen/slice.h>
#include <grpcpp/impl/codegen/status.h> #include <grpcpp/impl/codegen/status.h>
#include <grpcpp/impl/codegen/string_ref.h> #include <grpcpp/impl/codegen/string_ref.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/impl/codegen/time.h> #include <grpcpp/impl/codegen/time.h>
struct census_context; struct census_context;
@ -457,7 +458,7 @@ class ClientContext {
bool idempotent_; bool idempotent_;
bool cacheable_; bool cacheable_;
std::shared_ptr<Channel> channel_; std::shared_ptr<Channel> channel_;
std::mutex mu_; grpc::internal::Mutex mu_;
grpc_call* call_; grpc_call* call_;
bool call_canceled_; bool call_canceled_;
gpr_timespec deadline_; gpr_timespec deadline_;

@ -183,8 +183,8 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// within the \a deadline). A \a tag points to an arbitrary location usually /// within the \a deadline). A \a tag points to an arbitrary location usually
/// employed to uniquely identify an event. /// employed to uniquely identify an event.
/// ///
/// \param tag [out] Upon sucess, updated to point to the event's tag. /// \param tag [out] Upon success, updated to point to the event's tag.
/// \param ok [out] Upon sucess, true if a successful event, false otherwise /// \param ok [out] Upon success, true if a successful event, false otherwise
/// See documentation for CompletionQueue::Next for explanation of ok /// See documentation for CompletionQueue::Next for explanation of ok
/// \param deadline [in] How long to block in wait for an event. /// \param deadline [in] How long to block in wait for an event.
/// ///
@ -203,8 +203,8 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// employed to uniquely identify an event. /// employed to uniquely identify an event.
/// ///
/// \param f [in] Function to execute before calling AsyncNext on this queue. /// \param f [in] Function to execute before calling AsyncNext on this queue.
/// \param tag [out] Upon sucess, updated to point to the event's tag. /// \param tag [out] Upon success, updated to point to the event's tag.
/// \param ok [out] Upon sucess, true if read a regular event, false /// \param ok [out] Upon success, true if read a regular event, false
/// otherwise. /// otherwise.
/// \param deadline [in] How long to block in wait for an event. /// \param deadline [in] How long to block in wait for an event.
/// ///
@ -362,7 +362,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// queue should not really shutdown until all avalanching operations have /// queue should not really shutdown until all avalanching operations have
/// been finalized. Note that we maintain the requirement that an avalanche /// been finalized. Note that we maintain the requirement that an avalanche
/// registration must take place before CQ shutdown (which must be maintained /// registration must take place before CQ shutdown (which must be maintained
/// elsehwere) /// elsewhere)
void InitialAvalanching() { void InitialAvalanching() {
gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1)); gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1));
} }

@ -149,7 +149,7 @@ class ServerInterface : public internal::CallHook {
/// 192.168.1.1:31416, [::1]:27182, etc.). /// 192.168.1.1:31416, [::1]:27182, etc.).
/// \params creds The credentials associated with the server. /// \params creds The credentials associated with the server.
/// ///
/// \return bound port number on sucess, 0 on failure. /// \return bound port number on success, 0 on failure.
/// ///
/// \warning It's an error to call this method on an already started server. /// \warning It's an error to call this method on an already started server.
virtual int AddListeningPort(const grpc::string& addr, virtual int AddListeningPort(const grpc::string& addr,

@ -0,0 +1,151 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_IMPL_CODEGEN_SYNC_H
#define GRPCPP_IMPL_CODEGEN_SYNC_H
#include <grpc/impl/codegen/port_platform.h>
#ifdef GPR_HAS_PTHREAD_H
#include <pthread.h>
#endif
#include <mutex>
#include <grpc/impl/codegen/log.h>
#include <grpc/impl/codegen/sync.h>
#include <grpcpp/impl/codegen/core_codegen_interface.h>
// The core library is not accessible in C++ codegen headers, and vice versa.
// Thus, we need to have duplicate headers with similar functionality.
// Make sure any change to this file is also reflected in
// src/core/lib/gprpp/sync.h too.
//
// Whenever possible, prefer "src/core/lib/gprpp/sync.h" over this file,
// since in core we do not rely on g_core_codegen_interface and hence do not
// pay the costs of virtual function calls.
namespace grpc {
namespace internal {
class Mutex {
public:
Mutex() { g_core_codegen_interface->gpr_mu_init(&mu_); }
~Mutex() { g_core_codegen_interface->gpr_mu_destroy(&mu_); }
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
gpr_mu* get() { return &mu_; }
const gpr_mu* get() const { return &mu_; }
private:
union {
gpr_mu mu_;
std::mutex do_not_use_sth_;
#ifdef GPR_HAS_PTHREAD_H
pthread_mutex_t do_not_use_pth_;
#endif
};
};
// MutexLock is an RAII lock holder, analogous to std::lock_guard: it acquires
// the Mutex (or raw gpr_mu) on construction and releases it on destruction.
class MutexLock {
public:
explicit MutexLock(Mutex* mu) : mu_(mu->get()) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
explicit MutexLock(gpr_mu* mu) : mu_(mu) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
~MutexLock() { g_core_codegen_interface->gpr_mu_unlock(mu_); }
MutexLock(const MutexLock&) = delete;
MutexLock& operator=(const MutexLock&) = delete;
private:
gpr_mu* const mu_;
};
class ReleasableMutexLock {
public:
explicit ReleasableMutexLock(Mutex* mu) : mu_(mu->get()) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
explicit ReleasableMutexLock(gpr_mu* mu) : mu_(mu) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
~ReleasableMutexLock() {
if (!released_) g_core_codegen_interface->gpr_mu_unlock(mu_);
}
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
void Lock() {
GPR_DEBUG_ASSERT(released_);
g_core_codegen_interface->gpr_mu_lock(mu_);
released_ = false;
}
void Unlock() {
GPR_DEBUG_ASSERT(!released_);
released_ = true;
g_core_codegen_interface->gpr_mu_unlock(mu_);
}
private:
gpr_mu* const mu_;
bool released_ = false;
};
class CondVar {
public:
CondVar() { g_core_codegen_interface->gpr_cv_init(&cv_); }
~CondVar() { g_core_codegen_interface->gpr_cv_destroy(&cv_); }
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
void Signal() { g_core_codegen_interface->gpr_cv_signal(&cv_); }
void Broadcast() { g_core_codegen_interface->gpr_cv_broadcast(&cv_); }
int Wait(Mutex* mu) {
return Wait(mu,
g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME));
}
int Wait(Mutex* mu, const gpr_timespec& deadline) {
return g_core_codegen_interface->gpr_cv_wait(&cv_, mu->get(), deadline);
}
template <typename Predicate>
void WaitUntil(Mutex* mu, Predicate pred) {
while (!pred()) {
Wait(mu, g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME));
}
}
private:
gpr_cv cv_;
};
} // namespace internal
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SYNC_H
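A minimal usage sketch of the primitives added above (illustrative only, not part of the diff; it assumes the gRPC C++ library is linked and initialized so that `g_core_codegen_interface` is populated):
```C++
// Illustrative only: basic use of grpc::internal::Mutex, MutexLock and CondVar.
#include <grpcpp/impl/codegen/sync.h>

class ReadyFlag {
 public:
  void Set() {
    grpc::internal::MutexLock lock(&mu_);  // unlocked automatically at scope exit
    ready_ = true;
    cv_.Broadcast();
  }

  void AwaitReady() {
    grpc::internal::MutexLock lock(&mu_);
    // WaitUntil re-checks the predicate each time the CondVar wakes up.
    cv_.WaitUntil(&mu_, [this] { return ready_; });
  }

 private:
  grpc::internal::Mutex mu_;
  grpc::internal::CondVar cv_;
  bool ready_ = false;
};
```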

@ -180,7 +180,7 @@ class ClientReader final : public ClientReaderInterface<R> {
/// ///
// Side effect: // Side effect:
/// Once complete, the initial metadata read from /// Once complete, the initial metadata read from
/// the server will be accessable through the \a ClientContext used to /// the server will be accessible through the \a ClientContext used to
/// construct this object. /// construct this object.
void WaitForInitialMetadata() override { void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
@ -298,7 +298,7 @@ class ClientWriter : public ClientWriterInterface<W> {
/// ///
// Side effect: // Side effect:
/// Once complete, the initial metadata read from the server will be /// Once complete, the initial metadata read from the server will be
/// accessable through the \a ClientContext used to construct this object. /// accessible through the \a ClientContext used to construct this object.
void WaitForInitialMetadata() { void WaitForInitialMetadata() {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
@ -449,7 +449,7 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// with or after the \a Finish method. /// with or after the \a Finish method.
/// ///
/// Once complete, the initial metadata read from the server will be /// Once complete, the initial metadata read from the server will be
/// accessable through the \a ClientContext used to construct this object. /// accessible through the \a ClientContext used to construct this object.
void WaitForInitialMetadata() override { void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);

@ -297,12 +297,12 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
experimental_registration_type experimental_registration_{this}; experimental_registration_type experimental_registration_{this};
// Server status // Server status
std::mutex mu_; grpc::internal::Mutex mu_;
bool started_; bool started_;
bool shutdown_; bool shutdown_;
bool shutdown_notified_; // Was notify called on the shutdown_cv_ bool shutdown_notified_; // Was notify called on the shutdown_cv_
std::condition_variable shutdown_cv_; grpc::internal::CondVar shutdown_cv_;
// It is ok (but not required) to nest callback_reqs_mu_ under mu_ . // It is ok (but not required) to nest callback_reqs_mu_ under mu_ .
// Incrementing callback_reqs_outstanding_ is ok without a lock but it must be // Incrementing callback_reqs_outstanding_ is ok without a lock but it must be
@ -311,8 +311,8 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
// during periods of increasing load; the decrement happens only when memory // during periods of increasing load; the decrement happens only when memory
// is maxed out, during server shutdown, or (possibly in a future version) // is maxed out, during server shutdown, or (possibly in a future version)
// during decreasing load, so it is less performance-critical. // during decreasing load, so it is less performance-critical.
std::mutex callback_reqs_mu_; grpc::internal::Mutex callback_reqs_mu_;
std::condition_variable callback_reqs_done_cv_; grpc::internal::CondVar callback_reqs_done_cv_;
std::atomic_int callback_reqs_outstanding_{0}; std::atomic_int callback_reqs_outstanding_{0};
std::shared_ptr<GlobalCallbacks> global_callbacks_; std::shared_ptr<GlobalCallbacks> global_callbacks_;

@ -304,12 +304,12 @@ class Server : public grpc::ServerInterface, private grpc::GrpcLibraryCodegen {
experimental_registration_type experimental_registration_{this}; experimental_registration_type experimental_registration_{this};
// Server status // Server status
std::mutex mu_; grpc::internal::Mutex mu_;
bool started_; bool started_;
bool shutdown_; bool shutdown_;
bool shutdown_notified_; // Was notify called on the shutdown_cv_ bool shutdown_notified_; // Was notify called on the shutdown_cv_
std::condition_variable shutdown_cv_; grpc::internal::CondVar shutdown_cv_;
// It is ok (but not required) to nest callback_reqs_mu_ under mu_ . // It is ok (but not required) to nest callback_reqs_mu_ under mu_ .
// Incrementing callback_reqs_outstanding_ is ok without a lock but it must be // Incrementing callback_reqs_outstanding_ is ok without a lock but it must be
@ -318,8 +318,8 @@ class Server : public grpc::ServerInterface, private grpc::GrpcLibraryCodegen {
// during periods of increasing load; the decrement happens only when memory // during periods of increasing load; the decrement happens only when memory
// is maxed out, during server shutdown, or (possibly in a future version) // is maxed out, during server shutdown, or (possibly in a future version)
// during decreasing load, so it is less performance-critical. // during decreasing load, so it is less performance-critical.
std::mutex callback_reqs_mu_; grpc::internal::Mutex callback_reqs_mu_;
std::condition_variable callback_reqs_done_cv_; grpc::internal::CondVar callback_reqs_done_cv_;
std::atomic_int callback_reqs_outstanding_{0}; std::atomic_int callback_reqs_outstanding_{0};
std::shared_ptr<GlobalCallbacks> global_callbacks_; std::shared_ptr<GlobalCallbacks> global_callbacks_;
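The comments above describe a counter that is usually decremented without holding the lock, with the condition variable only needed for the final transition to zero. A hedged sketch of that pattern in isolation (class and member names here are hypothetical, not the actual Server members):
```C++
// Illustrative only: "wait until outstanding work drains" with the new
// grpc::internal sync primitives. Names are hypothetical, not gRPC's.
#include <atomic>
#include <grpcpp/impl/codegen/sync.h>

class OutstandingWork {
 public:
  void Add() { outstanding_.fetch_add(1, std::memory_order_relaxed); }

  void Done() {
    // Only the decrement that reaches zero must synchronize with the waiter,
    // so the lock is taken just to publish the wakeup.
    if (outstanding_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      grpc::internal::MutexLock lock(&mu_);
      done_cv_.Signal();
    }
  }

  void WaitUntilDrained() {
    grpc::internal::MutexLock lock(&mu_);
    done_cv_.WaitUntil(&mu_, [this] { return outstanding_.load() == 0; });
  }

 private:
  std::atomic<int> outstanding_{0};
  grpc::internal::Mutex mu_;
  grpc::internal::CondVar done_cv_;
};
```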

@ -109,8 +109,8 @@
<file baseinstalldir="/" name="src/core/lib/gprpp/manual_constructor.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/manual_constructor.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/map.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/map.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/memory.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/memory.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/mutex_lock.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/pair.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/pair.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/sync.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/thd.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/thd.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/profiling/timers.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/profiling/timers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/alloc.cc" role="src" /> <file baseinstalldir="/" name="src/core/lib/gpr/alloc.cc" role="src" />

(File diff suppressed because it is too large.)

@ -27,7 +27,7 @@
#include "pb_encode.h" #include "pb_encode.h"
#include "src/core/ext/filters/client_channel/health/health.pb.h" #include "src/core/ext/filters/client_channel/health/health.pb.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/error_utils.h" #include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/status_metadata.h" #include "src/core/lib/transport/status_metadata.h"
@ -69,7 +69,6 @@ HealthCheckClient::HealthCheckClient(
} }
GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this, GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
gpr_mu_init(&mu_);
StartCall(); StartCall();
} }
@ -78,7 +77,6 @@ HealthCheckClient::~HealthCheckClient() {
gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this); gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this);
} }
GRPC_ERROR_UNREF(error_); GRPC_ERROR_UNREF(error_);
gpr_mu_destroy(&mu_);
} }
void HealthCheckClient::NotifyOnHealthChange(grpc_connectivity_state* state, void HealthCheckClient::NotifyOnHealthChange(grpc_connectivity_state* state,

@ -31,6 +31,7 @@
#include "src/core/lib/gprpp/atomic.h" #include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/orphanable.h" #include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"
@ -157,7 +158,7 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
grpc_pollset_set* interested_parties_; // Do not own. grpc_pollset_set* interested_parties_; // Do not own.
RefCountedPtr<channelz::SubchannelNode> channelz_node_; RefCountedPtr<channelz::SubchannelNode> channelz_node_;
gpr_mu mu_; Mutex mu_;
grpc_connectivity_state state_ = GRPC_CHANNEL_CONNECTING; grpc_connectivity_state state_ = GRPC_CHANNEL_CONNECTING;
grpc_error* error_ = GRPC_ERROR_NONE; grpc_error* error_ = GRPC_ERROR_NONE;
grpc_connectivity_state* notify_state_ = nullptr; grpc_connectivity_state* notify_state_ = nullptr;

@ -33,7 +33,7 @@
#include "src/core/lib/channel/handshaker_registry.h" #include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/gpr/env.h" #include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/http/format_request.h" #include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h" #include "src/core/lib/http/parser.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"

@ -88,7 +88,6 @@
#include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/orphanable.h" #include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"

@ -25,7 +25,7 @@
#include <grpc/support/atm.h> #include <grpc/support/atm.h>
#include <grpc/support/string_util.h> #include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
namespace grpc_core { namespace grpc_core {

@ -26,6 +26,7 @@
#include "src/core/lib/gprpp/inlined_vector.h" #include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/ref_counted.h" #include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
namespace grpc_core { namespace grpc_core {
@ -41,9 +42,6 @@ class GrpcLbClientStats : public RefCounted<GrpcLbClientStats> {
typedef InlinedVector<DropTokenCount, 10> DroppedCallCounts; typedef InlinedVector<DropTokenCount, 10> DroppedCallCounts;
GrpcLbClientStats() { gpr_mu_init(&drop_count_mu_); }
~GrpcLbClientStats() { gpr_mu_destroy(&drop_count_mu_); }
void AddCallStarted(); void AddCallStarted();
void AddCallFinished(bool finished_with_client_failed_to_send, void AddCallFinished(bool finished_with_client_failed_to_send,
bool finished_known_received); bool finished_known_received);
@ -66,7 +64,7 @@ class GrpcLbClientStats : public RefCounted<GrpcLbClientStats> {
gpr_atm num_calls_finished_ = 0; gpr_atm num_calls_finished_ = 0;
gpr_atm num_calls_finished_with_client_failed_to_send_ = 0; gpr_atm num_calls_finished_with_client_failed_to_send_ = 0;
gpr_atm num_calls_finished_known_received_ = 0; gpr_atm num_calls_finished_known_received_ = 0;
gpr_mu drop_count_mu_; // Guards drop_token_counts_. Mutex drop_count_mu_; // Guards drop_token_counts_.
UniquePtr<DroppedCallCounts> drop_token_counts_; UniquePtr<DroppedCallCounts> drop_token_counts_;
}; };

@ -27,7 +27,7 @@
#include "src/core/ext/filters/client_channel/server_address.h" #include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
@ -154,13 +154,12 @@ class PickFirst : public LoadBalancingPolicy {
/// Lock and data used to capture snapshots of this channels child /// Lock and data used to capture snapshots of this channels child
/// channels and subchannels. This data is consumed by channelz. /// channels and subchannels. This data is consumed by channelz.
gpr_mu child_refs_mu_; Mutex child_refs_mu_;
channelz::ChildRefsList child_subchannels_; channelz::ChildRefsList child_subchannels_;
channelz::ChildRefsList child_channels_; channelz::ChildRefsList child_channels_;
}; };
PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) { PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) {
gpr_mu_init(&child_refs_mu_);
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p created.", this); gpr_log(GPR_INFO, "Pick First %p created.", this);
} }
@ -170,7 +169,6 @@ PickFirst::~PickFirst() {
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Destroying Pick First %p", this); gpr_log(GPR_INFO, "Destroying Pick First %p", this);
} }
gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr); GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
} }

@ -36,8 +36,8 @@
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
@ -188,7 +188,7 @@ class RoundRobin : public LoadBalancingPolicy {
bool shutdown_ = false; bool shutdown_ = false;
/// Lock and data used to capture snapshots of this channel's child /// Lock and data used to capture snapshots of this channel's child
/// channels and subchannels. This data is consumed by channelz. /// channels and subchannels. This data is consumed by channelz.
gpr_mu child_refs_mu_; Mutex child_refs_mu_;
channelz::ChildRefsList child_subchannels_; channelz::ChildRefsList child_subchannels_;
channelz::ChildRefsList child_channels_; channelz::ChildRefsList child_channels_;
}; };
@ -240,7 +240,6 @@ RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs* pick,
// //
RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) { RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) {
gpr_mu_init(&child_refs_mu_);
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Created", this); gpr_log(GPR_INFO, "[RR %p] Created", this);
} }
@ -250,7 +249,6 @@ RoundRobin::~RoundRobin() {
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this); gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
} }
gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr); GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
} }

@ -89,9 +89,9 @@
#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/map.h" #include "src/core/lib/gprpp/map.h"
#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/orphanable.h" #include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/sockaddr_utils.h"
@ -118,6 +118,7 @@ namespace {
constexpr char kXds[] = "xds_experimental"; constexpr char kXds[] = "xds_experimental";
constexpr char kDefaultLocalityName[] = "xds_default_locality"; constexpr char kDefaultLocalityName[] = "xds_default_locality";
constexpr uint32_t kDefaultLocalityWeight = 3;
class XdsLb : public LoadBalancingPolicy { class XdsLb : public LoadBalancingPolicy {
public: public:
@ -259,29 +260,52 @@ class XdsLb : public LoadBalancingPolicy {
bool retry_timer_callback_pending_ = false; bool retry_timer_callback_pending_ = false;
}; };
// Since pickers are UniquePtrs, we use this RefCounted wrapper
// to control references to the picker from both the xds picker and the
// locality entry.
class PickerRef : public RefCounted<PickerRef> {
public:
explicit PickerRef(UniquePtr<SubchannelPicker> picker)
: picker_(std::move(picker)) {}
PickResult Pick(PickArgs* pick, grpc_error** error) {
return picker_->Pick(pick, error);
}
private:
UniquePtr<SubchannelPicker> picker_;
};
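A standalone sketch of the ownership point made in the comment above: a UniquePtr has a single owner, while a RefCountedPtr to a RefCounted wrapper can be held by both the locality entry and the xds picker at once (Shared and Example are illustrative names, not code from this change):

#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"

class Shared : public grpc_core::RefCounted<Shared> {};

void Example() {
  grpc_core::RefCountedPtr<Shared> held_by_entry =
      grpc_core::MakeRefCounted<Shared>();
  // Copying the smart pointer takes an additional ref; both holders keep the
  // object alive, and it is deleted only when the last ref is dropped.
  grpc_core::RefCountedPtr<Shared> held_by_picker = held_by_entry;
}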
// The picker will use a stateless weighting algorithm to pick the locality to
// use for each request.
class Picker : public SubchannelPicker { class Picker : public SubchannelPicker {
public: public:
Picker(UniquePtr<SubchannelPicker> child_picker, // Maintains a weighted list of pickers from each locality that is in ready
RefCountedPtr<XdsLbClientStats> client_stats) // state. The first element in the pair represents the end of a range
: child_picker_(std::move(child_picker)), // proportional to the locality's weight. The start of the range is the
client_stats_(std::move(client_stats)) {} // previous value in the vector and is 0 for the first element.
using PickerList =
InlinedVector<Pair<uint32_t, RefCountedPtr<PickerRef>>, 1>;
Picker(RefCountedPtr<XdsLbClientStats> client_stats, PickerList pickers)
: client_stats_(std::move(client_stats)),
pickers_(std::move(pickers)) {}
PickResult Pick(PickArgs* pick, grpc_error** error) override; PickResult Pick(PickArgs* pick, grpc_error** error) override;
private: private:
UniquePtr<SubchannelPicker> child_picker_; // Calls the picker of the locality that the key falls within
PickResult PickFromLocality(const uint32_t key, PickArgs* pick,
grpc_error** error);
RefCountedPtr<XdsLbClientStats> client_stats_; RefCountedPtr<XdsLbClientStats> client_stats_;
PickerList pickers_;
}; };
class LocalityMap { class LocalityMap {
public: public:
class LocalityEntry : public InternallyRefCounted<LocalityEntry> { class LocalityEntry : public InternallyRefCounted<LocalityEntry> {
public: public:
explicit LocalityEntry(RefCountedPtr<XdsLb> parent) LocalityEntry(RefCountedPtr<XdsLb> parent, uint32_t locality_weight)
: parent_(std::move(parent)) { : parent_(std::move(parent)), locality_weight_(locality_weight) {}
gpr_mu_init(&child_policy_mu_); ~LocalityEntry() = default;
}
~LocalityEntry() { gpr_mu_destroy(&child_policy_mu_); }
void UpdateLocked(xds_grpclb_serverlist* serverlist, void UpdateLocked(xds_grpclb_serverlist* serverlist,
LoadBalancingPolicy::Config* child_policy_config, LoadBalancingPolicy::Config* child_policy_config,
@ -323,13 +347,13 @@ class XdsLb : public LoadBalancingPolicy {
OrphanablePtr<LoadBalancingPolicy> pending_child_policy_; OrphanablePtr<LoadBalancingPolicy> pending_child_policy_;
// Lock held when modifying the value of child_policy_ or // Lock held when modifying the value of child_policy_ or
// pending_child_policy_. // pending_child_policy_.
gpr_mu child_policy_mu_; Mutex child_policy_mu_;
RefCountedPtr<XdsLb> parent_; RefCountedPtr<XdsLb> parent_;
RefCountedPtr<PickerRef> picker_ref_;
grpc_connectivity_state connectivity_state_;
uint32_t locality_weight_;
}; };
LocalityMap() { gpr_mu_init(&child_refs_mu_); }
~LocalityMap() { gpr_mu_destroy(&child_refs_mu_); }
void UpdateLocked(const LocalityList& locality_list, void UpdateLocked(const LocalityList& locality_list,
LoadBalancingPolicy::Config* child_policy_config, LoadBalancingPolicy::Config* child_policy_config,
const grpc_channel_args* args, XdsLb* parent); const grpc_channel_args* args, XdsLb* parent);
@ -343,7 +367,7 @@ class XdsLb : public LoadBalancingPolicy {
Map<UniquePtr<char>, OrphanablePtr<LocalityEntry>, StringLess> map_; Map<UniquePtr<char>, OrphanablePtr<LocalityEntry>, StringLess> map_;
// Lock held while filling child refs for all localities // Lock held while filling child refs for all localities
// inside the map // inside the map
gpr_mu child_refs_mu_; Mutex child_refs_mu_;
}; };
struct LocalityServerlistEntry { struct LocalityServerlistEntry {
@ -351,7 +375,9 @@ class XdsLb : public LoadBalancingPolicy {
gpr_free(locality_name); gpr_free(locality_name);
xds_grpclb_destroy_serverlist(serverlist); xds_grpclb_destroy_serverlist(serverlist);
} }
char* locality_name; char* locality_name;
uint32_t locality_weight;
// The deserialized response from the balancer. May be nullptr until one // The deserialized response from the balancer. May be nullptr until one
// such response has arrived. // such response has arrived.
xds_grpclb_serverlist* serverlist; xds_grpclb_serverlist* serverlist;
@ -397,7 +423,7 @@ class XdsLb : public LoadBalancingPolicy {
// Mutex to protect the channel to the LB server. This is used when // Mutex to protect the channel to the LB server. This is used when
// processing a channelz request. // processing a channelz request.
// TODO(juanlishen): Replace this with atomic. // TODO(juanlishen): Replace this with atomic.
gpr_mu lb_chand_mu_; Mutex lb_chand_mu_;
// Timeout in milliseconds for the LB call. 0 means no deadline. // Timeout in milliseconds for the LB call. 0 means no deadline.
int lb_call_timeout_ms_ = 0; int lb_call_timeout_ms_ = 0;
@ -417,6 +443,8 @@ class XdsLb : public LoadBalancingPolicy {
RefCountedPtr<Config> child_policy_config_; RefCountedPtr<Config> child_policy_config_;
// Map of policies to use in the backend // Map of policies to use in the backend
LocalityMap locality_map_; LocalityMap locality_map_;
// TODO(mhaidry) : Add support for multiple maps of localities
// with different priorities
LocalityList locality_serverlist_; LocalityList locality_serverlist_;
// TODO(mhaidry) : Add a pending locality map that may be swapped with // TODO(mhaidry) : Add a pending locality map that may be swapped with
// the current one when new localities in the pending map are ready // the current one when new localities in the pending map are ready
@ -429,8 +457,12 @@ class XdsLb : public LoadBalancingPolicy {
XdsLb::PickResult XdsLb::Picker::Pick(PickArgs* pick, grpc_error** error) { XdsLb::PickResult XdsLb::Picker::Pick(PickArgs* pick, grpc_error** error) {
// TODO(roth): Add support for drop handling. // TODO(roth): Add support for drop handling.
// Forward pick to child policy. // Generate a random number between 0 and the total weight
PickResult result = child_picker_->Pick(pick, error); const uint32_t key =
(rand() * pickers_[pickers_.size() - 1].first) / RAND_MAX;
// Forward pick to whichever locality maps to the range in which the
// random number falls.
PickResult result = PickFromLocality(key, pick, error);
// If pick succeeded, add client stats. // If pick succeeded, add client stats.
if (result == PickResult::PICK_COMPLETE && if (result == PickResult::PICK_COMPLETE &&
pick->connected_subchannel != nullptr && client_stats_ != nullptr) { pick->connected_subchannel != nullptr && client_stats_ != nullptr) {
@ -439,6 +471,29 @@ XdsLb::PickResult XdsLb::Picker::Pick(PickArgs* pick, grpc_error** error) {
return result; return result;
} }
XdsLb::PickResult XdsLb::Picker::PickFromLocality(const uint32_t key,
PickArgs* pick,
grpc_error** error) {
size_t mid = 0;
size_t start_index = 0;
size_t end_index = pickers_.size() - 1;
size_t index = 0;
while (end_index > start_index) {
mid = (start_index + end_index) / 2;
if (pickers_[mid].first > key) {
end_index = mid;
} else if (pickers_[mid].first < key) {
start_index = mid + 1;
} else {
index = mid + 1;
break;
}
}
if (index == 0) index = start_index;
GPR_ASSERT(pickers_[index].first > key);
return pickers_[index].second->Pick(pick, error);
}
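For illustration, the same cumulative-weight lookup as a standalone program, with std::vector and std::string standing in for PickerList and PickerRef; the locality names and weights here are made up:

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
  // Each element stores the *end* of a range proportional to the locality's
  // weight; the range starts at the previous element's value (0 for the first).
  std::vector<std::pair<uint32_t, std::string>> pickers;
  uint32_t end = 0;
  for (const auto& locality : {std::make_pair(3u, std::string("locality-a")),
                               std::make_pair(1u, std::string("locality-b")),
                               std::make_pair(2u, std::string("locality-c"))}) {
    end += locality.first;                       // cumulative weight
    pickers.emplace_back(end, locality.second);  // range (prev, end]
  }
  // Pick a key uniformly in [0, total_weight) and find the first range whose
  // end is strictly greater than the key -- the same invariant the binary
  // search above asserts with GPR_ASSERT(pickers_[index].first > key).
  const uint32_t key =
      static_cast<uint32_t>(std::rand()) % pickers.back().first;
  size_t lo = 0;
  size_t hi = pickers.size() - 1;
  while (lo < hi) {
    const size_t mid = (lo + hi) / 2;
    if (pickers[mid].first > key) {
      hi = mid;
    } else {
      lo = mid + 1;
    }
  }
  std::cout << "key " << key << " -> " << pickers[lo].second << "\n";
}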
// //
// serverlist parsing code // serverlist parsing code
// //
@ -940,6 +995,8 @@ void XdsLb::BalancerChannelState::BalancerCallState::
MakeUnique<LocalityServerlistEntry>()); MakeUnique<LocalityServerlistEntry>());
xdslb_policy->locality_serverlist_[0]->locality_name = xdslb_policy->locality_serverlist_[0]->locality_name =
static_cast<char*>(gpr_strdup(kDefaultLocalityName)); static_cast<char*>(gpr_strdup(kDefaultLocalityName));
xdslb_policy->locality_serverlist_[0]->locality_weight =
kDefaultLocalityWeight;
} }
// and update the copy in the XdsLb instance. This // and update the copy in the XdsLb instance. This
// serverlist instance will be destroyed either upon the next // serverlist instance will be destroyed either upon the next
@ -1090,7 +1147,6 @@ XdsLb::XdsLb(Args args)
: LoadBalancingPolicy(std::move(args)), : LoadBalancingPolicy(std::move(args)),
locality_map_(), locality_map_(),
locality_serverlist_() { locality_serverlist_() {
gpr_mu_init(&lb_chand_mu_);
// Record server name. // Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg); const char* server_uri = grpc_channel_arg_get_string(arg);
@ -1114,7 +1170,6 @@ XdsLb::XdsLb(Args args)
} }
XdsLb::~XdsLb() { XdsLb::~XdsLb() {
gpr_mu_destroy(&lb_chand_mu_);
gpr_free((void*)server_name_); gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_); grpc_channel_args_destroy(args_);
locality_serverlist_.clear(); locality_serverlist_.clear();
@ -1323,8 +1378,8 @@ void XdsLb::LocalityMap::UpdateLocked(
gpr_strdup(locality_serverlist[i]->locality_name)); gpr_strdup(locality_serverlist[i]->locality_name));
auto iter = map_.find(locality_name); auto iter = map_.find(locality_name);
if (iter == map_.end()) { if (iter == map_.end()) {
OrphanablePtr<LocalityEntry> new_entry = OrphanablePtr<LocalityEntry> new_entry = MakeOrphanable<LocalityEntry>(
MakeOrphanable<LocalityEntry>(parent->Ref()); parent->Ref(), locality_serverlist[i]->locality_weight);
MutexLock lock(&child_refs_mu_); MutexLock lock(&child_refs_mu_);
iter = map_.emplace(std::move(locality_name), std::move(new_entry)).first; iter = map_.emplace(std::move(locality_name), std::move(new_entry)).first;
} }
@ -1342,8 +1397,8 @@ void grpc_core::XdsLb::LocalityMap::ShutdownLocked() {
} }
void grpc_core::XdsLb::LocalityMap::ResetBackoffLocked() { void grpc_core::XdsLb::LocalityMap::ResetBackoffLocked() {
for (auto iter = map_.begin(); iter != map_.end(); iter++) { for (auto& p : map_) {
iter->second->ResetBackoffLocked(); p.second->ResetBackoffLocked();
} }
} }
@ -1351,8 +1406,8 @@ void grpc_core::XdsLb::LocalityMap::FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels, channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) { channelz::ChildRefsList* child_channels) {
MutexLock lock(&child_refs_mu_); MutexLock lock(&child_refs_mu_);
for (auto iter = map_.begin(); iter != map_.end(); iter++) { for (auto& p : map_) {
iter->second->FillChildRefsForChannelz(child_subchannels, child_channels); p.second->FillChildRefsForChannelz(child_subchannels, child_channels);
} }
} }
@ -1624,9 +1679,72 @@ void XdsLb::LocalityMap::LocalityEntry::Helper::UpdateState(
entry_->parent_->lb_chand_->lb_calld() == nullptr entry_->parent_->lb_chand_->lb_calld() == nullptr
? nullptr ? nullptr
: entry_->parent_->lb_chand_->lb_calld()->client_stats(); : entry_->parent_->lb_chand_->lb_calld()->client_stats();
entry_->parent_->channel_control_helper()->UpdateState( // Cache the picker and its state in the entry
state, UniquePtr<SubchannelPicker>( entry_->picker_ref_ = MakeRefCounted<PickerRef>(std::move(picker));
New<Picker>(std::move(picker), std::move(client_stats)))); entry_->connectivity_state_ = state;
// Construct a new xds picker which maintains a map of all locality pickers
// that are ready. Each locality is represented by a portion of the range
// proportional to its weight, such that the total range is the sum of the
// weights of all localities
uint32_t end = 0;
size_t num_connecting = 0;
size_t num_idle = 0;
size_t num_transient_failures = 0;
auto& locality_map = this->entry_->parent_->locality_map_.map_;
Picker::PickerList pickers;
for (auto& p : locality_map) {
const LocalityEntry* entry = p.second.get();
grpc_connectivity_state connectivity_state = entry->connectivity_state_;
switch (connectivity_state) {
case GRPC_CHANNEL_READY: {
end += entry->locality_weight_;
pickers.push_back(MakePair(end, entry->picker_ref_));
break;
}
case GRPC_CHANNEL_CONNECTING: {
num_connecting++;
break;
}
case GRPC_CHANNEL_IDLE: {
num_idle++;
break;
}
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
num_transient_failures++;
break;
}
default: {
gpr_log(GPR_ERROR, "Invalid locality connectivity state - %d",
connectivity_state);
}
}
}
// Pass on the constructed xds picker if it has any ready pickers in its map;
// otherwise pass a QueuePicker if any of the locality pickers are in a
// connecting or idle state; finally, return a transient failure picker if all
// locality pickers are in transient failure.
if (pickers.size() > 0) {
entry_->parent_->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY,
UniquePtr<LoadBalancingPolicy::SubchannelPicker>(
New<Picker>(std::move(client_stats), std::move(pickers))));
} else if (num_connecting > 0) {
entry_->parent_->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING,
UniquePtr<SubchannelPicker>(New<QueuePicker>(this->entry_->parent_)));
} else if (num_idle > 0) {
entry_->parent_->channel_control_helper()->UpdateState(
GRPC_CHANNEL_IDLE,
UniquePtr<SubchannelPicker>(New<QueuePicker>(this->entry_->parent_)));
} else {
GPR_ASSERT(num_transient_failures == locality_map.size());
grpc_error* error =
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"connections to all localities failing"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
entry_->parent_->channel_control_helper()->UpdateState(
state, UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error)));
}
} }
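The branch order above encodes the aggregation rule from the comment: READY wins over CONNECTING, CONNECTING over IDLE, and TRANSIENT_FAILURE only when every locality is failing. A condensed sketch of that rule, assuming the counts were collected the same way as num_connecting/num_idle above (AggregateLocalityStates is an illustrative name):

#include <cstddef>

#include <grpc/impl/codegen/connectivity_state.h>

grpc_connectivity_state AggregateLocalityStates(size_t num_ready,
                                                size_t num_connecting,
                                                size_t num_idle) {
  if (num_ready > 0) return GRPC_CHANNEL_READY;            // any ready locality
  if (num_connecting > 0) return GRPC_CHANNEL_CONNECTING;  // else any connecting
  if (num_idle > 0) return GRPC_CHANNEL_IDLE;              // else any idle
  return GRPC_CHANNEL_TRANSIENT_FAILURE;                   // all localities failing
}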
void XdsLb::LocalityMap::LocalityEntry::Helper::RequestReresolution() { void XdsLb::LocalityMap::LocalityEntry::Helper::RequestReresolution() {

@ -43,6 +43,7 @@
#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/gethostname.h" #include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/iomgr_custom.h"
#include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/json/json.h" #include "src/core/lib/json/json.h"
@ -430,8 +431,11 @@ static grpc_address_resolver_vtable ares_resolver = {
grpc_resolve_address_ares, blocking_resolve_address_ares}; grpc_resolve_address_ares, blocking_resolve_address_ares};
static bool should_use_ares(const char* resolver_env) { static bool should_use_ares(const char* resolver_env) {
return resolver_env == nullptr || strlen(resolver_env) == 0 || // TODO(lidiz): Remove the "g_custom_iomgr_enabled" flag once c-ares supports
gpr_stricmp(resolver_env, "ares") == 0; // custom IO managers (e.g. gevent).
return !g_custom_iomgr_enabled &&
(resolver_env == nullptr || strlen(resolver_env) == 0 ||
gpr_stricmp(resolver_env, "ares") == 0);
} }
void grpc_resolver_dns_ares_init() { void grpc_resolver_dns_ares_init() {

@ -48,7 +48,7 @@
#include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h" #include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"

@ -42,8 +42,8 @@
#include "src/core/lib/gpr/alloc.h" #include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/profiling/timers.h" #include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
@ -454,13 +454,14 @@ struct Subchannel::ExternalStateWatcher {
grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set_, grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set_,
w->pollset_set); w->pollset_set);
} }
gpr_mu_lock(&w->subchannel->mu_); {
if (w->subchannel->external_state_watcher_list_ == w) { MutexLock lock(&w->subchannel->mu_);
w->subchannel->external_state_watcher_list_ = w->next; if (w->subchannel->external_state_watcher_list_ == w) {
w->subchannel->external_state_watcher_list_ = w->next;
}
if (w->next != nullptr) w->next->prev = w->prev;
if (w->prev != nullptr) w->prev->next = w->next;
} }
if (w->next != nullptr) w->next->prev = w->prev;
if (w->prev != nullptr) w->prev->next = w->next;
gpr_mu_unlock(&w->subchannel->mu_);
GRPC_SUBCHANNEL_WEAK_UNREF(w->subchannel, "external_state_watcher+done"); GRPC_SUBCHANNEL_WEAK_UNREF(w->subchannel, "external_state_watcher+done");
Delete(w); Delete(w);
GRPC_CLOSURE_SCHED(follow_up, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(follow_up, GRPC_ERROR_REF(error));
@ -582,7 +583,6 @@ Subchannel::Subchannel(SubchannelKey* key, grpc_connector* connector,
"subchannel"); "subchannel");
grpc_connectivity_state_init(&state_and_health_tracker_, GRPC_CHANNEL_IDLE, grpc_connectivity_state_init(&state_and_health_tracker_, GRPC_CHANNEL_IDLE,
"subchannel"); "subchannel");
gpr_mu_init(&mu_);
// Check whether we should enable health checking. // Check whether we should enable health checking.
const char* service_config_json = grpc_channel_arg_get_string( const char* service_config_json = grpc_channel_arg_get_string(
grpc_channel_args_find(args_, GRPC_ARG_SERVICE_CONFIG)); grpc_channel_args_find(args_, GRPC_ARG_SERVICE_CONFIG));
@ -629,7 +629,6 @@ Subchannel::~Subchannel() {
grpc_connector_unref(connector_); grpc_connector_unref(connector_);
grpc_pollset_set_destroy(pollset_set_); grpc_pollset_set_destroy(pollset_set_);
Delete(key_); Delete(key_);
gpr_mu_destroy(&mu_);
} }
Subchannel* Subchannel::Create(grpc_connector* connector, Subchannel* Subchannel::Create(grpc_connector* connector,
@ -903,7 +902,9 @@ void Subchannel::MaybeStartConnectingLocked() {
void Subchannel::OnRetryAlarm(void* arg, grpc_error* error) { void Subchannel::OnRetryAlarm(void* arg, grpc_error* error) {
Subchannel* c = static_cast<Subchannel*>(arg); Subchannel* c = static_cast<Subchannel*>(arg);
gpr_mu_lock(&c->mu_); // TODO(soheilhy): Once subchannel refcounting is simplified, we can just use
// MutexLock instead of ReleasableMutexLock here.
ReleasableMutexLock lock(&c->mu_);
c->have_retry_alarm_ = false; c->have_retry_alarm_ = false;
if (c->disconnected_) { if (c->disconnected_) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected", error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
@ -917,9 +918,9 @@ void Subchannel::OnRetryAlarm(void* arg, grpc_error* error) {
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO, "Failed to connect to channel, retrying"); gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
c->ContinueConnectingLocked(); c->ContinueConnectingLocked();
gpr_mu_unlock(&c->mu_); lock.Unlock();
} else { } else {
gpr_mu_unlock(&c->mu_); lock.Unlock();
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
} }
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
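The TODO above exists because ReleasableMutexLock lets the critical section end before GRPC_SUBCHANNEL_WEAK_UNREF runs. A small sketch of that pattern under an assumed mutex/flag pair (UpdateThenNotify is an illustrative name, not code from this change):

#include "src/core/lib/gprpp/sync.h"

void UpdateThenNotify(grpc_core::Mutex* mu, bool* flag) {
  grpc_core::ReleasableMutexLock lock(mu);
  *flag = true;   // state change made while holding the lock
  lock.Unlock();  // drop the lock before calling code that must not hold it
  // ... unguarded work here (e.g. releasing refs, invoking callbacks) ...
  // The destructor will not unlock again because Unlock() was already called.
}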
@ -944,24 +945,25 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error* error) {
auto* c = static_cast<Subchannel*>(arg); auto* c = static_cast<Subchannel*>(arg);
grpc_channel_args* delete_channel_args = c->connecting_result_.channel_args; grpc_channel_args* delete_channel_args = c->connecting_result_.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "on_connecting_finished"); GRPC_SUBCHANNEL_WEAK_REF(c, "on_connecting_finished");
gpr_mu_lock(&c->mu_); {
c->connecting_ = false; MutexLock lock(&c->mu_);
if (c->connecting_result_.transport != nullptr && c->connecting_ = false;
c->PublishTransportLocked()) { if (c->connecting_result_.transport != nullptr &&
// Do nothing, transport was published. c->PublishTransportLocked()) {
} else if (c->disconnected_) { // Do nothing, transport was published.
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); } else if (c->disconnected_) {
} else { GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
gpr_log(GPR_INFO, "Connect failed: %s", grpc_error_string(error)); } else {
c->SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE, gpr_log(GPR_INFO, "Connect failed: %s", grpc_error_string(error));
c->SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
"connect_failed");
grpc_connectivity_state_set(&c->state_and_health_tracker_,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connect_failed"); "connect_failed");
grpc_connectivity_state_set(&c->state_and_health_tracker_, c->MaybeStartConnectingLocked();
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
"connect_failed"); }
c->MaybeStartConnectingLocked();
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
} }
gpr_mu_unlock(&c->mu_);
GRPC_SUBCHANNEL_WEAK_UNREF(c, "on_connecting_finished"); GRPC_SUBCHANNEL_WEAK_UNREF(c, "on_connecting_finished");
grpc_channel_args_destroy(delete_channel_args); grpc_channel_args_destroy(delete_channel_args);
} }

@ -29,6 +29,7 @@
#include "src/core/lib/gpr/arena.h" #include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/ref_counted.h" #include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
@ -263,7 +264,7 @@ class Subchannel {
// pollset_set tracking who's interested in a connection being setup. // pollset_set tracking who's interested in a connection being setup.
grpc_pollset_set* pollset_set_; grpc_pollset_set* pollset_set_;
// Protects the other members. // Protects the other members.
gpr_mu mu_; Mutex mu_;
// Refcount // Refcount
// - lower INTERNAL_REF_BITS bits are for internal references: // - lower INTERNAL_REF_BITS bits are for internal references:
// these do not keep the subchannel open. // these do not keep the subchannel open.

@ -23,7 +23,7 @@
#include "src/core/lib/channel/channelz_registry.h" #include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/gpr/useful.h" #include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>

@ -27,8 +27,8 @@
#include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/inlined_vector.h" #include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/ref_counted.h" #include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/exec_ctx.h"

@ -42,7 +42,7 @@ grpc_channel_args* grpc_channel_args_set_compression_algorithm(
* modified to point to the returned instance (which may be different from the * modified to point to the returned instance (which may be different from the
* input value of \a a). */ * input value of \a a). */
grpc_channel_args* grpc_channel_args_compression_algorithm_set_state( grpc_channel_args* grpc_channel_args_compression_algorithm_set_state(
grpc_channel_args** a, grpc_compression_algorithm algorithm, int enabled); grpc_channel_args** a, grpc_compression_algorithm algorithm, int state);
/** Returns the bitset representing the support state (true for enabled, false /** Returns the bitset representing the support state (true for enabled, false
* for disabled) for compression algorithms. * for disabled) for compression algorithms.

@ -29,49 +29,10 @@
#include <grpc/support/sync.h> #include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h" #include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/memory.h"
namespace { static void* gpr_arena_malloc(size_t size) {
enum init_strategy { return gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
NO_INIT, // Do not initialize the arena blocks.
ZERO_INIT, // Initialize arena blocks with 0.
NON_ZERO_INIT, // Initialize arena blocks with a non-zero value.
};
gpr_once g_init_strategy_once = GPR_ONCE_INIT;
init_strategy g_init_strategy = NO_INIT;
} // namespace
static void set_strategy_from_env() {
char* str = gpr_getenv("GRPC_ARENA_INIT_STRATEGY");
if (str == nullptr) {
g_init_strategy = NO_INIT;
} else if (strcmp(str, "zero_init") == 0) {
g_init_strategy = ZERO_INIT;
} else if (strcmp(str, "non_zero_init") == 0) {
g_init_strategy = NON_ZERO_INIT;
} else {
g_init_strategy = NO_INIT;
}
gpr_free(str);
}
static void* gpr_arena_alloc_maybe_init(size_t size) {
void* mem = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
if (GPR_UNLIKELY(g_init_strategy != NO_INIT)) {
if (g_init_strategy == ZERO_INIT) {
memset(mem, 0, size);
} else { // NON_ZERO_INIT.
memset(mem, 0xFE, size);
}
}
return mem;
}
void gpr_arena_init() {
gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
} }
// Uncomment this to use a simple arena that simply allocates the // Uncomment this to use a simple arena that simply allocates the
@ -109,8 +70,7 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
gpr_mu_lock(&arena->mu); gpr_mu_lock(&arena->mu);
arena->ptrs = arena->ptrs =
(void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1)); (void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));
void* retval = arena->ptrs[arena->num_ptrs++] = void* retval = arena->ptrs[arena->num_ptrs++] = gpr_arena_malloc(size);
gpr_arena_alloc_maybe_init(size);
gpr_mu_unlock(&arena->mu); gpr_mu_unlock(&arena->mu);
return retval; return retval;
} }
@ -154,7 +114,7 @@ struct gpr_arena {
gpr_arena* gpr_arena_create(size_t initial_size) { gpr_arena* gpr_arena_create(size_t initial_size) {
initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size); initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
return new (gpr_arena_alloc_maybe_init( return new (gpr_arena_malloc(
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size)) GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size))
gpr_arena(initial_size); gpr_arena(initial_size);
} }
@ -179,7 +139,7 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
// sizing hysteresis (that is, most calls should have a large enough initial // sizing hysteresis (that is, most calls should have a large enough initial
// zone and will not need to grow the arena). // zone and will not need to grow the arena).
gpr_mu_lock(&arena->arena_growth_mutex); gpr_mu_lock(&arena->arena_growth_mutex);
zone* z = new (gpr_arena_alloc_maybe_init( zone* z = new (gpr_arena_malloc(
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size)) zone(); GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size)) zone();
arena->last_zone->next = z; arena->last_zone->next = z;
arena->last_zone = z; arena->last_zone = z;

@ -37,7 +37,5 @@ gpr_arena* gpr_arena_create(size_t initial_size);
void* gpr_arena_alloc(gpr_arena* arena, size_t size); void* gpr_arena_alloc(gpr_arena* arena, size_t size);
// Destroy an arena, returning the total number of bytes allocated // Destroy an arena, returning the total number of bytes allocated
size_t gpr_arena_destroy(gpr_arena* arena); size_t gpr_arena_destroy(gpr_arena* arena);
// Initializes the Arena component.
void gpr_arena_init();
#endif /* GRPC_CORE_LIB_GPR_ARENA_H */ #endif /* GRPC_CORE_LIB_GPR_ARENA_H */
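For reference, a minimal use of the arena API declared above; the sizes are arbitrary, and individual allocations are only reclaimed when the whole arena is destroyed:

#include "src/core/lib/gpr/arena.h"

void ArenaExample() {
  gpr_arena* arena = gpr_arena_create(1024);       // initial zone of 1 KiB
  void* p = gpr_arena_alloc(arena, 64);            // aligned bump allocation
  (void)p;                                         // no per-allocation free
  size_t total_bytes = gpr_arena_destroy(arena);   // frees everything at once
  (void)total_bytes;
}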

@ -1,42 +0,0 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H
#define GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
namespace grpc_core {
class MutexLock {
public:
explicit MutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu); }
~MutexLock() { gpr_mu_unlock(mu_); }
MutexLock(const MutexLock&) = delete;
MutexLock& operator=(const MutexLock&) = delete;
private:
gpr_mu* const mu_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H */

@ -0,0 +1,126 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_GPRPP_SYNC_H
#define GRPC_CORE_LIB_GPRPP_SYNC_H
#include <grpc/impl/codegen/port_platform.h>
#include <grpc/impl/codegen/log.h>
#include <grpc/impl/codegen/sync.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
// The core library is not accessible in C++ codegen headers, and vice versa.
// Thus, we need to have duplicate headers with similar functionality.
// Make sure any change to this file is also reflected in
// include/grpcpp/impl/codegen/sync.h.
//
// Whenever possible, prefer using this file over <grpcpp/impl/codegen/sync.h>
// since this file doesn't rely on g_core_codegen_interface and hence does not
// pay the costs of virtual function calls.
namespace grpc_core {
class Mutex {
public:
Mutex() { gpr_mu_init(&mu_); }
~Mutex() { gpr_mu_destroy(&mu_); }
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
gpr_mu* get() { return &mu_; }
const gpr_mu* get() const { return &mu_; }
private:
gpr_mu mu_;
};
// MutexLock is a std::lock_guard-style RAII wrapper for a Mutex (or raw gpr_mu).
class MutexLock {
public:
explicit MutexLock(Mutex* mu) : mu_(mu->get()) { gpr_mu_lock(mu_); }
explicit MutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu_); }
~MutexLock() { gpr_mu_unlock(mu_); }
MutexLock(const MutexLock&) = delete;
MutexLock& operator=(const MutexLock&) = delete;
private:
gpr_mu* const mu_;
};
class ReleasableMutexLock {
public:
explicit ReleasableMutexLock(Mutex* mu) : mu_(mu->get()) { gpr_mu_lock(mu_); }
explicit ReleasableMutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu_); }
~ReleasableMutexLock() {
if (!released_) gpr_mu_unlock(mu_);
}
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
void Lock() {
GPR_DEBUG_ASSERT(released_);
gpr_mu_lock(mu_);
released_ = false;
}
void Unlock() {
GPR_DEBUG_ASSERT(!released_);
released_ = true;
gpr_mu_unlock(mu_);
}
private:
gpr_mu* const mu_;
bool released_ = false;
};
class CondVar {
public:
CondVar() { gpr_cv_init(&cv_); }
~CondVar() { gpr_cv_destroy(&cv_); }
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
void Signal() { gpr_cv_signal(&cv_); }
void Broadcast() { gpr_cv_broadcast(&cv_); }
int Wait(Mutex* mu) { return Wait(mu, gpr_inf_future(GPR_CLOCK_REALTIME)); }
int Wait(Mutex* mu, const gpr_timespec& deadline) {
return gpr_cv_wait(&cv_, mu->get(), deadline);
}
template <typename Predicate>
void WaitUntil(Mutex* mu, Predicate pred) {
while (!pred()) {
Wait(mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
}
private:
gpr_cv cv_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_SYNC_H */
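A minimal usage sketch of how these wrappers compose (a hypothetical work queue, not part of this change): MutexLock guards the queue, Signal() wakes a consumer, and CondVar::WaitUntil() blocks consumers until work arrives.

#include <queue>

#include "src/core/lib/gprpp/sync.h"

class WorkQueue {
 public:
  void Add(int item) {
    grpc_core::MutexLock lock(&mu_);  // RAII: unlocked at end of scope
    items_.push(item);
    cv_.Signal();                     // wake one waiting consumer
  }

  int Take() {
    grpc_core::MutexLock lock(&mu_);
    // WaitUntil() re-checks the predicate each time the CondVar wakes up;
    // Wait() atomically releases mu_ and reacquires it before returning.
    cv_.WaitUntil(&mu_, [this] { return !items_.empty(); });
    int item = items_.front();
    items_.pop();
    return item;
  }

 private:
  grpc_core::Mutex mu_;
  grpc_core::CondVar cv_;
  std::queue<int> items_;
};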

@ -47,7 +47,7 @@
#include "src/core/lib/gpr/useful.h" #include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/inlined_vector.h" #include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h" #include "src/core/lib/iomgr/is_epollexclusive_available.h"

@ -49,6 +49,8 @@ static bool iomgr_platform_add_closure_to_background_poller(
return false; return false;
} }
bool g_custom_iomgr_enabled = false;
static grpc_iomgr_platform_vtable vtable = { static grpc_iomgr_platform_vtable vtable = {
iomgr_platform_init, iomgr_platform_init,
iomgr_platform_flush, iomgr_platform_flush,
@ -61,6 +63,7 @@ void grpc_custom_iomgr_init(grpc_socket_vtable* socket,
grpc_custom_resolver_vtable* resolver, grpc_custom_resolver_vtable* resolver,
grpc_custom_timer_vtable* timer, grpc_custom_timer_vtable* timer,
grpc_custom_poller_vtable* poller) { grpc_custom_poller_vtable* poller) {
g_custom_iomgr_enabled = true;
grpc_custom_endpoint_init(socket); grpc_custom_endpoint_init(socket);
grpc_custom_timer_init(timer); grpc_custom_timer_init(timer);
grpc_custom_pollset_init(poller); grpc_custom_pollset_init(poller);

@ -39,6 +39,8 @@ extern gpr_thd_id g_init_thread;
#define GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD() #define GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD()
#endif /* GRPC_CUSTOM_IOMGR_THREAD_CHECK */ #endif /* GRPC_CUSTOM_IOMGR_THREAD_CHECK */
extern bool g_custom_iomgr_enabled;
void grpc_custom_iomgr_init(grpc_socket_vtable* socket, void grpc_custom_iomgr_init(grpc_socket_vtable* socket,
grpc_custom_resolver_vtable* resolver, grpc_custom_resolver_vtable* resolver,
grpc_custom_timer_vtable* timer, grpc_custom_timer_vtable* timer,

@ -33,7 +33,7 @@
#include "src/core/lib/debug/stats.h" #include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/fork.h" #include "src/core/lib/gprpp/fork.h"
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/http/parser.h" #include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
@ -133,7 +133,6 @@ void grpc_init(void) {
} }
grpc_core::Fork::GlobalInit(); grpc_core::Fork::GlobalInit();
grpc_fork_handlers_auto_register(); grpc_fork_handlers_auto_register();
gpr_arena_init();
grpc_stats_init(); grpc_stats_init();
grpc_slice_intern_init(); grpc_slice_intern_init();
grpc_mdctx_global_init(); grpc_mdctx_global_init();

@ -18,7 +18,7 @@
#include <grpc/support/port_platform.h> #include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/ssl/session_cache/ssl_session.h" #include "src/core/tsi/ssl/session_cache/ssl_session.h"
#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h" #include "src/core/tsi/ssl/session_cache/ssl_session_cache.h"

@ -232,7 +232,7 @@ class ShutdownCallback : public grpc_experimental_completion_queue_functor {
CompletionQueue* Channel::CallbackCQ() { CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ // TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered // if there is no explicit per-channel CQ registered
std::lock_guard<std::mutex> l(mu_); grpc::internal::MutexLock l(&mu_);
if (callback_cq_ == nullptr) { if (callback_cq_ == nullptr) {
auto* shutdown_callback = new ShutdownCallback; auto* shutdown_callback = new ShutdownCallback;
callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{ callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{

@ -25,6 +25,7 @@
#include <grpc/support/string_util.h> #include <grpc/support/string_util.h>
#include <grpcpp/impl/codegen/interceptor_common.h> #include <grpcpp/impl/codegen/interceptor_common.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/impl/grpc_library.h> #include <grpcpp/impl/grpc_library.h>
#include <grpcpp/security/credentials.h> #include <grpcpp/security/credentials.h>
#include <grpcpp/server_context.h> #include <grpcpp/server_context.h>
@ -84,7 +85,7 @@ void ClientContext::AddMetadata(const grpc::string& meta_key,
void ClientContext::set_call(grpc_call* call, void ClientContext::set_call(grpc_call* call,
const std::shared_ptr<Channel>& channel) { const std::shared_ptr<Channel>& channel) {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
GPR_ASSERT(call_ == nullptr); GPR_ASSERT(call_ == nullptr);
call_ = call; call_ = call;
channel_ = channel; channel_ = channel;
@ -114,7 +115,7 @@ void ClientContext::set_compression_algorithm(
} }
void ClientContext::TryCancel() { void ClientContext::TryCancel() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
if (call_) { if (call_) {
SendCancelToInterceptors(); SendCancelToInterceptors();
grpc_call_cancel(call_, nullptr); grpc_call_cancel(call_, nullptr);

@ -21,6 +21,7 @@
#include <mutex> #include <mutex>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpcpp/impl/codegen/sync.h>
#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/gprpp/thd.h"
@ -40,27 +41,27 @@ DynamicThreadPool::DynamicThread::~DynamicThread() { thd_.Join(); }
void DynamicThreadPool::DynamicThread::ThreadFunc() { void DynamicThreadPool::DynamicThread::ThreadFunc() {
pool_->ThreadFunc(); pool_->ThreadFunc();
// Now that we have killed ourselves, we should reduce the thread count // Now that we have killed ourselves, we should reduce the thread count
std::unique_lock<std::mutex> lock(pool_->mu_); grpc_core::MutexLock lock(&pool_->mu_);
pool_->nthreads_--; pool_->nthreads_--;
// Move ourselves to dead list // Move ourselves to dead list
pool_->dead_threads_.push_back(this); pool_->dead_threads_.push_back(this);
if ((pool_->shutdown_) && (pool_->nthreads_ == 0)) { if ((pool_->shutdown_) && (pool_->nthreads_ == 0)) {
pool_->shutdown_cv_.notify_one(); pool_->shutdown_cv_.Signal();
} }
} }
void DynamicThreadPool::ThreadFunc() { void DynamicThreadPool::ThreadFunc() {
for (;;) { for (;;) {
// Wait until work is available or we are shutting down. // Wait until work is available or we are shutting down.
std::unique_lock<std::mutex> lock(mu_); grpc_core::ReleasableMutexLock lock(&mu_);
if (!shutdown_ && callbacks_.empty()) { if (!shutdown_ && callbacks_.empty()) {
// If there are too many threads waiting, then quit this thread // If there are too many threads waiting, then quit this thread
if (threads_waiting_ >= reserve_threads_) { if (threads_waiting_ >= reserve_threads_) {
break; break;
} }
threads_waiting_++; threads_waiting_++;
cv_.wait(lock); cv_.Wait(&mu_);
threads_waiting_--; threads_waiting_--;
} }
// Drain callbacks before considering shutdown to ensure all work // Drain callbacks before considering shutdown to ensure all work
@ -68,7 +69,7 @@ void DynamicThreadPool::ThreadFunc() {
if (!callbacks_.empty()) { if (!callbacks_.empty()) {
auto cb = callbacks_.front(); auto cb = callbacks_.front();
callbacks_.pop(); callbacks_.pop();
lock.unlock(); lock.Unlock();
cb(); cb();
} else if (shutdown_) { } else if (shutdown_) {
break; break;
@ -82,7 +83,7 @@ DynamicThreadPool::DynamicThreadPool(int reserve_threads)
nthreads_(0), nthreads_(0),
threads_waiting_(0) { threads_waiting_(0) {
for (int i = 0; i < reserve_threads_; i++) { for (int i = 0; i < reserve_threads_; i++) {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
nthreads_++; nthreads_++;
new DynamicThread(this); new DynamicThread(this);
} }
@ -95,17 +96,17 @@ void DynamicThreadPool::ReapThreads(std::list<DynamicThread*>* tlist) {
} }
DynamicThreadPool::~DynamicThreadPool() { DynamicThreadPool::~DynamicThreadPool() {
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
shutdown_ = true; shutdown_ = true;
cv_.notify_all(); cv_.Broadcast();
while (nthreads_ != 0) { while (nthreads_ != 0) {
shutdown_cv_.wait(lock); shutdown_cv_.Wait(&mu_);
} }
ReapThreads(&dead_threads_); ReapThreads(&dead_threads_);
} }
void DynamicThreadPool::Add(const std::function<void()>& callback) { void DynamicThreadPool::Add(const std::function<void()>& callback) {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
// Add works to the callbacks list // Add works to the callbacks list
callbacks_.push(callback); callbacks_.push(callback);
// Increase pool size or notify as needed // Increase pool size or notify as needed
@ -114,7 +115,7 @@ void DynamicThreadPool::Add(const std::function<void()>& callback) {
nthreads_++; nthreads_++;
new DynamicThread(this); new DynamicThread(this);
} else { } else {
cv_.notify_one(); cv_.Signal();
} }
// Also use this chance to harvest dead threads // Also use this chance to harvest dead threads
if (!dead_threads_.empty()) { if (!dead_threads_.empty()) {

@ -27,6 +27,7 @@
#include <grpcpp/support/config.h> #include <grpcpp/support/config.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/gprpp/thd.h"
#include "src/cpp/server/thread_pool_interface.h" #include "src/cpp/server/thread_pool_interface.h"
@ -50,9 +51,9 @@ class DynamicThreadPool final : public ThreadPoolInterface {
grpc_core::Thread thd_; grpc_core::Thread thd_;
void ThreadFunc(); void ThreadFunc();
}; };
std::mutex mu_; grpc_core::Mutex mu_;
std::condition_variable cv_; grpc_core::CondVar cv_;
std::condition_variable shutdown_cv_; grpc_core::CondVar shutdown_cv_;
bool shutdown_; bool shutdown_;
std::queue<std::function<void()>> callbacks_; std::queue<std::function<void()>> callbacks_;
int reserve_threads_; int reserve_threads_;

@ -41,7 +41,7 @@ DefaultHealthCheckService::DefaultHealthCheckService() {
void DefaultHealthCheckService::SetServingStatus( void DefaultHealthCheckService::SetServingStatus(
const grpc::string& service_name, bool serving) { const grpc::string& service_name, bool serving) {
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
if (shutdown_) { if (shutdown_) {
// Set to NOT_SERVING in case service_name is not in the map. // Set to NOT_SERVING in case service_name is not in the map.
serving = false; serving = false;
@ -51,7 +51,7 @@ void DefaultHealthCheckService::SetServingStatus(
void DefaultHealthCheckService::SetServingStatus(bool serving) { void DefaultHealthCheckService::SetServingStatus(bool serving) {
const ServingStatus status = serving ? SERVING : NOT_SERVING; const ServingStatus status = serving ? SERVING : NOT_SERVING;
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
if (shutdown_) { if (shutdown_) {
return; return;
} }
@ -62,7 +62,7 @@ void DefaultHealthCheckService::SetServingStatus(bool serving) {
} }
void DefaultHealthCheckService::Shutdown() { void DefaultHealthCheckService::Shutdown() {
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
if (shutdown_) { if (shutdown_) {
return; return;
} }
@ -76,7 +76,7 @@ void DefaultHealthCheckService::Shutdown() {
DefaultHealthCheckService::ServingStatus DefaultHealthCheckService::ServingStatus
DefaultHealthCheckService::GetServingStatus( DefaultHealthCheckService::GetServingStatus(
const grpc::string& service_name) const { const grpc::string& service_name) const {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
auto it = services_map_.find(service_name); auto it = services_map_.find(service_name);
if (it == services_map_.end()) { if (it == services_map_.end()) {
return NOT_FOUND; return NOT_FOUND;
@ -88,7 +88,7 @@ DefaultHealthCheckService::GetServingStatus(
void DefaultHealthCheckService::RegisterCallHandler( void DefaultHealthCheckService::RegisterCallHandler(
const grpc::string& service_name, const grpc::string& service_name,
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) { std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
ServiceData& service_data = services_map_[service_name]; ServiceData& service_data = services_map_[service_name];
service_data.AddCallHandler(handler /* copies ref */); service_data.AddCallHandler(handler /* copies ref */);
HealthCheckServiceImpl::CallHandler* h = handler.get(); HealthCheckServiceImpl::CallHandler* h = handler.get();
@ -98,7 +98,7 @@ void DefaultHealthCheckService::RegisterCallHandler(
void DefaultHealthCheckService::UnregisterCallHandler( void DefaultHealthCheckService::UnregisterCallHandler(
const grpc::string& service_name, const grpc::string& service_name,
const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler) { const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler) {
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
auto it = services_map_.find(service_name); auto it = services_map_.find(service_name);
if (it == services_map_.end()) return; if (it == services_map_.end()) return;
ServiceData& service_data = it->second; ServiceData& service_data = it->second;
@ -166,7 +166,7 @@ DefaultHealthCheckService::HealthCheckServiceImpl::~HealthCheckServiceImpl() {
// We will reach here after the server starts shutting down. // We will reach here after the server starts shutting down.
shutdown_ = true; shutdown_ = true;
{ {
std::unique_lock<std::mutex> lock(cq_shutdown_mu_); grpc_core::MutexLock lock(&cq_shutdown_mu_);
cq_->Shutdown(); cq_->Shutdown();
} }
thread_->Join(); thread_->Join();
@ -266,7 +266,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
std::make_shared<CheckCallHandler>(cq, database, service); std::make_shared<CheckCallHandler>(cq, database, service);
CheckCallHandler* handler = static_cast<CheckCallHandler*>(self.get()); CheckCallHandler* handler = static_cast<CheckCallHandler*>(self.get());
{ {
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_); grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
if (service->shutdown_) return; if (service->shutdown_) return;
// Request a Check() call. // Request a Check() call.
handler->next_ = handler->next_ =
@ -311,7 +311,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
} }
// Send response. // Send response.
{ {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_); grpc_core::MutexLock lock(&service_->cq_shutdown_mu_);
if (!service_->shutdown_) { if (!service_->shutdown_) {
next_ = next_ =
CallableTag(std::bind(&CheckCallHandler::OnFinishDone, this, CallableTag(std::bind(&CheckCallHandler::OnFinishDone, this,
@ -347,7 +347,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
std::make_shared<WatchCallHandler>(cq, database, service); std::make_shared<WatchCallHandler>(cq, database, service);
WatchCallHandler* handler = static_cast<WatchCallHandler*>(self.get()); WatchCallHandler* handler = static_cast<WatchCallHandler*>(self.get());
{ {
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_); grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
if (service->shutdown_) return; if (service->shutdown_) return;
// Request AsyncNotifyWhenDone(). // Request AsyncNotifyWhenDone().
handler->on_done_notified_ = handler->on_done_notified_ =
@ -402,7 +402,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler:: void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendHealth(std::shared_ptr<CallHandler> self, ServingStatus status) { SendHealth(std::shared_ptr<CallHandler> self, ServingStatus status) {
std::unique_lock<std::mutex> lock(send_mu_); grpc_core::MutexLock lock(&send_mu_);
// If there's already a send in flight, cache the new status, and // If there's already a send in flight, cache the new status, and
// we'll start a new send for it when the one in flight completes. // we'll start a new send for it when the one in flight completes.
if (send_in_flight_) { if (send_in_flight_) {
@ -420,7 +420,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
ByteBuffer response; ByteBuffer response;
bool success = service_->EncodeResponse(status, &response); bool success = service_->EncodeResponse(status, &response);
// Grab shutdown lock and send response. // Grab shutdown lock and send response.
std::unique_lock<std::mutex> cq_lock(service_->cq_shutdown_mu_); grpc_core::MutexLock cq_lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) { if (service_->shutdown_) {
SendFinishLocked(std::move(self), Status::CANCELLED); SendFinishLocked(std::move(self), Status::CANCELLED);
return; return;
@ -442,7 +442,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendFinish(std::move(self), Status::CANCELLED); SendFinish(std::move(self), Status::CANCELLED);
return; return;
} }
std::unique_lock<std::mutex> lock(send_mu_); grpc_core::MutexLock lock(&send_mu_);
send_in_flight_ = false; send_in_flight_ = false;
// If we got a new status since we started the last send, start a // If we got a new status since we started the last send, start a
// new send for it. // new send for it.
@ -456,7 +456,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler:: void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendFinish(std::shared_ptr<CallHandler> self, const Status& status) { SendFinish(std::shared_ptr<CallHandler> self, const Status& status) {
if (finish_called_) return; if (finish_called_) return;
std::unique_lock<std::mutex> cq_lock(service_->cq_shutdown_mu_); grpc_core::MutexLock cq_lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) return; if (service_->shutdown_) return;
SendFinishLocked(std::move(self), status); SendFinishLocked(std::move(self), status);
} }

@ -31,6 +31,7 @@
#include <grpcpp/impl/codegen/service_type.h> #include <grpcpp/impl/codegen/service_type.h>
#include <grpcpp/support/byte_buffer.h> #include <grpcpp/support/byte_buffer.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/gprpp/thd.h"
namespace grpc { namespace grpc {
@ -197,7 +198,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
GenericServerAsyncWriter stream_; GenericServerAsyncWriter stream_;
ServerContext ctx_; ServerContext ctx_;
std::mutex send_mu_; grpc_core::Mutex send_mu_;
bool send_in_flight_ = false; // Guarded by mu_. bool send_in_flight_ = false; // Guarded by mu_.
ServingStatus pending_status_ = NOT_FOUND; // Guarded by mu_. ServingStatus pending_status_ = NOT_FOUND; // Guarded by mu_.
@ -226,7 +227,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
// To synchronize the operations related to shutdown state of cq_, so that // To synchronize the operations related to shutdown state of cq_, so that
// we don't enqueue new tags into cq_ after it is already shut down. // we don't enqueue new tags into cq_ after it is already shut down.
std::mutex cq_shutdown_mu_; grpc_core::Mutex cq_shutdown_mu_;
std::atomic_bool shutdown_{false}; std::atomic_bool shutdown_{false};
std::unique_ptr<::grpc_core::Thread> thread_; std::unique_ptr<::grpc_core::Thread> thread_;
}; };
@ -273,7 +274,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
const grpc::string& service_name, const grpc::string& service_name,
const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler); const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler);
mutable std::mutex mu_; mutable grpc_core::Mutex mu_;
bool shutdown_ = false; // Guarded by mu_. bool shutdown_ = false; // Guarded by mu_.
std::map<grpc::string, ServiceData> services_map_; // Guarded by mu_. std::map<grpc::string, ServiceData> services_map_; // Guarded by mu_.
std::unique_ptr<HealthCheckServiceImpl> impl_; std::unique_ptr<HealthCheckServiceImpl> impl_;

@ -239,7 +239,7 @@ grpc::string LoadReporter::GenerateLbId() {
::grpc::lb::v1::LoadBalancingFeedback ::grpc::lb::v1::LoadBalancingFeedback
LoadReporter::GenerateLoadBalancingFeedback() { LoadReporter::GenerateLoadBalancingFeedback() {
std::unique_lock<std::mutex> lock(feedback_mu_); grpc_core::ReleasableMutexLock lock(&feedback_mu_);
auto now = std::chrono::system_clock::now(); auto now = std::chrono::system_clock::now();
// Discard records outside the window until there is only one record // Discard records outside the window until there is only one record
// outside the window, which is used as the base for difference. // outside the window, which is used as the base for difference.
@ -277,7 +277,7 @@ LoadReporter::GenerateLoadBalancingFeedback() {
double cpu_limit = newest->cpu_limit - oldest->cpu_limit; double cpu_limit = newest->cpu_limit - oldest->cpu_limit;
std::chrono::duration<double> duration_seconds = std::chrono::duration<double> duration_seconds =
newest->end_time - oldest->end_time; newest->end_time - oldest->end_time;
lock.unlock(); lock.Unlock();
::grpc::lb::v1::LoadBalancingFeedback feedback; ::grpc::lb::v1::LoadBalancingFeedback feedback;
feedback.set_server_utilization(static_cast<float>(cpu_usage / cpu_limit)); feedback.set_server_utilization(static_cast<float>(cpu_usage / cpu_limit));
feedback.set_calls_per_second( feedback.set_calls_per_second(
@ -290,7 +290,7 @@ LoadReporter::GenerateLoadBalancingFeedback() {
::google::protobuf::RepeatedPtrField<::grpc::lb::v1::Load> ::google::protobuf::RepeatedPtrField<::grpc::lb::v1::Load>
LoadReporter::GenerateLoads(const grpc::string& hostname, LoadReporter::GenerateLoads(const grpc::string& hostname,
const grpc::string& lb_id) { const grpc::string& lb_id) {
std::lock_guard<std::mutex> lock(store_mu_); grpc_core::MutexLock lock(&store_mu_);
auto assigned_stores = load_data_store_.GetAssignedStores(hostname, lb_id); auto assigned_stores = load_data_store_.GetAssignedStores(hostname, lb_id);
GPR_ASSERT(assigned_stores != nullptr); GPR_ASSERT(assigned_stores != nullptr);
GPR_ASSERT(!assigned_stores->empty()); GPR_ASSERT(!assigned_stores->empty());
@ -371,7 +371,7 @@ void LoadReporter::AppendNewFeedbackRecord(uint64_t rpcs, uint64_t errors) {
// This will make the load balancing feedback generation a no-op. // This will make the load balancing feedback generation a no-op.
cpu_stats = {0, 0}; cpu_stats = {0, 0};
} }
std::unique_lock<std::mutex> lock(feedback_mu_); grpc_core::MutexLock lock(&feedback_mu_);
feedback_records_.emplace_back(std::chrono::system_clock::now(), rpcs, errors, feedback_records_.emplace_back(std::chrono::system_clock::now(), rpcs, errors,
cpu_stats.first, cpu_stats.second); cpu_stats.first, cpu_stats.second);
} }
@ -379,7 +379,7 @@ void LoadReporter::AppendNewFeedbackRecord(uint64_t rpcs, uint64_t errors) {
void LoadReporter::ReportStreamCreated(const grpc::string& hostname, void LoadReporter::ReportStreamCreated(const grpc::string& hostname,
const grpc::string& lb_id, const grpc::string& lb_id,
const grpc::string& load_key) { const grpc::string& load_key) {
std::lock_guard<std::mutex> lock(store_mu_); grpc_core::MutexLock lock(&store_mu_);
load_data_store_.ReportStreamCreated(hostname, lb_id, load_key); load_data_store_.ReportStreamCreated(hostname, lb_id, load_key);
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
"[LR %p] Report stream created (host: %s, LB ID: %s, load key: %s).", "[LR %p] Report stream created (host: %s, LB ID: %s, load key: %s).",
@ -388,7 +388,7 @@ void LoadReporter::ReportStreamCreated(const grpc::string& hostname,
void LoadReporter::ReportStreamClosed(const grpc::string& hostname, void LoadReporter::ReportStreamClosed(const grpc::string& hostname,
const grpc::string& lb_id) { const grpc::string& lb_id) {
std::lock_guard<std::mutex> lock(store_mu_); grpc_core::MutexLock lock(&store_mu_);
load_data_store_.ReportStreamClosed(hostname, lb_id); load_data_store_.ReportStreamClosed(hostname, lb_id);
gpr_log(GPR_INFO, "[LR %p] Report stream closed (host: %s, LB ID: %s).", this, gpr_log(GPR_INFO, "[LR %p] Report stream closed (host: %s, LB ID: %s).", this,
hostname.c_str(), lb_id.c_str()); hostname.c_str(), lb_id.c_str());
@ -407,7 +407,7 @@ void LoadReporter::ProcessViewDataCallStart(
LoadRecordKey key(client_ip_and_token, user_id); LoadRecordKey key(client_ip_and_token, user_id);
LoadRecordValue value = LoadRecordValue(start_count); LoadRecordValue value = LoadRecordValue(start_count);
{ {
std::unique_lock<std::mutex> lock(store_mu_); grpc_core::MutexLock lock(&store_mu_);
load_data_store_.MergeRow(host, key, value); load_data_store_.MergeRow(host, key, value);
} }
} }
@ -459,7 +459,7 @@ void LoadReporter::ProcessViewDataCallEnd(
LoadRecordValue value = LoadRecordValue( LoadRecordValue value = LoadRecordValue(
0, ok_count, error_count, bytes_sent, bytes_received, latency_ms); 0, ok_count, error_count, bytes_sent, bytes_received, latency_ms);
{ {
std::unique_lock<std::mutex> lock(store_mu_); grpc_core::MutexLock lock(&store_mu_);
load_data_store_.MergeRow(host, key, value); load_data_store_.MergeRow(host, key, value);
} }
} }
@ -486,7 +486,7 @@ void LoadReporter::ProcessViewDataOtherCallMetrics(
LoadRecordValue value = LoadRecordValue( LoadRecordValue value = LoadRecordValue(
metric_name, static_cast<uint64_t>(num_calls), total_metric_value); metric_name, static_cast<uint64_t>(num_calls), total_metric_value);
{ {
std::unique_lock<std::mutex> lock(store_mu_); grpc_core::MutexLock lock(&store_mu_);
load_data_store_.MergeRow(host, key, value); load_data_store_.MergeRow(host, key, value);
} }
} }

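GenerateLoadBalancingFeedback() above keeps its early-release behaviour by replacing std::unique_lock with grpc_core::ReleasableMutexLock: Unlock() ends the critical section before the feedback message is assembled, while the plain MutexLock sites simply hold the lock for the whole scope. A sketch of the early-release shape, with hypothetical names standing in for the guarded records:

  #include "src/core/lib/gprpp/sync.h"

  grpc_core::Mutex feedback_mu_;
  int records_ = 0;  // Guarded by feedback_mu_.

  void BuildFeedback() {
    grpc_core::ReleasableMutexLock lock(&feedback_mu_);
    int snapshot = records_;  // read guarded state while holding feedback_mu_
    lock.Unlock();            // release early; the rest never touches guarded state
    // ... assemble the feedback message from `snapshot` without holding the lock ...
    (void)snapshot;
  }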
@ -29,6 +29,7 @@
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpcpp/impl/codegen/config.h> #include <grpcpp/impl/codegen/config.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/cpp/server/load_reporter/load_data_store.h" #include "src/cpp/server/load_reporter/load_data_store.h"
#include "src/proto/grpc/lb/v1/load_reporter.grpc.pb.h" #include "src/proto/grpc/lb/v1/load_reporter.grpc.pb.h"
@ -212,11 +213,11 @@ class LoadReporter {
std::atomic<int64_t> next_lb_id_{0}; std::atomic<int64_t> next_lb_id_{0};
const std::chrono::seconds feedback_sample_window_seconds_; const std::chrono::seconds feedback_sample_window_seconds_;
std::mutex feedback_mu_; grpc_core::Mutex feedback_mu_;
std::deque<LoadBalancingFeedbackRecord> feedback_records_; std::deque<LoadBalancingFeedbackRecord> feedback_records_;
// TODO(juanlishen): Lock in finer grain. Locking the whole store may be // TODO(juanlishen): Lock in finer grain. Locking the whole store may be
// too expensive. // too expensive.
std::mutex store_mu_; grpc_core::Mutex store_mu_;
LoadDataStore load_data_store_; LoadDataStore load_data_store_;
std::unique_ptr<CensusViewProvider> census_view_provider_; std::unique_ptr<CensusViewProvider> census_view_provider_;
std::unique_ptr<CpuStatsProvider> cpu_stats_provider_; std::unique_ptr<CpuStatsProvider> cpu_stats_provider_;

@ -48,7 +48,7 @@ LoadReporterAsyncServiceImpl::~LoadReporterAsyncServiceImpl() {
// We will reach here after the server starts shutting down. // We will reach here after the server starts shutting down.
shutdown_ = true; shutdown_ = true;
{ {
std::unique_lock<std::mutex> lock(cq_shutdown_mu_); grpc_core::MutexLock lock(&cq_shutdown_mu_);
cq_->Shutdown(); cq_->Shutdown();
} }
if (next_fetch_and_sample_alarm_ != nullptr) if (next_fetch_and_sample_alarm_ != nullptr)
@ -62,7 +62,7 @@ void LoadReporterAsyncServiceImpl::ScheduleNextFetchAndSample() {
gpr_time_from_millis(kFetchAndSampleIntervalSeconds * 1000, gpr_time_from_millis(kFetchAndSampleIntervalSeconds * 1000,
GPR_TIMESPAN)); GPR_TIMESPAN));
{ {
std::unique_lock<std::mutex> lock(cq_shutdown_mu_); grpc_core::MutexLock lock(&cq_shutdown_mu_);
if (shutdown_) return; if (shutdown_) return;
// TODO(juanlishen): Improve the Alarm implementation to reuse a single // TODO(juanlishen): Improve the Alarm implementation to reuse a single
// instance for multiple events. // instance for multiple events.
@ -119,7 +119,7 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::CreateAndStart(
std::make_shared<ReportLoadHandler>(cq, service, load_reporter); std::make_shared<ReportLoadHandler>(cq, service, load_reporter);
ReportLoadHandler* p = handler.get(); ReportLoadHandler* p = handler.get();
{ {
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_); grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
if (service->shutdown_) return; if (service->shutdown_) return;
p->on_done_notified_ = p->on_done_notified_ =
CallableTag(std::bind(&ReportLoadHandler::OnDoneNotified, p, CallableTag(std::bind(&ReportLoadHandler::OnDoneNotified, p,
@ -164,9 +164,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnRequestDelivered(
// instance will deallocate itself when it's done. // instance will deallocate itself when it's done.
CreateAndStart(cq_, service_, load_reporter_); CreateAndStart(cq_, service_, load_reporter_);
{ {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_); grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) { if (service_->shutdown_) {
lock.release()->unlock(); lock.Unlock();
Shutdown(std::move(self), "OnRequestDelivered"); Shutdown(std::move(self), "OnRequestDelivered");
return; return;
} }
@ -222,9 +222,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone(
SendReport(self, true /* ok */); SendReport(self, true /* ok */);
// Expect this read to fail. // Expect this read to fail.
{ {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_); grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) { if (service_->shutdown_) {
lock.release()->unlock(); lock.Unlock();
Shutdown(std::move(self), "OnReadDone"); Shutdown(std::move(self), "OnReadDone");
return; return;
} }
@ -254,9 +254,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::ScheduleNextReport(
gpr_now(GPR_CLOCK_MONOTONIC), gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(load_report_interval_ms_, GPR_TIMESPAN)); gpr_time_from_millis(load_report_interval_ms_, GPR_TIMESPAN));
{ {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_); grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) { if (service_->shutdown_) {
lock.release()->unlock(); lock.Unlock();
Shutdown(std::move(self), "ScheduleNextReport"); Shutdown(std::move(self), "ScheduleNextReport");
return; return;
} }
@ -294,9 +294,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::SendReport(
call_status_ = INITIAL_RESPONSE_SENT; call_status_ = INITIAL_RESPONSE_SENT;
} }
{ {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_); grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) { if (service_->shutdown_) {
lock.release()->unlock(); lock.Unlock();
Shutdown(std::move(self), "SendReport"); Shutdown(std::move(self), "SendReport");
return; return;
} }
@ -342,7 +342,7 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::Shutdown(
// OnRequestDelivered() may be called after OnDoneNotified(), so we need to // OnRequestDelivered() may be called after OnDoneNotified(), so we need to
// try to Finish() every time we are in Shutdown(). // try to Finish() every time we are in Shutdown().
if (call_status_ >= DELIVERED && call_status_ < FINISH_CALLED) { if (call_status_ >= DELIVERED && call_status_ < FINISH_CALLED) {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_); grpc_core::MutexLock lock(&service_->cq_shutdown_mu_);
if (!service_->shutdown_) { if (!service_->shutdown_) {
on_finish_done_ = on_finish_done_ =
CallableTag(std::bind(&ReportLoadHandler::OnFinishDone, this, CallableTag(std::bind(&ReportLoadHandler::OnFinishDone, this,

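In the handler methods above, the old idiom lock.release()->unlock() (give up ownership of the std::unique_lock, then unlock manually) collapses into a single Unlock() on the grpc_core::ReleasableMutexLock before the shutdown path is taken. A sketch of that converted control flow; the Shutdown() call is left as a comment since the real one takes handler-specific arguments:

  #include "src/core/lib/gprpp/sync.h"

  grpc_core::Mutex cq_shutdown_mu_;
  bool shutdown_ = false;  // Guarded by cq_shutdown_mu_.

  void OnEvent() {
    grpc_core::ReleasableMutexLock lock(&cq_shutdown_mu_);
    if (shutdown_) {
      lock.Unlock();  // drop the lock before entering the shutdown path
      // Shutdown(...);
      return;
    }
    // ... still holding the lock: safe to enqueue the next tag into cq_ ...
  }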
@ -25,6 +25,7 @@
#include <grpcpp/alarm.h> #include <grpcpp/alarm.h>
#include <grpcpp/grpcpp.h> #include <grpcpp/grpcpp.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/gprpp/thd.h"
#include "src/cpp/server/load_reporter/load_reporter.h" #include "src/cpp/server/load_reporter/load_reporter.h"
@ -181,7 +182,7 @@ class LoadReporterAsyncServiceImpl
std::unique_ptr<ServerCompletionQueue> cq_; std::unique_ptr<ServerCompletionQueue> cq_;
// To synchronize the operations related to shutdown state of cq_, so that we // To synchronize the operations related to shutdown state of cq_, so that we
// don't enqueue new tags into cq_ after it is already shut down. // don't enqueue new tags into cq_ after it is already shut down.
std::mutex cq_shutdown_mu_; grpc_core::Mutex cq_shutdown_mu_;
std::atomic_bool shutdown_{false}; std::atomic_bool shutdown_{false};
std::unique_ptr<::grpc_core::Thread> thread_; std::unique_ptr<::grpc_core::Thread> thread_;
std::unique_ptr<LoadReporter> load_reporter_; std::unique_ptr<LoadReporter> load_reporter_;

@ -388,9 +388,9 @@ class Server::CallbackRequest final : public Server::CallbackRequestBase {
// The counter of outstanding requests must be decremented // The counter of outstanding requests must be decremented
// under a lock in case it causes the server shutdown. // under a lock in case it causes the server shutdown.
std::lock_guard<std::mutex> l(server_->callback_reqs_mu_); grpc::internal::MutexLock l(&server_->callback_reqs_mu_);
if (--server_->callback_reqs_outstanding_ == 0) { if (--server_->callback_reqs_outstanding_ == 0) {
server_->callback_reqs_done_cv_.notify_one(); server_->callback_reqs_done_cv_.Signal();
} }
} }
@ -814,12 +814,12 @@ Server::Server(
Server::~Server() { Server::~Server() {
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::ReleasableMutexLock lock(&mu_);
if (callback_cq_ != nullptr) { if (callback_cq_ != nullptr) {
callback_cq_->Shutdown(); callback_cq_->Shutdown();
} }
if (started_ && !shutdown_) { if (started_ && !shutdown_) {
lock.unlock(); lock.Unlock();
Shutdown(); Shutdown();
} else if (!started_) { } else if (!started_) {
// Shutdown the completion queues // Shutdown the completion queues
@ -1051,7 +1051,7 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
} }
void Server::ShutdownInternal(gpr_timespec deadline) { void Server::ShutdownInternal(gpr_timespec deadline) {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
if (shutdown_) { if (shutdown_) {
return; return;
} }
@ -1102,9 +1102,9 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
// will report a failure, indicating a shutdown and again we won't end // will report a failure, indicating a shutdown and again we won't end
// up incrementing the counter. // up incrementing the counter.
{ {
std::unique_lock<std::mutex> cblock(callback_reqs_mu_); grpc::internal::MutexLock cblock(&callback_reqs_mu_);
callback_reqs_done_cv_.wait( callback_reqs_done_cv_.WaitUntil(
cblock, [this] { return callback_reqs_outstanding_ == 0; }); &callback_reqs_mu_, [this] { return callback_reqs_outstanding_ == 0; });
} }
// Drain the shutdown queue (if the previous call to AsyncNext() timed out // Drain the shutdown queue (if the previous call to AsyncNext() timed out
@ -1114,13 +1114,13 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
} }
shutdown_notified_ = true; shutdown_notified_ = true;
shutdown_cv_.notify_all(); shutdown_cv_.Broadcast();
} }
void Server::Wait() { void Server::Wait() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
while (started_ && !shutdown_notified_) { while (started_ && !shutdown_notified_) {
shutdown_cv_.wait(lock); shutdown_cv_.Wait(&mu_);
} }
} }
@ -1322,7 +1322,7 @@ class ShutdownCallback : public grpc_experimental_completion_queue_functor {
CompletionQueue* Server::CallbackCQ() { CompletionQueue* Server::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ // TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-server CQ registered // if there is no explicit per-server CQ registered
std::lock_guard<std::mutex> l(mu_); grpc::internal::MutexLock l(&mu_);
if (callback_cq_ == nullptr) { if (callback_cq_ == nullptr) {
auto* shutdown_callback = new ShutdownCallback; auto* shutdown_callback = new ShutdownCallback;
callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{ callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{

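Server::ShutdownInternal() and Server::Wait() above also show the condition-variable side of the new API: grpc::internal::CondVar takes the Mutex by pointer, Wait(&mu) replaces wait(lock), WaitUntil(&mu, pred) replaces the predicate overload, and Signal()/Broadcast() replace notify_one()/notify_all(). A compact sketch of the drain-and-notify pattern (the counter name is illustrative):

  #include <grpcpp/impl/codegen/sync.h>

  struct DrainTracker {
    grpc::internal::Mutex mu;
    grpc::internal::CondVar done_cv;
    int outstanding = 0;  // Guarded by mu.

    void WaitForDrain() {
      grpc::internal::MutexLock lock(&mu);
      // Predicate form, equivalent to std::condition_variable::wait(lock, pred).
      done_cv.WaitUntil(&mu, [this] { return outstanding == 0; });
    }

    void OneDone() {
      grpc::internal::MutexLock lock(&mu);
      if (--outstanding == 0) done_cv.Signal();  // Broadcast() would wake all waiters
    }
  };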
@ -33,6 +33,7 @@
#include <grpcpp/support/time.h> #include <grpcpp/support/time.h>
#include "src/core/lib/gprpp/ref_counted.h" #include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/surface/call.h" #include "src/core/lib/surface/call.h"
namespace grpc { namespace grpc {
@ -96,7 +97,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
} }
void SetCancelCallback(std::function<void()> callback) { void SetCancelCallback(std::function<void()> callback) {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
if (finalized_ && (cancelled_ != 0)) { if (finalized_ && (cancelled_ != 0)) {
callback(); callback();
@ -107,7 +108,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
} }
void ClearCancelCallback() { void ClearCancelCallback() {
std::lock_guard<std::mutex> g(mu_); grpc_core::MutexLock g(&mu_);
cancel_callback_ = nullptr; cancel_callback_ = nullptr;
} }
@ -144,7 +145,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
private: private:
bool CheckCancelledNoPluck() { bool CheckCancelledNoPluck() {
std::lock_guard<std::mutex> g(mu_); grpc_core::MutexLock lock(&mu_);
return finalized_ ? (cancelled_ != 0) : false; return finalized_ ? (cancelled_ != 0) : false;
} }
@ -154,7 +155,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
void* tag_; void* tag_;
void* core_cq_tag_; void* core_cq_tag_;
grpc_core::RefCount refs_; grpc_core::RefCount refs_;
std::mutex mu_; grpc_core::Mutex mu_;
bool finalized_; bool finalized_;
int cancelled_; // This is an int (not bool) because it is passed to core int cancelled_; // This is an int (not bool) because it is passed to core
std::function<void()> cancel_callback_; std::function<void()> cancel_callback_;
@ -186,7 +187,7 @@ void ServerContext::CompletionOp::FillOps(internal::Call* call) {
bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) { bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
bool ret = false; bool ret = false;
std::unique_lock<std::mutex> lock(mu_); grpc_core::ReleasableMutexLock lock(&mu_);
if (done_intercepting_) { if (done_intercepting_) {
/* We are done intercepting. */ /* We are done intercepting. */
if (has_tag_) { if (has_tag_) {
@ -218,14 +219,12 @@ bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
cancel_callback_(); cancel_callback_();
} }
// Release the lock since we are going to be calling a callback and // Release the lock since we may call a callback and interceptors now.
// interceptors now lock.Unlock();
lock.unlock();
if (call_cancel && reactor_ != nullptr) { if (call_cancel && reactor_ != nullptr) {
reactor_->MaybeCallOnCancel(); reactor_->MaybeCallOnCancel();
} }
/* Add interception point and run through interceptors */ /* Add interception point and run through interceptors */
interceptor_methods_.AddInterceptionHookPoint( interceptor_methods_.AddInterceptionHookPoint(
experimental::InterceptionHookPoints::POST_RECV_CLOSE); experimental::InterceptionHookPoints::POST_RECV_CLOSE);

@ -62,7 +62,7 @@ ThreadManager::ThreadManager(const char* name,
ThreadManager::~ThreadManager() { ThreadManager::~ThreadManager() {
{ {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
GPR_ASSERT(num_threads_ == 0); GPR_ASSERT(num_threads_ == 0);
} }
@ -72,38 +72,38 @@ ThreadManager::~ThreadManager() {
} }
void ThreadManager::Wait() { void ThreadManager::Wait() {
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
while (num_threads_ != 0) { while (num_threads_ != 0) {
shutdown_cv_.wait(lock); shutdown_cv_.Wait(&mu_);
} }
} }
void ThreadManager::Shutdown() { void ThreadManager::Shutdown() {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
shutdown_ = true; shutdown_ = true;
} }
bool ThreadManager::IsShutdown() { bool ThreadManager::IsShutdown() {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
return shutdown_; return shutdown_;
} }
int ThreadManager::GetMaxActiveThreadsSoFar() { int ThreadManager::GetMaxActiveThreadsSoFar() {
std::lock_guard<std::mutex> list_lock(list_mu_); grpc_core::MutexLock list_lock(&list_mu_);
return max_active_threads_sofar_; return max_active_threads_sofar_;
} }
void ThreadManager::MarkAsCompleted(WorkerThread* thd) { void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
{ {
std::lock_guard<std::mutex> list_lock(list_mu_); grpc_core::MutexLock list_lock(&list_mu_);
completed_threads_.push_back(thd); completed_threads_.push_back(thd);
} }
{ {
std::lock_guard<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
num_threads_--; num_threads_--;
if (num_threads_ == 0) { if (num_threads_ == 0) {
shutdown_cv_.notify_one(); shutdown_cv_.Signal();
} }
} }
@ -116,7 +116,7 @@ void ThreadManager::CleanupCompletedThreads() {
{ {
// swap out the completed threads list: allows other threads to clean up // swap out the completed threads list: allows other threads to clean up
// more quickly // more quickly
std::unique_lock<std::mutex> lock(list_mu_); grpc_core::MutexLock lock(&list_mu_);
completed_threads.swap(completed_threads_); completed_threads.swap(completed_threads_);
} }
for (auto thd : completed_threads) delete thd; for (auto thd : completed_threads) delete thd;
@ -132,7 +132,7 @@ void ThreadManager::Initialize() {
} }
{ {
std::unique_lock<std::mutex> lock(mu_); grpc_core::MutexLock lock(&mu_);
num_pollers_ = min_pollers_; num_pollers_ = min_pollers_;
num_threads_ = min_pollers_; num_threads_ = min_pollers_;
max_active_threads_sofar_ = min_pollers_; max_active_threads_sofar_ = min_pollers_;
@ -149,7 +149,7 @@ void ThreadManager::MainWorkLoop() {
bool ok; bool ok;
WorkStatus work_status = PollForWork(&tag, &ok); WorkStatus work_status = PollForWork(&tag, &ok);
std::unique_lock<std::mutex> lock(mu_); grpc_core::ReleasableMutexLock lock(&mu_);
// Reduce the number of pollers by 1 and check what happened with the poll // Reduce the number of pollers by 1 and check what happened with the poll
num_pollers_--; num_pollers_--;
bool done = false; bool done = false;
@ -176,30 +176,30 @@ void ThreadManager::MainWorkLoop() {
max_active_threads_sofar_ = num_threads_; max_active_threads_sofar_ = num_threads_;
} }
// Drop lock before spawning thread to avoid contention // Drop lock before spawning thread to avoid contention
lock.unlock(); lock.Unlock();
new WorkerThread(this); new WorkerThread(this);
} else if (num_pollers_ > 0) { } else if (num_pollers_ > 0) {
// There is still at least some thread polling, so we can go on // There is still at least some thread polling, so we can go on
// even though we are below the number of pollers that we would // even though we are below the number of pollers that we would
// like to have (min_pollers_) // like to have (min_pollers_)
lock.unlock(); lock.Unlock();
} else { } else {
// There are no pollers to spare and we couldn't allocate // There are no pollers to spare and we couldn't allocate
// a new thread, so resources are exhausted! // a new thread, so resources are exhausted!
lock.unlock(); lock.Unlock();
resource_exhausted = true; resource_exhausted = true;
} }
} else { } else {
// There are a sufficient number of pollers available so we can do // There are a sufficient number of pollers available so we can do
// the work and continue polling with our existing poller threads // the work and continue polling with our existing poller threads
lock.unlock(); lock.Unlock();
} }
// Lock is always released at this point - do the application work // Lock is always released at this point - do the application work
// or return resource exhausted if there is new work but we couldn't // or return resource exhausted if there is new work but we couldn't
// get a thread in which to do it. // get a thread in which to do it.
DoWork(tag, ok, !resource_exhausted); DoWork(tag, ok, !resource_exhausted);
// Take the lock again to check post conditions // Take the lock again to check post conditions
lock.lock(); lock.Lock();
// If we're shutdown, we should finish at this point. // If we're shutdown, we should finish at this point.
if (shutdown_) done = true; if (shutdown_) done = true;
break; break;

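ThreadManager::MainWorkLoop() above additionally relies on ReleasableMutexLock being re-lockable: Unlock() drops the mutex before the application work and Lock() reacquires it to check post-conditions, mirroring the old unlock()/lock() calls on std::unique_lock. A sketch of that drop-and-reacquire shape; DoWork() is a placeholder:

  #include "src/core/lib/gprpp/sync.h"

  grpc_core::Mutex mu_;
  bool shutdown_ = false;  // Guarded by mu_.

  bool WorkLoopIteration() {
    grpc_core::ReleasableMutexLock lock(&mu_);
    // ... inspect guarded counters and decide how to proceed ...
    lock.Unlock();  // never hold the lock while doing application work
    // DoWork(tag, ok, ...);  // placeholder for the real work
    lock.Lock();       // reacquire to check post-conditions
    return shutdown_;  // destructor releases the lock on return
  }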
@ -26,6 +26,7 @@
#include <grpcpp/support/config.h> #include <grpcpp/support/config.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/resource_quota.h" #include "src/core/lib/iomgr/resource_quota.h"
@ -140,10 +141,10 @@ class ThreadManager {
// Protects shutdown_, num_pollers_, num_threads_ and // Protects shutdown_, num_pollers_, num_threads_ and
// max_active_threads_sofar_ // max_active_threads_sofar_
std::mutex mu_; grpc_core::Mutex mu_;
bool shutdown_; bool shutdown_;
std::condition_variable shutdown_cv_; grpc_core::CondVar shutdown_cv_;
// The resource user object to use when requesting quota to create threads // The resource user object to use when requesting quota to create threads
// //
@ -169,7 +170,7 @@ class ThreadManager {
// ever set so far // ever set so far
int max_active_threads_sofar_; int max_active_threads_sofar_;
std::mutex list_mu_; grpc_core::Mutex list_mu_;
std::list<WorkerThread*> completed_threads_; std::list<WorkerThread*> completed_threads_;
}; };

@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline) cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline) except *
cdef _interpret_event(grpc_event c_event) cdef _interpret_event(grpc_event c_event)

@ -20,7 +20,7 @@ import time
cdef int _INTERRUPT_CHECK_PERIOD_MS = 200 cdef int _INTERRUPT_CHECK_PERIOD_MS = 200
cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline): cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline) except *:
cdef gpr_timespec c_increment cdef gpr_timespec c_increment
cdef gpr_timespec c_timeout cdef gpr_timespec c_timeout
cdef gpr_timespec c_deadline cdef gpr_timespec c_deadline

@ -154,6 +154,9 @@ class TestGevent(setuptools.Command):
# TODO(https://github.com/grpc/grpc/issues/15411) enable this test # TODO(https://github.com/grpc/grpc/issues/15411) enable this test
'unit._cython._channel_test.ChannelTest.test_negative_deadline_connectivity' 'unit._cython._channel_test.ChannelTest.test_negative_deadline_connectivity'
) )
BANNED_WINDOWS_TESTS = (
# TODO(https://github.com/grpc/grpc/pull/15411) enable this test
'unit._dns_resolver_test.DNSResolverTest.test_connect_loopback',)
description = 'run tests with gevent. Assumes grpc/gevent are installed' description = 'run tests with gevent. Assumes grpc/gevent are installed'
user_options = [] user_options = []
@ -179,7 +182,10 @@ class TestGevent(setuptools.Command):
loader = tests.Loader() loader = tests.Loader()
loader.loadTestsFromNames(['tests']) loader.loadTestsFromNames(['tests'])
runner = tests.Runner() runner = tests.Runner()
runner.skip_tests(self.BANNED_TESTS) if sys.platform == 'win32':
runner.skip_tests(self.BANNED_TESTS + self.BANNED_WINDOWS_TESTS)
else:
runner.skip_tests(self.BANNED_TESTS)
result = gevent.spawn(runner.run, loader.suite) result = gevent.spawn(runner.run, loader.suite)
result.join() result.join()
if not result.value.wasSuccessful(): if not result.value.wasSuccessful():

@ -46,6 +46,7 @@
"unit._cython.cygrpc_test.InsecureServerInsecureClient", "unit._cython.cygrpc_test.InsecureServerInsecureClient",
"unit._cython.cygrpc_test.SecureServerSecureClient", "unit._cython.cygrpc_test.SecureServerSecureClient",
"unit._cython.cygrpc_test.TypeSmokeTest", "unit._cython.cygrpc_test.TypeSmokeTest",
"unit._dns_resolver_test.DNSResolverTest",
"unit._empty_message_test.EmptyMessageTest", "unit._empty_message_test.EmptyMessageTest",
"unit._error_message_encoding_test.ErrorMessageEncodingTest", "unit._error_message_encoding_test.ErrorMessageEncodingTest",
"unit._exit_test.ExitTest", "unit._exit_test.ExitTest",

@ -14,6 +14,7 @@ GRPCIO_TESTS_UNIT = [
"_channel_ready_future_test.py", "_channel_ready_future_test.py",
"_compression_test.py", "_compression_test.py",
"_credentials_test.py", "_credentials_test.py",
"_dns_resolver_test.py",
"_empty_message_test.py", "_empty_message_test.py",
"_exit_test.py", "_exit_test.py",
"_interceptor_test.py", "_interceptor_test.py",

@ -0,0 +1,63 @@
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for an actual dns resolution."""
import unittest
import logging
import six
import grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants
_METHOD = '/ANY/METHOD'
_REQUEST = b'\x00\x00\x00'
_RESPONSE = _REQUEST
class GenericHandler(grpc.GenericRpcHandler):
def service(self, unused_handler_details):
return grpc.unary_unary_rpc_method_handler(
lambda request, unused_context: request,
)
class DNSResolverTest(unittest.TestCase):
def setUp(self):
self._server = test_common.test_server()
self._server.add_generic_rpc_handlers((GenericHandler(),))
self._port = self._server.add_insecure_port('[::]:0')
self._server.start()
def tearDown(self):
self._server.stop(None)
def test_connect_loopback(self):
# NOTE(https://github.com/grpc/grpc/issues/18422)
# In short, Gevent + C-Ares = Segfault. The C-Ares driver is not
# supported by custom I/O managers like "gevent" or "libuv".
with grpc.insecure_channel(
'loopback4.unittest.grpc.io:%d' % self._port) as channel:
self.assertEqual(
channel.unary_unary(_METHOD)(
_REQUEST,
timeout=test_constants.SHORT_TIMEOUT,
), _RESPONSE)
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)

@ -31,6 +31,7 @@
#include <grpcpp/channel.h> #include <grpcpp/channel.h>
#include <grpcpp/client_context.h> #include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h> #include <grpcpp/create_channel.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/server.h> #include <grpcpp/server.h>
#include <grpcpp/server_builder.h> #include <grpcpp/server_builder.h>
@ -168,24 +169,24 @@ class ClientChannelStressTest {
explicit ServerThread(const grpc::string& type, explicit ServerThread(const grpc::string& type,
const grpc::string& server_host, T* service) const grpc::string& server_host, T* service)
: type_(type), service_(service) { : type_(type), service_(service) {
std::mutex mu; grpc::internal::Mutex mu;
// We need to acquire the lock here in order to prevent the notify_one // We need to acquire the lock here in order to prevent the notify_one
// by ServerThread::Start from firing before the wait below is hit. // by ServerThread::Start from firing before the wait below is hit.
std::unique_lock<std::mutex> lock(mu); grpc::internal::MutexLock lock(&mu);
port_ = grpc_pick_unused_port_or_die(); port_ = grpc_pick_unused_port_or_die();
gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_); gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
std::condition_variable cond; grpc::internal::CondVar cond;
thread_.reset(new std::thread( thread_.reset(new std::thread(
std::bind(&ServerThread::Start, this, server_host, &mu, &cond))); std::bind(&ServerThread::Start, this, server_host, &mu, &cond)));
cond.wait(lock); cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", type_.c_str()); gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
} }
void Start(const grpc::string& server_host, std::mutex* mu, void Start(const grpc::string& server_host, grpc::internal::Mutex* mu,
std::condition_variable* cond) { grpc::internal::CondVar* cond) {
// We need to acquire the lock here in order to prevent the notify_one // We need to acquire the lock here in order to prevent the notify_one
// below from firing before its corresponding wait is executed. // below from firing before its corresponding wait is executed.
std::lock_guard<std::mutex> lock(*mu); grpc::internal::MutexLock lock(mu);
std::ostringstream server_address; std::ostringstream server_address;
server_address << server_host << ":" << port_; server_address << server_host << ":" << port_;
ServerBuilder builder; ServerBuilder builder;
@ -193,7 +194,7 @@ class ClientChannelStressTest {
InsecureServerCredentials()); InsecureServerCredentials());
builder.RegisterService(service_); builder.RegisterService(service_);
server_ = builder.BuildAndStart(); server_ = builder.BuildAndStart();
cond->notify_one(); cond->Signal();
} }
void Shutdown() { void Shutdown() {

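The test changes above preserve the startup handshake: the parent thread holds the new grpc::internal::Mutex before spawning the server thread, so the child's Signal() cannot fire before the parent's Wait() releases the mutex, which is exactly what the old comment about notify_one was guarding against. A self-contained sketch of that handshake (the thread body stands in for ServerThread::Start):

  #include <thread>
  #include <grpcpp/impl/codegen/sync.h>

  void StartServerAndWait() {
    grpc::internal::Mutex mu;
    grpc::internal::CondVar cond;
    grpc::internal::MutexLock lock(&mu);  // taken before the thread is spawned
    std::thread child([&mu, &cond] {
      grpc::internal::MutexLock child_lock(&mu);  // blocks until the parent is in Wait()
      // ... build and start the server here ...
      cond.Signal();
    });
    cond.Wait(&mu);  // releases mu while waiting, reacquires it on wakeup
    child.join();
  }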
@ -33,6 +33,7 @@
#include <grpcpp/client_context.h> #include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h> #include <grpcpp/create_channel.h>
#include <grpcpp/health_check_service_interface.h> #include <grpcpp/health_check_service_interface.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/server.h> #include <grpcpp/server.h>
#include <grpcpp/server_builder.h> #include <grpcpp/server_builder.h>
@ -98,7 +99,7 @@ class MyTestServiceImpl : public TestServiceImpl {
Status Echo(ServerContext* context, const EchoRequest* request, Status Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override { EchoResponse* response) override {
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
++request_count_; ++request_count_;
} }
AddClient(context->peer()); AddClient(context->peer());
@ -106,29 +107,29 @@ class MyTestServiceImpl : public TestServiceImpl {
} }
int request_count() { int request_count() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
return request_count_; return request_count_;
} }
void ResetCounters() { void ResetCounters() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
request_count_ = 0; request_count_ = 0;
} }
std::set<grpc::string> clients() { std::set<grpc::string> clients() {
std::unique_lock<std::mutex> lock(clients_mu_); grpc::internal::MutexLock lock(&clients_mu_);
return clients_; return clients_;
} }
private: private:
void AddClient(const grpc::string& client) { void AddClient(const grpc::string& client) {
std::unique_lock<std::mutex> lock(clients_mu_); grpc::internal::MutexLock lock(&clients_mu_);
clients_.insert(client); clients_.insert(client);
} }
std::mutex mu_; grpc::internal::Mutex mu_;
int request_count_; int request_count_;
std::mutex clients_mu_; grpc::internal::Mutex clients_mu_;
std::set<grpc::string> clients_; std::set<grpc::string> clients_;
}; };
@ -293,18 +294,18 @@ class ClientLbEnd2endTest : public ::testing::Test {
void Start(const grpc::string& server_host) { void Start(const grpc::string& server_host) {
gpr_log(GPR_INFO, "starting server on port %d", port_); gpr_log(GPR_INFO, "starting server on port %d", port_);
started_ = true; started_ = true;
std::mutex mu; grpc::internal::Mutex mu;
std::unique_lock<std::mutex> lock(mu); grpc::internal::MutexLock lock(&mu);
std::condition_variable cond; grpc::internal::CondVar cond;
thread_.reset(new std::thread( thread_.reset(new std::thread(
std::bind(&ServerData::Serve, this, server_host, &mu, &cond))); std::bind(&ServerData::Serve, this, server_host, &mu, &cond)));
cond.wait(lock, [this] { return server_ready_; }); cond.WaitUntil(&mu, [this] { return server_ready_; });
server_ready_ = false; server_ready_ = false;
gpr_log(GPR_INFO, "server startup complete"); gpr_log(GPR_INFO, "server startup complete");
} }
void Serve(const grpc::string& server_host, std::mutex* mu, void Serve(const grpc::string& server_host, grpc::internal::Mutex* mu,
std::condition_variable* cond) { grpc::internal::CondVar* cond) {
std::ostringstream server_address; std::ostringstream server_address;
server_address << server_host << ":" << port_; server_address << server_host << ":" << port_;
ServerBuilder builder; ServerBuilder builder;
@ -313,9 +314,9 @@ class ClientLbEnd2endTest : public ::testing::Test {
builder.AddListeningPort(server_address.str(), std::move(creds)); builder.AddListeningPort(server_address.str(), std::move(creds));
builder.RegisterService(&service_); builder.RegisterService(&service_);
server_ = builder.BuildAndStart(); server_ = builder.BuildAndStart();
std::lock_guard<std::mutex> lock(*mu); grpc::internal::MutexLock lock(mu);
server_ready_ = true; server_ready_ = true;
cond->notify_one(); cond->Signal();
} }
void Shutdown() { void Shutdown() {
@ -1374,7 +1375,7 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
void TearDown() override { ClientLbEnd2endTest::TearDown(); } void TearDown() override { ClientLbEnd2endTest::TearDown(); }
int trailers_intercepted() { int trailers_intercepted() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
return trailers_intercepted_; return trailers_intercepted_;
} }
@ -1382,11 +1383,11 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
static void ReportTrailerIntercepted(void* arg) { static void ReportTrailerIntercepted(void* arg) {
ClientLbInterceptTrailingMetadataTest* self = ClientLbInterceptTrailingMetadataTest* self =
static_cast<ClientLbInterceptTrailingMetadataTest*>(arg); static_cast<ClientLbInterceptTrailingMetadataTest*>(arg);
std::unique_lock<std::mutex> lock(self->mu_); grpc::internal::MutexLock lock(&self->mu_);
self->trailers_intercepted_++; self->trailers_intercepted_++;
} }
std::mutex mu_; grpc::internal::Mutex mu_;
int trailers_intercepted_ = 0; int trailers_intercepted_ = 0;
}; };

@ -30,6 +30,7 @@
#include <grpcpp/channel.h> #include <grpcpp/channel.h>
#include <grpcpp/client_context.h> #include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h> #include <grpcpp/create_channel.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/server.h> #include <grpcpp/server.h>
#include <grpcpp/server_builder.h> #include <grpcpp/server_builder.h>
@ -85,32 +86,32 @@ template <typename ServiceType>
class CountedService : public ServiceType { class CountedService : public ServiceType {
public: public:
size_t request_count() { size_t request_count() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
return request_count_; return request_count_;
} }
size_t response_count() { size_t response_count() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
return response_count_; return response_count_;
} }
void IncreaseResponseCount() { void IncreaseResponseCount() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
++response_count_; ++response_count_;
} }
void IncreaseRequestCount() { void IncreaseRequestCount() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
++request_count_; ++request_count_;
} }
void ResetCounters() { void ResetCounters() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
request_count_ = 0; request_count_ = 0;
response_count_ = 0; response_count_ = 0;
} }
protected: protected:
std::mutex mu_; grpc::internal::Mutex mu_;
private: private:
size_t request_count_ = 0; size_t request_count_ = 0;
@ -148,18 +149,18 @@ class BackendServiceImpl : public BackendService {
void Shutdown() {} void Shutdown() {}
std::set<grpc::string> clients() { std::set<grpc::string> clients() {
std::unique_lock<std::mutex> lock(clients_mu_); grpc::internal::MutexLock lock(&clients_mu_);
return clients_; return clients_;
} }
private: private:
void AddClient(const grpc::string& client) { void AddClient(const grpc::string& client) {
std::unique_lock<std::mutex> lock(clients_mu_); grpc::internal::MutexLock lock(&clients_mu_);
clients_.insert(client); clients_.insert(client);
} }
std::mutex mu_; grpc::internal::Mutex mu_;
std::mutex clients_mu_; grpc::internal::Mutex clients_mu_;
std::set<grpc::string> clients_; std::set<grpc::string> clients_;
}; };
@ -210,7 +211,7 @@ class BalancerServiceImpl : public BalancerService {
Status BalanceLoad(ServerContext* context, Stream* stream) override { Status BalanceLoad(ServerContext* context, Stream* stream) override {
gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this); gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this);
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
if (serverlist_done_) goto done; if (serverlist_done_) goto done;
} }
{ {
@ -237,7 +238,7 @@ class BalancerServiceImpl : public BalancerService {
} }
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
responses_and_delays = responses_and_delays_; responses_and_delays = responses_and_delays_;
} }
for (const auto& response_and_delay : responses_and_delays) { for (const auto& response_and_delay : responses_and_delays) {
@ -245,8 +246,8 @@ class BalancerServiceImpl : public BalancerService {
response_and_delay.second); response_and_delay.second);
} }
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
serverlist_cond_.wait(lock, [this] { return serverlist_done_; }); serverlist_cond_.WaitUntil(&mu_, [this] { return serverlist_done_; });
} }
if (client_load_reporting_interval_seconds_ > 0) { if (client_load_reporting_interval_seconds_ > 0) {
@ -257,7 +258,7 @@ class BalancerServiceImpl : public BalancerService {
GPR_ASSERT(request.has_client_stats()); GPR_ASSERT(request.has_client_stats());
// We need to acquire the lock here in order to prevent the notify_one // We need to acquire the lock here in order to prevent the notify_one
// below from firing before its corresponding wait is executed. // below from firing before its corresponding wait is executed.
std::lock_guard<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
client_stats_.num_calls_started += client_stats_.num_calls_started +=
request.client_stats().num_calls_started(); request.client_stats().num_calls_started();
client_stats_.num_calls_finished += client_stats_.num_calls_finished +=
@ -274,7 +275,7 @@ class BalancerServiceImpl : public BalancerService {
drop_token_count.num_calls(); drop_token_count.num_calls();
} }
load_report_ready_ = true; load_report_ready_ = true;
load_report_cond_.notify_one(); load_report_cond_.Signal();
} }
} }
} }
@ -284,12 +285,12 @@ class BalancerServiceImpl : public BalancerService {
} }
void add_response(const LoadBalanceResponse& response, int send_after_ms) { void add_response(const LoadBalanceResponse& response, int send_after_ms) {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
responses_and_delays_.push_back(std::make_pair(response, send_after_ms)); responses_and_delays_.push_back(std::make_pair(response, send_after_ms));
} }
void Start() { void Start() {
std::lock_guard<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
serverlist_done_ = false; serverlist_done_ = false;
load_report_ready_ = false; load_report_ready_ = false;
responses_and_delays_.clear(); responses_and_delays_.clear();
@ -326,17 +327,17 @@ class BalancerServiceImpl : public BalancerService {
} }
const ClientStats& WaitForLoadReport() { const ClientStats& WaitForLoadReport() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
load_report_cond_.wait(lock, [this] { return load_report_ready_; }); load_report_cond_.WaitUntil(&mu_, [this] { return load_report_ready_; });
load_report_ready_ = false; load_report_ready_ = false;
return client_stats_; return client_stats_;
} }
void NotifyDoneWithServerlists() { void NotifyDoneWithServerlists() {
std::lock_guard<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
if (!serverlist_done_) { if (!serverlist_done_) {
serverlist_done_ = true; serverlist_done_ = true;
serverlist_cond_.notify_all(); serverlist_cond_.Broadcast();
} }
} }
@ -355,10 +356,10 @@ class BalancerServiceImpl : public BalancerService {
const int client_load_reporting_interval_seconds_; const int client_load_reporting_interval_seconds_;
std::vector<ResponseDelayPair> responses_and_delays_; std::vector<ResponseDelayPair> responses_and_delays_;
std::mutex mu_; grpc::internal::Mutex mu_;
std::condition_variable load_report_cond_; grpc::internal::CondVar load_report_cond_;
bool load_report_ready_ = false; bool load_report_ready_ = false;
std::condition_variable serverlist_cond_; grpc::internal::CondVar serverlist_cond_;
bool serverlist_done_ = false; bool serverlist_done_ = false;
ClientStats client_stats_; ClientStats client_stats_;
}; };
@ -624,22 +625,22 @@ class GrpclbEnd2endTest : public ::testing::Test {
GPR_ASSERT(!running_); GPR_ASSERT(!running_);
running_ = true; running_ = true;
service_.Start(); service_.Start();
std::mutex mu; grpc::internal::Mutex mu;
// We need to acquire the lock here in order to prevent the notify_one // We need to acquire the lock here in order to prevent the notify_one
// by ServerThread::Serve from firing before the wait below is hit. // by ServerThread::Serve from firing before the wait below is hit.
std::unique_lock<std::mutex> lock(mu); grpc::internal::MutexLock lock(&mu);
std::condition_variable cond; grpc::internal::CondVar cond;
thread_.reset(new std::thread( thread_.reset(new std::thread(
std::bind(&ServerThread::Serve, this, server_host, &mu, &cond))); std::bind(&ServerThread::Serve, this, server_host, &mu, &cond)));
cond.wait(lock); cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", type_.c_str()); gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
} }
void Serve(const grpc::string& server_host, std::mutex* mu, void Serve(const grpc::string& server_host, grpc::internal::Mutex* mu,
std::condition_variable* cond) { grpc::internal::CondVar* cond) {
// We need to acquire the lock here in order to prevent the notify_one // We need to acquire the lock here in order to prevent the notify_one
// below from firing before its corresponding wait is executed. // below from firing before its corresponding wait is executed.
std::lock_guard<std::mutex> lock(*mu); grpc::internal::MutexLock lock(mu);
std::ostringstream server_address; std::ostringstream server_address;
server_address << server_host << ":" << port_; server_address << server_host << ":" << port_;
ServerBuilder builder; ServerBuilder builder;
@ -648,7 +649,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
builder.AddListeningPort(server_address.str(), creds); builder.AddListeningPort(server_address.str(), creds);
builder.RegisterService(&service_); builder.RegisterService(&service_);
server_ = builder.BuildAndStart(); server_ = builder.BuildAndStart();
cond->notify_one(); cond->Signal();
} }
void Shutdown() { void Shutdown() {

@ -25,6 +25,7 @@
#include <grpcpp/channel.h> #include <grpcpp/channel.h>
#include <grpcpp/client_context.h> #include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h> #include <grpcpp/create_channel.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/resource_quota.h> #include <grpcpp/resource_quota.h>
#include <grpcpp/server.h> #include <grpcpp/server.h>
#include <grpcpp/server_builder.h> #include <grpcpp/server_builder.h>
@ -188,7 +189,7 @@ class CommonStressTestAsyncServer : public BaseClass {
} }
void TearDown() override { void TearDown() override {
{ {
std::unique_lock<std::mutex> l(mu_); grpc::internal::MutexLock l(&mu_);
this->TearDownStart(); this->TearDownStart();
shutting_down_ = true; shutting_down_ = true;
cq_->Shutdown(); cq_->Shutdown();
@ -229,7 +230,7 @@ class CommonStressTestAsyncServer : public BaseClass {
} }
} }
void RefreshContext(int i) { void RefreshContext(int i) {
std::unique_lock<std::mutex> l(mu_); grpc::internal::MutexLock l(&mu_);
if (!shutting_down_) { if (!shutting_down_) {
contexts_[i].state = Context::READY; contexts_[i].state = Context::READY;
contexts_[i].srv_ctx.reset(new ServerContext); contexts_[i].srv_ctx.reset(new ServerContext);
@ -253,7 +254,7 @@ class CommonStressTestAsyncServer : public BaseClass {
::grpc::testing::EchoTestService::AsyncService service_; ::grpc::testing::EchoTestService::AsyncService service_;
std::unique_ptr<ServerCompletionQueue> cq_; std::unique_ptr<ServerCompletionQueue> cq_;
bool shutting_down_; bool shutting_down_;
std::mutex mu_; grpc::internal::Mutex mu_;
std::vector<std::thread> server_threads_; std::vector<std::thread> server_threads_;
}; };
@ -341,9 +342,9 @@ class AsyncClientEnd2endTest : public ::testing::Test {
} }
void Wait() { void Wait() {
std::unique_lock<std::mutex> l(mu_); grpc::internal::MutexLock l(&mu_);
while (rpcs_outstanding_ != 0) { while (rpcs_outstanding_ != 0) {
cv_.wait(l); cv_.Wait(&mu_);
} }
cq_.Shutdown(); cq_.Shutdown();
@ -366,7 +367,7 @@ class AsyncClientEnd2endTest : public ::testing::Test {
call->response_reader->Finish(&call->response, &call->status, call->response_reader->Finish(&call->response, &call->status,
(void*)call); (void*)call);
std::unique_lock<std::mutex> l(mu_); grpc::internal::MutexLock l(&mu_);
rpcs_outstanding_++; rpcs_outstanding_++;
} }
} }
@ -384,20 +385,20 @@ class AsyncClientEnd2endTest : public ::testing::Test {
bool notify; bool notify;
{ {
std::unique_lock<std::mutex> l(mu_); grpc::internal::MutexLock l(&mu_);
rpcs_outstanding_--; rpcs_outstanding_--;
notify = (rpcs_outstanding_ == 0); notify = (rpcs_outstanding_ == 0);
} }
if (notify) { if (notify) {
cv_.notify_all(); cv_.Signal();
} }
} }
} }
Common common_; Common common_;
CompletionQueue cq_; CompletionQueue cq_;
std::mutex mu_; grpc::internal::Mutex mu_;
std::condition_variable cv_; grpc::internal::CondVar cv_;
int rpcs_outstanding_; int rpcs_outstanding_;
}; };

@ -84,32 +84,32 @@ template <typename ServiceType>
class CountedService : public ServiceType { class CountedService : public ServiceType {
public: public:
size_t request_count() { size_t request_count() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
return request_count_; return request_count_;
} }
size_t response_count() { size_t response_count() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
return response_count_; return response_count_;
} }
void IncreaseResponseCount() { void IncreaseResponseCount() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
++response_count_; ++response_count_;
} }
void IncreaseRequestCount() { void IncreaseRequestCount() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
++request_count_; ++request_count_;
} }
void ResetCounters() { void ResetCounters() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
request_count_ = 0; request_count_ = 0;
response_count_ = 0; response_count_ = 0;
} }
protected: protected:
std::mutex mu_; grpc::internal::Mutex mu_;
private: private:
size_t request_count_ = 0; size_t request_count_ = 0;
@ -145,18 +145,18 @@ class BackendServiceImpl : public BackendService {
void Shutdown() {} void Shutdown() {}
std::set<grpc::string> clients() { std::set<grpc::string> clients() {
std::unique_lock<std::mutex> lock(clients_mu_); grpc::internal::MutexLock lock(&clients_mu_);
return clients_; return clients_;
} }
private: private:
void AddClient(const grpc::string& client) { void AddClient(const grpc::string& client) {
std::unique_lock<std::mutex> lock(clients_mu_); grpc::internal::MutexLock lock(&clients_mu_);
clients_.insert(client); clients_.insert(client);
} }
std::mutex mu_; grpc::internal::Mutex mu_;
std::mutex clients_mu_; grpc::internal::Mutex clients_mu_;
std::set<grpc::string> clients_; std::set<grpc::string> clients_;
}; };
@ -208,7 +208,7 @@ class BalancerServiceImpl : public BalancerService {
// TODO(juanlishen): Clean up the scoping. // TODO(juanlishen): Clean up the scoping.
gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this); gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this);
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
if (serverlist_done_) goto done; if (serverlist_done_) goto done;
} }
{ {
@ -234,7 +234,7 @@ class BalancerServiceImpl : public BalancerService {
} }
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
responses_and_delays = responses_and_delays_; responses_and_delays = responses_and_delays_;
} }
for (const auto& response_and_delay : responses_and_delays) { for (const auto& response_and_delay : responses_and_delays) {
@ -242,8 +242,8 @@ class BalancerServiceImpl : public BalancerService {
response_and_delay.second); response_and_delay.second);
} }
{ {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
serverlist_cond_.wait(lock, [this] { return serverlist_done_; }); serverlist_cond_.WaitUntil(&mu_, [this] { return serverlist_done_; });
} }
if (client_load_reporting_interval_seconds_ > 0) { if (client_load_reporting_interval_seconds_ > 0) {
@ -254,7 +254,7 @@ class BalancerServiceImpl : public BalancerService {
GPR_ASSERT(request.has_client_stats()); GPR_ASSERT(request.has_client_stats());
// We need to acquire the lock here in order to prevent the notify_one // We need to acquire the lock here in order to prevent the notify_one
// below from firing before its corresponding wait is executed. // below from firing before its corresponding wait is executed.
std::lock_guard<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
client_stats_.num_calls_started += client_stats_.num_calls_started +=
request.client_stats().num_calls_started(); request.client_stats().num_calls_started();
client_stats_.num_calls_finished += client_stats_.num_calls_finished +=
@ -271,7 +271,7 @@ class BalancerServiceImpl : public BalancerService {
drop_token_count.num_calls(); drop_token_count.num_calls();
} }
load_report_ready_ = true; load_report_ready_ = true;
load_report_cond_.notify_one(); load_report_cond_.Signal();
} }
} }
} }
@ -281,12 +281,12 @@ class BalancerServiceImpl : public BalancerService {
} }
void add_response(const LoadBalanceResponse& response, int send_after_ms) { void add_response(const LoadBalanceResponse& response, int send_after_ms) {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
responses_and_delays_.push_back(std::make_pair(response, send_after_ms)); responses_and_delays_.push_back(std::make_pair(response, send_after_ms));
} }
void Shutdown() { void Shutdown() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
NotifyDoneWithServerlistsLocked(); NotifyDoneWithServerlistsLocked();
responses_and_delays_.clear(); responses_and_delays_.clear();
client_stats_.Reset(); client_stats_.Reset();
@ -318,21 +318,21 @@ class BalancerServiceImpl : public BalancerService {
} }
const ClientStats& WaitForLoadReport() { const ClientStats& WaitForLoadReport() {
std::unique_lock<std::mutex> lock(mu_); grpc::internal::MutexLock lock(&mu_);
-    load_report_cond_.wait(lock, [this] { return load_report_ready_; });
+    load_report_cond_.WaitUntil(&mu_, [this] { return load_report_ready_; });
     load_report_ready_ = false;
     return client_stats_;
   }
   void NotifyDoneWithServerlists() {
-    std::lock_guard<std::mutex> lock(mu_);
+    grpc::internal::MutexLock lock(&mu_);
     NotifyDoneWithServerlistsLocked();
   }
   void NotifyDoneWithServerlistsLocked() {
     if (!serverlist_done_) {
       serverlist_done_ = true;
-      serverlist_cond_.notify_all();
+      serverlist_cond_.Broadcast();
     }
   }
@@ -351,10 +351,10 @@ class BalancerServiceImpl : public BalancerService {
   const int client_load_reporting_interval_seconds_;
   std::vector<ResponseDelayPair> responses_and_delays_;
-  std::mutex mu_;
-  std::condition_variable load_report_cond_;
+  grpc::internal::Mutex mu_;
+  grpc::internal::CondVar load_report_cond_;
   bool load_report_ready_ = false;
-  std::condition_variable serverlist_cond_;
+  grpc::internal::CondVar serverlist_cond_;
   bool serverlist_done_ = false;
   ClientStats client_stats_;
 };
@@ -637,22 +637,22 @@ class XdsEnd2endTest : public ::testing::Test {
       gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
       GPR_ASSERT(!running_);
       running_ = true;
-      std::mutex mu;
+      grpc::internal::Mutex mu;
       // We need to acquire the lock here in order to prevent the notify_one
       // by ServerThread::Serve from firing before the wait below is hit.
-      std::unique_lock<std::mutex> lock(mu);
-      std::condition_variable cond;
+      grpc::internal::MutexLock lock(&mu);
+      grpc::internal::CondVar cond;
       thread_.reset(new std::thread(
           std::bind(&ServerThread::Serve, this, server_host, &mu, &cond)));
-      cond.wait(lock);
+      cond.Wait(&mu);
       gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
     }
-    void Serve(const grpc::string& server_host, std::mutex* mu,
-               std::condition_variable* cond) {
+    void Serve(const grpc::string& server_host, grpc::internal::Mutex* mu,
+               grpc::internal::CondVar* cond) {
       // We need to acquire the lock here in order to prevent the notify_one
       // below from firing before its corresponding wait is executed.
-      std::lock_guard<std::mutex> lock(*mu);
+      grpc::internal::MutexLock lock(mu);
       std::ostringstream server_address;
       server_address << server_host << ":" << port_;
       ServerBuilder builder;
@@ -661,7 +661,7 @@ class XdsEnd2endTest : public ::testing::Test {
       builder.AddListeningPort(server_address.str(), creds);
       builder.RegisterService(&service_);
       server_ = builder.BuildAndStart();
-      cond->notify_one();
+      cond->Signal();
     }
     void Shutdown() {
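For context on the conversion above: the pattern being preserved is a start/serve handshake in which the starting thread holds the mutex before spawning the worker, so the worker's Signal() cannot fire before the starter reaches Wait(). Below is a minimal standalone sketch, using only the wrapper methods visible in this diff (grpc::internal::MutexLock, CondVar::Wait, CondVar::Signal); it assumes the sync.h header can be included directly, and the function names ServeThread and StartAndWaitForServer are illustrative, not part of the change.

    #include <thread>

    #include <grpcpp/impl/codegen/sync.h>  // assumption: normally pulled in via other grpcpp headers

    // Illustrative sketch only: the starter takes the mutex before spawning
    // the worker, so the worker cannot Signal() until the starter has
    // released the mutex inside Wait().
    void ServeThread(grpc::internal::Mutex* mu, grpc::internal::CondVar* cond) {
      grpc::internal::MutexLock lock(mu);  // blocks until the starter is waiting
      // ... build and start the server here ...
      cond->Signal();  // wake the starter; the lock is released when 'lock' is destroyed
    }

    void StartAndWaitForServer() {
      grpc::internal::Mutex mu;
      grpc::internal::CondVar cond;
      grpc::internal::MutexLock lock(&mu);  // held before the thread exists
      std::thread worker(ServeThread, &mu, &cond);
      cond.Wait(&mu);   // releases mu while waiting, reacquires before returning
      worker.join();    // the real test keeps the thread around and joins it in Shutdown()
    }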

@@ -44,7 +44,7 @@ PROJECT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..', '..'))
 CONFIG = args.config
 SETUP_PATH = os.path.join(PROJECT_ROOT, 'setup.py')
-REQUIREMENTS_PATH = os.path.join(PROJECT_ROOT, 'requirements.txt')
+REQUIREMENTS_PATH = os.path.join(PROJECT_ROOT, 'requirements.bazel.txt')
 DOC_PATH = os.path.join(PROJECT_ROOT, 'doc/build')
 INCLUDE_PATH = os.path.join(PROJECT_ROOT, 'include')
 LIBRARY_PATH = os.path.join(PROJECT_ROOT, 'libs/{}'.format(CONFIG))

@@ -29,11 +29,11 @@ if [ "x$1" == 'x--pre-commit' ]; then
     fi
   fi
   CHANGED_FILES=$(eval $DIFF_COMMAND) ./tools/distrib/clang_format_code.sh
-  ./tools/distrib/check_copyright.py --fix --precommit
+  ./tools/distrib/check_copyright.py --precommit
   ./tools/distrib/check_trailing_newlines.sh
 else
   ./tools/buildgen/generate_projects.sh
   ./tools/distrib/clang_format_code.sh
-  ./tools/distrib/check_copyright.py --fix
+  ./tools/distrib/check_copyright.py
   ./tools/distrib/check_trailing_newlines.sh
 fi

@@ -987,6 +987,7 @@ include/grpcpp/impl/codegen/status.h \
 include/grpcpp/impl/codegen/status_code_enum.h \
 include/grpcpp/impl/codegen/string_ref.h \
 include/grpcpp/impl/codegen/stub_options.h \
+include/grpcpp/impl/codegen/sync.h \
 include/grpcpp/impl/codegen/sync_stream.h \
 include/grpcpp/impl/codegen/time.h \
 include/grpcpp/impl/grpc_library.h \

@@ -989,6 +989,7 @@ include/grpcpp/impl/codegen/status.h \
 include/grpcpp/impl/codegen/status_code_enum.h \
 include/grpcpp/impl/codegen/string_ref.h \
 include/grpcpp/impl/codegen/stub_options.h \
+include/grpcpp/impl/codegen/sync.h \
 include/grpcpp/impl/codegen/sync_stream.h \
 include/grpcpp/impl/codegen/time.h \
 include/grpcpp/impl/grpc_library.h \
@@ -1086,12 +1087,12 @@ src/core/lib/gprpp/inlined_vector.h \
 src/core/lib/gprpp/manual_constructor.h \
 src/core/lib/gprpp/map.h \
 src/core/lib/gprpp/memory.h \
-src/core/lib/gprpp/mutex_lock.h \
 src/core/lib/gprpp/optional.h \
 src/core/lib/gprpp/orphanable.h \
 src/core/lib/gprpp/pair.h \
 src/core/lib/gprpp/ref_counted.h \
 src/core/lib/gprpp/ref_counted_ptr.h \
+src/core/lib/gprpp/sync.h \
 src/core/lib/gprpp/thd.h \
 src/core/lib/http/format_request.h \
 src/core/lib/http/httpcli.h \

@@ -1168,12 +1168,12 @@ src/core/lib/gprpp/inlined_vector.h \
 src/core/lib/gprpp/manual_constructor.h \
 src/core/lib/gprpp/map.h \
 src/core/lib/gprpp/memory.h \
-src/core/lib/gprpp/mutex_lock.h \
 src/core/lib/gprpp/optional.h \
 src/core/lib/gprpp/orphanable.h \
 src/core/lib/gprpp/pair.h \
 src/core/lib/gprpp/ref_counted.h \
 src/core/lib/gprpp/ref_counted_ptr.h \
+src/core/lib/gprpp/sync.h \
 src/core/lib/gprpp/thd.h \
 src/core/lib/gprpp/thd_posix.cc \
 src/core/lib/gprpp/thd_windows.cc \

@@ -8050,8 +8050,8 @@
       "src/core/lib/gprpp/manual_constructor.h",
       "src/core/lib/gprpp/map.h",
       "src/core/lib/gprpp/memory.h",
-      "src/core/lib/gprpp/mutex_lock.h",
       "src/core/lib/gprpp/pair.h",
+      "src/core/lib/gprpp/sync.h",
       "src/core/lib/gprpp/thd.h",
       "src/core/lib/profiling/timers.h"
     ],
@@ -8098,8 +8098,8 @@
       "src/core/lib/gprpp/manual_constructor.h",
       "src/core/lib/gprpp/map.h",
       "src/core/lib/gprpp/memory.h",
-      "src/core/lib/gprpp/mutex_lock.h",
       "src/core/lib/gprpp/pair.h",
+      "src/core/lib/gprpp/sync.h",
       "src/core/lib/gprpp/thd.h",
       "src/core/lib/profiling/timers.h"
     ],
@@ -9880,6 +9880,7 @@
   },
   {
     "deps": [
+      "grpc++_internal_hdrs_only",
       "grpc_codegen"
     ],
     "headers": [
@@ -10076,6 +10077,7 @@
       "gpr",
       "gpr_base_headers",
       "grpc++_codegen_base",
+      "grpc++_internal_hdrs_only",
       "grpc_base_headers",
       "grpc_transport_inproc_headers",
       "health_proto",
@@ -10370,6 +10372,20 @@
     "third_party": false,
     "type": "filegroup"
   },
+  {
+    "deps": [],
+    "headers": [
+      "include/grpcpp/impl/codegen/sync.h"
+    ],
+    "is_filegroup": true,
+    "language": "c++",
+    "name": "grpc++_internal_hdrs_only",
+    "src": [
+      "include/grpcpp/impl/codegen/sync.h"
+    ],
+    "third_party": false,
+    "type": "filegroup"
+  },
   {
     "deps": [],
     "headers": [
