Merge remote-tracking branch 'upstream/master' into disable_fork_support

pull/14079/head
Ken Payson 7 years ago
commit f91d4ef78b
  1. 2
      .github/CODEOWNERS
  2. 2
      .gitignore
  3. 28
      BUILD
  4. 3993
      CMakeLists.txt
  5. 3
      CONTRIBUTING.md
  6. 129
      Makefile
  7. 1
      bazel/OWNERS
  8. 11
      bazel/grpc_build_system.bzl
  9. 34
      build.yaml
  10. 5
      cmake/benchmark.cmake
  11. 5
      cmake/cares.cmake
  12. 14
      cmake/gflags.cmake
  13. 18
      cmake/protobuf.cmake
  14. 15
      cmake/zlib.cmake
  15. 5
      doc/environment_variables.md
  16. 25
      examples/cpp/helloworld/CMakeLists.txt
  17. 5
      gRPC-Core.podspec
  18. 1
      grpc.gemspec
  19. 2
      grpc.gyp
  20. 4
      include/grpc++/impl/codegen/byte_buffer.h
  21. 6
      include/grpc++/impl/codegen/completion_queue.h
  22. 54
      include/grpc++/impl/codegen/method_handler_impl.h
  23. 6
      include/grpc++/impl/codegen/server_context.h
  24. 21
      include/grpc++/server.h
  25. 19
      include/grpc++/server_builder.h
  26. 15
      include/grpc/impl/codegen/port_platform.h
  27. 1
      package.xml
  28. 75
      src/core/ext/filters/client_channel/client_channel.cc
  29. 95
      src/core/ext/filters/client_channel/lb_policy.cc
  30. 94
      src/core/ext/filters/client_channel/lb_policy.h
  31. 611
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
  32. 156
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
  33. 174
      src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
  34. 9
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
  35. 3
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
  36. 297
      src/core/ext/filters/client_channel/subchannel.cc
  37. 79
      src/core/ext/filters/client_channel/subchannel.h
  38. 10
      src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
  39. 28
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  40. 2
      src/core/ext/transport/chttp2/transport/chttp2_transport.h
  41. 25
      src/core/ext/transport/chttp2/transport/flow_control.cc
  42. 242
      src/core/ext/transport/chttp2/transport/flow_control.h
  43. 6
      src/core/ext/transport/chttp2/transport/frame_settings.cc
  44. 10
      src/core/ext/transport/chttp2/transport/internal.h
  45. 7
      src/core/ext/transport/chttp2/transport/parsing.cc
  46. 2
      src/core/ext/transport/chttp2/transport/stream_lists.cc
  47. 25
      src/core/lib/iomgr/ev_epoll1_linux.cc
  48. 10
      src/core/lib/iomgr/ev_epollex_linux.cc
  49. 10
      src/core/lib/iomgr/ev_epollsig_linux.cc
  50. 4
      src/core/lib/iomgr/ev_epollsig_linux.h
  51. 2
      src/core/lib/iomgr/is_epollexclusive_available.cc
  52. 7
      src/core/lib/iomgr/port.h
  53. 37
      src/core/lib/iomgr/udp_server.cc
  54. 1
      src/core/lib/iomgr/udp_server.h
  55. 2
      src/core/lib/security/transport/client_auth_filter.cc
  56. 5
      src/core/lib/support/abstract.h
  57. 171
      src/core/lib/support/orphanable.h
  58. 13
      src/core/lib/support/ref_counted.h
  59. 9
      src/core/lib/support/ref_counted_ptr.h
  60. 5
      src/core/lib/surface/call.cc
  61. 14
      src/cpp/client/secure_credentials.cc
  62. 2
      src/cpp/server/create_default_thread_pool.cc
  63. 54
      src/cpp/server/dynamic_thread_pool.cc
  64. 20
      src/cpp/server/dynamic_thread_pool.h
  65. 7
      src/cpp/server/secure_server_credentials.cc
  66. 7
      src/cpp/server/server_builder.cc
  67. 49
      src/cpp/server/server_cc.cc
  68. 4
      src/cpp/server/thread_pool_interface.h
  69. 54
      src/cpp/thread_manager/thread_manager.cc
  70. 29
      src/cpp/thread_manager/thread_manager.h
  71. 11
      src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs
  72. 2
      src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs
  73. 14
      src/objective-c/BoringSSL.podspec
  74. 12
      src/objective-c/README.md
  75. 2
      src/php/ext/grpc/channel.c
  76. 4
      src/php/tests/unit_tests/CallCredentials2Test.php
  77. 996
      src/python/grpcio/grpc/__init__.py
  78. 6
      src/python/grpcio/grpc/_server.py
  79. 200
      src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
  80. 4
      src/ruby/spec/generic/client_stub_spec.rb
  81. 11
      summerofcode/ideas.md
  82. 35
      templates/CMakeLists.txt.template
  83. 4
      templates/Makefile.template
  84. 2
      templates/gRPC-Core.podspec.template
  85. 2
      templates/tools/dockerfile/clang5.include
  86. 4
      templates/tools/dockerfile/grpc_clang_format/Dockerfile.template
  87. 24
      templates/tools/dockerfile/grpc_clang_tidy/Dockerfile.template
  88. 2
      templates/tools/dockerfile/test/sanity/Dockerfile.template
  89. 11
      test/core/end2end/dualstack_socket_test.cc
  90. 8
      test/core/end2end/end2end_nosec_tests.cc
  91. 8
      test/core/end2end/end2end_tests.cc
  92. 13
      test/core/end2end/fuzzers/api_fuzzer.cc
  93. BIN
      test/core/end2end/fuzzers/api_fuzzer_corpus/fuzz-input-d2ab5
  94. 1
      test/core/end2end/gen_build_yaml.py
  95. 1
      test/core/end2end/generate_tests.bzl
  96. 378
      test/core/end2end/tests/filter_status_code.cc
  97. 2
      test/core/fling/client.cc
  98. 6
      test/core/iomgr/ev_epollsig_linux_test.cc
  99. 6
      test/core/iomgr/pollset_set_test.cc
  100. 23
      test/core/iomgr/udp_server_test.cc
  101. Some files were not shown because too many files have changed in this diff Show More

@ -2,6 +2,6 @@
# Uses OWNERS files in different modules throughout the # Uses OWNERS files in different modules throughout the
# repository as the source of truth for module ownership. # repository as the source of truth for module ownership.
/**/OWNERS @markdroth @nicolasnoble @a11r /**/OWNERS @markdroth @nicolasnoble @a11r
/bazel/** @nicolasnoble @dgquintas @a11r /bazel/** @nicolasnoble @dgquintas @a11r @vjpai
/src/core/ext/filters/client_channel/** @markdroth @dgquintas @a11r /src/core/ext/filters/client_channel/** @markdroth @dgquintas @a11r
/tools/run_tests/performance/** @ncteisen @matt-kwong @ctiller /tools/run_tests/performance/** @ncteisen @matt-kwong @ctiller

2
.gitignore vendored

@ -130,3 +130,5 @@ bm_diff_new/
bm_diff_old/ bm_diff_old/
bm_*.json bm_*.json
# cmake build files
/cmake/build

28
BUILD

@ -38,6 +38,16 @@ config_setting(
values = {"define": "grpc_no_ares=true"}, values = {"define": "grpc_no_ares=true"},
) )
config_setting(
name = "grpc_allow_exceptions",
values = {"define": "GRPC_ALLOW_EXCEPTIONS=1"},
)
config_setting(
name = "grpc_disallow_exceptions",
values = {"define": "GRPC_ALLOW_EXCEPTIONS=0"},
)
config_setting( config_setting(
name = "remote_execution", name = "remote_execution",
values = {"define": "GRPC_PORT_ISOLATED_RUNTIME=1"}, values = {"define": "GRPC_PORT_ISOLATED_RUNTIME=1"},
@ -544,24 +554,34 @@ grpc_cc_library(
grpc_cc_library( grpc_cc_library(
name = "debug_location", name = "debug_location",
language = "c++",
public_hdrs = ["src/core/lib/support/debug_location.h"], public_hdrs = ["src/core/lib/support/debug_location.h"],
)
grpc_cc_library(
name = "orphanable",
language = "c++", language = "c++",
public_hdrs = ["src/core/lib/support/orphanable.h"],
deps = [
"debug_location",
"grpc_trace",
],
) )
grpc_cc_library( grpc_cc_library(
name = "ref_counted", name = "ref_counted",
public_hdrs = ["src/core/lib/support/ref_counted.h"],
language = "c++", language = "c++",
public_hdrs = ["src/core/lib/support/ref_counted.h"],
deps = [ deps = [
"grpc_trace",
"debug_location", "debug_location",
"grpc_trace",
], ],
) )
grpc_cc_library( grpc_cc_library(
name = "ref_counted_ptr", name = "ref_counted_ptr",
public_hdrs = ["src/core/lib/support/ref_counted_ptr.h"],
language = "c++", language = "c++",
public_hdrs = ["src/core/lib/support/ref_counted_ptr.h"],
) )
grpc_cc_library( grpc_cc_library(
@ -919,6 +939,8 @@ grpc_cc_library(
deps = [ deps = [
"grpc_base", "grpc_base",
"grpc_deadline_filter", "grpc_deadline_filter",
"ref_counted",
"ref_counted_ptr",
], ],
) )

File diff suppressed because it is too large Load Diff

@ -75,6 +75,9 @@ How to get your contributions merged smoothly and quickly.
`tools/buildgen/generate_projects.sh`, make changes to generated files a `tools/buildgen/generate_projects.sh`, make changes to generated files a
separate commit with commit message `regenerate projects`. Mixing changes separate commit with commit message `regenerate projects`. Mixing changes
to generated and hand-written files make your PR difficult to review. to generated and hand-written files make your PR difficult to review.
Note that running this script requires the installation of Python packages
`pyyaml` and `mako` (typically installed using `pip`) as well as a recent
version of [`go`](https://golang.org/doc/install#install).
- **All tests need to be passing** before your change can be merged. - **All tests need to be passing** before your change can be merged.
We recommend you **run tests locally** before creating your PR to catch We recommend you **run tests locally** before creating your PR to catch

@ -77,7 +77,6 @@ CC_opt = $(DEFAULT_CC)
CXX_opt = $(DEFAULT_CXX) CXX_opt = $(DEFAULT_CXX)
LD_opt = $(DEFAULT_CC) LD_opt = $(DEFAULT_CC)
LDXX_opt = $(DEFAULT_CXX) LDXX_opt = $(DEFAULT_CXX)
CXXFLAGS_opt = -fno-exceptions
CPPFLAGS_opt = -O2 CPPFLAGS_opt = -O2
DEFINES_opt = NDEBUG DEFINES_opt = NDEBUG
@ -95,7 +94,6 @@ CC_dbg = $(DEFAULT_CC)
CXX_dbg = $(DEFAULT_CXX) CXX_dbg = $(DEFAULT_CXX)
LD_dbg = $(DEFAULT_CC) LD_dbg = $(DEFAULT_CC)
LDXX_dbg = $(DEFAULT_CXX) LDXX_dbg = $(DEFAULT_CXX)
CXXFLAGS_dbg = -fno-exceptions
CPPFLAGS_dbg = -O0 CPPFLAGS_dbg = -O0
DEFINES_dbg = _DEBUG DEBUG DEFINES_dbg = _DEBUG DEBUG
@ -144,14 +142,14 @@ LDXX_asan-noleaks = clang++
CPPFLAGS_asan-noleaks = -O0 -fsanitize-coverage=edge -fsanitize=address -fno-omit-frame-pointer -Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS CPPFLAGS_asan-noleaks = -O0 -fsanitize-coverage=edge -fsanitize=address -fno-omit-frame-pointer -Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
LDFLAGS_asan-noleaks = -fsanitize=address LDFLAGS_asan-noleaks = -fsanitize=address
VALID_CONFIG_c++-compat = 1 VALID_CONFIG_noexcept = 1
CC_c++-compat = $(DEFAULT_CC) CC_noexcept = $(DEFAULT_CC)
CXX_c++-compat = $(DEFAULT_CXX) CXX_noexcept = $(DEFAULT_CXX)
LD_c++-compat = $(DEFAULT_CC) LD_noexcept = $(DEFAULT_CC)
LDXX_c++-compat = $(DEFAULT_CXX) LDXX_noexcept = $(DEFAULT_CXX)
CFLAGS_c++-compat = -Wc++-compat CXXFLAGS_noexcept = -fno-exceptions
CPPFLAGS_c++-compat = -O0 CPPFLAGS_noexcept = -O2
DEFINES_c++-compat = _DEBUG DEBUG DEFINES_noexcept = NDEBUG
VALID_CONFIG_ubsan = 1 VALID_CONFIG_ubsan = 1
REQUIRE_CUSTOM_LIBRARIES_ubsan = 1 REQUIRE_CUSTOM_LIBRARIES_ubsan = 1
@ -207,6 +205,15 @@ LDXX_lto = $(DEFAULT_CXX)
CPPFLAGS_lto = -O2 CPPFLAGS_lto = -O2
DEFINES_lto = NDEBUG DEFINES_lto = NDEBUG
VALID_CONFIG_c++-compat = 1
CC_c++-compat = $(DEFAULT_CC)
CXX_c++-compat = $(DEFAULT_CXX)
LD_c++-compat = $(DEFAULT_CC)
LDXX_c++-compat = $(DEFAULT_CXX)
CFLAGS_c++-compat = -Wc++-compat
CPPFLAGS_c++-compat = -O0
DEFINES_c++-compat = _DEBUG DEBUG
VALID_CONFIG_mutrace = 1 VALID_CONFIG_mutrace = 1
CC_mutrace = $(DEFAULT_CC) CC_mutrace = $(DEFAULT_CC)
CXX_mutrace = $(DEFAULT_CXX) CXX_mutrace = $(DEFAULT_CXX)
@ -643,7 +650,6 @@ ZLIB_DEP = $(LIBDIR)/$(CONFIG)/libz.a
ZLIB_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libz.a ZLIB_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libz.a
ZLIB_MERGE_OBJS = $(LIBZ_OBJS) ZLIB_MERGE_OBJS = $(LIBZ_OBJS)
CPPFLAGS += -Ithird_party/zlib CPPFLAGS += -Ithird_party/zlib
LDFLAGS += -L$(LIBDIR)/$(CONFIG)/zlib
else else
ifeq ($(HAS_PKG_CONFIG),true) ifeq ($(HAS_PKG_CONFIG),true)
CPPFLAGS += $(shell $(PKG_CONFIG) --cflags zlib) CPPFLAGS += $(shell $(PKG_CONFIG) --cflags zlib)
@ -674,7 +680,6 @@ CARES_DEP = $(LIBDIR)/$(CONFIG)/libares.a
CARES_MERGE_OBJS = $(LIBARES_OBJS) CARES_MERGE_OBJS = $(LIBARES_OBJS)
CARES_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libares.a CARES_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libares.a
CPPFLAGS := -Ithird_party/cares -Ithird_party/cares/cares $(CPPFLAGS) CPPFLAGS := -Ithird_party/cares -Ithird_party/cares/cares $(CPPFLAGS)
LDFLAGS := -L$(LIBDIR)/$(CONFIG)/c-ares $(LDFLAGS)
else else
ifeq ($(HAS_PKG_CONFIG),true) ifeq ($(HAS_PKG_CONFIG),true)
PC_REQUIRES_GRPC += libcares PC_REQUIRES_GRPC += libcares
@ -1126,6 +1131,7 @@ cxx_string_ref_test: $(BINDIR)/$(CONFIG)/cxx_string_ref_test
cxx_time_test: $(BINDIR)/$(CONFIG)/cxx_time_test cxx_time_test: $(BINDIR)/$(CONFIG)/cxx_time_test
end2end_test: $(BINDIR)/$(CONFIG)/end2end_test end2end_test: $(BINDIR)/$(CONFIG)/end2end_test
error_details_test: $(BINDIR)/$(CONFIG)/error_details_test error_details_test: $(BINDIR)/$(CONFIG)/error_details_test
exception_test: $(BINDIR)/$(CONFIG)/exception_test
filter_end2end_test: $(BINDIR)/$(CONFIG)/filter_end2end_test filter_end2end_test: $(BINDIR)/$(CONFIG)/filter_end2end_test
generic_end2end_test: $(BINDIR)/$(CONFIG)/generic_end2end_test generic_end2end_test: $(BINDIR)/$(CONFIG)/generic_end2end_test
golden_file_test: $(BINDIR)/$(CONFIG)/golden_file_test golden_file_test: $(BINDIR)/$(CONFIG)/golden_file_test
@ -1154,6 +1160,7 @@ memory_test: $(BINDIR)/$(CONFIG)/memory_test
metrics_client: $(BINDIR)/$(CONFIG)/metrics_client metrics_client: $(BINDIR)/$(CONFIG)/metrics_client
mock_test: $(BINDIR)/$(CONFIG)/mock_test mock_test: $(BINDIR)/$(CONFIG)/mock_test
noop-benchmark: $(BINDIR)/$(CONFIG)/noop-benchmark noop-benchmark: $(BINDIR)/$(CONFIG)/noop-benchmark
orphanable_test: $(BINDIR)/$(CONFIG)/orphanable_test
proto_server_reflection_test: $(BINDIR)/$(CONFIG)/proto_server_reflection_test proto_server_reflection_test: $(BINDIR)/$(CONFIG)/proto_server_reflection_test
proto_utils_test: $(BINDIR)/$(CONFIG)/proto_utils_test proto_utils_test: $(BINDIR)/$(CONFIG)/proto_utils_test
qps_interarrival_test: $(BINDIR)/$(CONFIG)/qps_interarrival_test qps_interarrival_test: $(BINDIR)/$(CONFIG)/qps_interarrival_test
@ -1302,10 +1309,10 @@ third_party/protobuf/configure:
$(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure
$(E) "[MAKE] Building protobuf" $(E) "[MAKE] Building protobuf"
$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf
$(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static $(PROTOBUF_CONFIG_OPTS)) $(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static $(PROTOBUF_CONFIG_OPTS))
$(Q)$(MAKE) -C third_party/protobuf clean $(Q)$(MAKE) -C third_party/protobuf clean
$(Q)$(MAKE) -C third_party/protobuf $(Q)$(MAKE) -C third_party/protobuf
$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf
$(Q)mkdir -p $(BINDIR)/$(CONFIG)/protobuf $(Q)mkdir -p $(BINDIR)/$(CONFIG)/protobuf
$(Q)cp third_party/protobuf/src/.libs/libprotoc.a $(LIBDIR)/$(CONFIG)/protobuf $(Q)cp third_party/protobuf/src/.libs/libprotoc.a $(LIBDIR)/$(CONFIG)/protobuf
$(Q)cp third_party/protobuf/src/.libs/libprotobuf.a $(LIBDIR)/$(CONFIG)/protobuf $(Q)cp third_party/protobuf/src/.libs/libprotobuf.a $(LIBDIR)/$(CONFIG)/protobuf
@ -1574,6 +1581,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/cxx_time_test \ $(BINDIR)/$(CONFIG)/cxx_time_test \
$(BINDIR)/$(CONFIG)/end2end_test \ $(BINDIR)/$(CONFIG)/end2end_test \
$(BINDIR)/$(CONFIG)/error_details_test \ $(BINDIR)/$(CONFIG)/error_details_test \
$(BINDIR)/$(CONFIG)/exception_test \
$(BINDIR)/$(CONFIG)/filter_end2end_test \ $(BINDIR)/$(CONFIG)/filter_end2end_test \
$(BINDIR)/$(CONFIG)/generic_end2end_test \ $(BINDIR)/$(CONFIG)/generic_end2end_test \
$(BINDIR)/$(CONFIG)/golden_file_test \ $(BINDIR)/$(CONFIG)/golden_file_test \
@ -1595,6 +1603,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/metrics_client \ $(BINDIR)/$(CONFIG)/metrics_client \
$(BINDIR)/$(CONFIG)/mock_test \ $(BINDIR)/$(CONFIG)/mock_test \
$(BINDIR)/$(CONFIG)/noop-benchmark \ $(BINDIR)/$(CONFIG)/noop-benchmark \
$(BINDIR)/$(CONFIG)/orphanable_test \
$(BINDIR)/$(CONFIG)/proto_server_reflection_test \ $(BINDIR)/$(CONFIG)/proto_server_reflection_test \
$(BINDIR)/$(CONFIG)/proto_utils_test \ $(BINDIR)/$(CONFIG)/proto_utils_test \
$(BINDIR)/$(CONFIG)/qps_interarrival_test \ $(BINDIR)/$(CONFIG)/qps_interarrival_test \
@ -1704,6 +1713,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/cxx_time_test \ $(BINDIR)/$(CONFIG)/cxx_time_test \
$(BINDIR)/$(CONFIG)/end2end_test \ $(BINDIR)/$(CONFIG)/end2end_test \
$(BINDIR)/$(CONFIG)/error_details_test \ $(BINDIR)/$(CONFIG)/error_details_test \
$(BINDIR)/$(CONFIG)/exception_test \
$(BINDIR)/$(CONFIG)/filter_end2end_test \ $(BINDIR)/$(CONFIG)/filter_end2end_test \
$(BINDIR)/$(CONFIG)/generic_end2end_test \ $(BINDIR)/$(CONFIG)/generic_end2end_test \
$(BINDIR)/$(CONFIG)/golden_file_test \ $(BINDIR)/$(CONFIG)/golden_file_test \
@ -1725,6 +1735,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/metrics_client \ $(BINDIR)/$(CONFIG)/metrics_client \
$(BINDIR)/$(CONFIG)/mock_test \ $(BINDIR)/$(CONFIG)/mock_test \
$(BINDIR)/$(CONFIG)/noop-benchmark \ $(BINDIR)/$(CONFIG)/noop-benchmark \
$(BINDIR)/$(CONFIG)/orphanable_test \
$(BINDIR)/$(CONFIG)/proto_server_reflection_test \ $(BINDIR)/$(CONFIG)/proto_server_reflection_test \
$(BINDIR)/$(CONFIG)/proto_utils_test \ $(BINDIR)/$(CONFIG)/proto_utils_test \
$(BINDIR)/$(CONFIG)/qps_interarrival_test \ $(BINDIR)/$(CONFIG)/qps_interarrival_test \
@ -2104,6 +2115,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/end2end_test || ( echo test end2end_test failed ; exit 1 ) $(Q) $(BINDIR)/$(CONFIG)/end2end_test || ( echo test end2end_test failed ; exit 1 )
$(E) "[RUN] Testing error_details_test" $(E) "[RUN] Testing error_details_test"
$(Q) $(BINDIR)/$(CONFIG)/error_details_test || ( echo test error_details_test failed ; exit 1 ) $(Q) $(BINDIR)/$(CONFIG)/error_details_test || ( echo test error_details_test failed ; exit 1 )
$(E) "[RUN] Testing exception_test"
$(Q) $(BINDIR)/$(CONFIG)/exception_test || ( echo test exception_test failed ; exit 1 )
$(E) "[RUN] Testing filter_end2end_test" $(E) "[RUN] Testing filter_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/filter_end2end_test || ( echo test filter_end2end_test failed ; exit 1 ) $(Q) $(BINDIR)/$(CONFIG)/filter_end2end_test || ( echo test filter_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing generic_end2end_test" $(E) "[RUN] Testing generic_end2end_test"
@ -2132,6 +2145,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/mock_test || ( echo test mock_test failed ; exit 1 ) $(Q) $(BINDIR)/$(CONFIG)/mock_test || ( echo test mock_test failed ; exit 1 )
$(E) "[RUN] Testing noop-benchmark" $(E) "[RUN] Testing noop-benchmark"
$(Q) $(BINDIR)/$(CONFIG)/noop-benchmark || ( echo test noop-benchmark failed ; exit 1 ) $(Q) $(BINDIR)/$(CONFIG)/noop-benchmark || ( echo test noop-benchmark failed ; exit 1 )
$(E) "[RUN] Testing orphanable_test"
$(Q) $(BINDIR)/$(CONFIG)/orphanable_test || ( echo test orphanable_test failed ; exit 1 )
$(E) "[RUN] Testing proto_server_reflection_test" $(E) "[RUN] Testing proto_server_reflection_test"
$(Q) $(BINDIR)/$(CONFIG)/proto_server_reflection_test || ( echo test proto_server_reflection_test failed ; exit 1 ) $(Q) $(BINDIR)/$(CONFIG)/proto_server_reflection_test || ( echo test proto_server_reflection_test failed ; exit 1 )
$(E) "[RUN] Testing proto_utils_test" $(E) "[RUN] Testing proto_utils_test"
@ -8552,6 +8567,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \ test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \ test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \ test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \ test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \ test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \ test/core/end2end/tests/hpack_size.cc \
@ -8650,6 +8666,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \ test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \ test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \ test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \ test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \ test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \ test/core/end2end/tests/hpack_size.cc \
@ -14974,6 +14991,49 @@ endif
$(OBJDIR)/$(CONFIG)/test/cpp/util/error_details_test.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(OBJDIR)/$(CONFIG)/test/cpp/util/error_details_test.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc
EXCEPTION_TEST_SRC = \
test/cpp/end2end/exception_test.cc \
EXCEPTION_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(EXCEPTION_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/exception_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/exception_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/exception_test: $(PROTOBUF_DEP) $(EXCEPTION_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(EXCEPTION_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/exception_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/exception_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_exception_test: $(EXCEPTION_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(EXCEPTION_TEST_OBJS:.o=.dep)
endif
endif
FILTER_END2END_TEST_SRC = \ FILTER_END2END_TEST_SRC = \
test/cpp/end2end/filter_end2end_test.cc \ test/cpp/end2end/filter_end2end_test.cc \
@ -16086,6 +16146,49 @@ endif
endif endif
ORPHANABLE_TEST_SRC = \
test/core/support/orphanable_test.cc \
ORPHANABLE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(ORPHANABLE_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/orphanable_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/orphanable_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/orphanable_test: $(PROTOBUF_DEP) $(ORPHANABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(ORPHANABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/orphanable_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/core/support/orphanable_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_orphanable_test: $(ORPHANABLE_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(ORPHANABLE_TEST_OBJS:.o=.dep)
endif
endif
PROTO_SERVER_REFLECTION_TEST_SRC = \ PROTO_SERVER_REFLECTION_TEST_SRC = \
test/cpp/end2end/proto_server_reflection_test.cc \ test/cpp/end2end/proto_server_reflection_test.cc \

@ -2,4 +2,5 @@ set noparent
@nicolasnoble @nicolasnoble
@dgquintas @dgquintas
@a11r @a11r
@vjpai

@ -30,9 +30,12 @@ def _get_external_deps(external_deps):
ret = [] ret = []
for dep in external_deps: for dep in external_deps:
if dep == "nanopb": if dep == "nanopb":
ret.append("//third_party/nanopb") ret += ["//third_party/nanopb"]
elif dep == "cares":
ret += select({"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],})
else: else:
ret.append("//external:" + dep) ret += ["//external:" + dep]
return ret return ret
def _maybe_update_cc_library_hdrs(hdrs): def _maybe_update_cc_library_hdrs(hdrs):
@ -60,6 +63,10 @@ def grpc_cc_library(name, srcs = [], public_hdrs = [], hdrs = [],
defines = select({"//:grpc_no_ares": ["GRPC_ARES=0"], defines = select({"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],}) + "//conditions:default": [],}) +
select({"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"], select({"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],}) +
select({"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions":
["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],}), "//conditions:default": [],}),
hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs), hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs),
deps = deps + _get_external_deps(external_deps), deps = deps + _get_external_deps(external_deps),

@ -397,6 +397,7 @@ filegroups:
- src/core/lib/slice/slice_internal.h - src/core/lib/slice/slice_internal.h
- src/core/lib/slice/slice_string_helpers.h - src/core/lib/slice/slice_string_helpers.h
- src/core/lib/support/debug_location.h - src/core/lib/support/debug_location.h
- src/core/lib/support/orphanable.h
- src/core/lib/support/ref_counted.h - src/core/lib/support/ref_counted.h
- src/core/lib/support/ref_counted_ptr.h - src/core/lib/support/ref_counted_ptr.h
- src/core/lib/support/vector.h - src/core/lib/support/vector.h
@ -4007,6 +4008,19 @@ targets:
deps: deps:
- grpc++_error_details - grpc++_error_details
- grpc++ - grpc++
- name: exception_test
gtest: true
build: test
language: c++
src:
- test/cpp/end2end/exception_test.cc
deps:
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
- name: filter_end2end_test - name: filter_end2end_test
gtest: true gtest: true
build: test build: test
@ -4390,6 +4404,20 @@ targets:
deps: deps:
- benchmark - benchmark
defaults: benchmark defaults: benchmark
- name: orphanable_test
gtest: true
build: test
language: c++
src:
- test/core/support/orphanable_test.cc
deps:
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
uses:
- grpc++_test
- name: proto_server_reflection_test - name: proto_server_reflection_test
gtest: true gtest: true
build: test build: test
@ -4927,7 +4955,6 @@ configs:
DEFINES: NDEBUG DEFINES: NDEBUG
dbg: dbg:
CPPFLAGS: -O0 CPPFLAGS: -O0
CXXFLAGS: -fno-exceptions
DEFINES: _DEBUG DEBUG DEFINES: _DEBUG DEBUG
gcov: gcov:
CC: gcc CC: gcc
@ -4968,10 +4995,13 @@ configs:
CPPFLAGS: -O3 -fno-omit-frame-pointer CPPFLAGS: -O3 -fno-omit-frame-pointer
DEFINES: NDEBUG DEFINES: NDEBUG
LDFLAGS: -rdynamic LDFLAGS: -rdynamic
opt: noexcept:
CPPFLAGS: -O2 CPPFLAGS: -O2
CXXFLAGS: -fno-exceptions CXXFLAGS: -fno-exceptions
DEFINES: NDEBUG DEFINES: NDEBUG
opt:
CPPFLAGS: -O2
DEFINES: NDEBUG
stapprof: stapprof:
CPPFLAGS: -O2 -DGRPC_STAP_PROFILER CPPFLAGS: -O2 -DGRPC_STAP_PROFILER
DEFINES: NDEBUG DEFINES: NDEBUG

@ -20,14 +20,17 @@ if("${gRPC_BENCHMARK_PROVIDER}" STREQUAL "module")
add_subdirectory(${BENCHMARK_ROOT_DIR} third_party/benchmark) add_subdirectory(${BENCHMARK_ROOT_DIR} third_party/benchmark)
if(TARGET benchmark) if(TARGET benchmark)
set(_gRPC_BENCHMARK_LIBRARIES benchmark) set(_gRPC_BENCHMARK_LIBRARIES benchmark)
set(_gRPC_BENCHMARK_INCLUDE_DIR "${BENCHMARK_ROOT_DIR}/include")
endif() endif()
else() else()
message(WARNING "gRPC_BENCHMARK_PROVIDER is \"module\" but BENCHMARK_ROOT_DIR is wrong") message(WARNING "gRPC_BENCHMARK_PROVIDER is \"module\" but BENCHMARK_ROOT_DIR is wrong")
endif() endif()
elseif("${gRPC_BENCHMARK_PROVIDER}" STREQUAL "package") elseif("${gRPC_BENCHMARK_PROVIDER}" STREQUAL "package")
find_package(benchmark) find_package(benchmark REQUIRED)
if(TARGET benchmark::benchmark) if(TARGET benchmark::benchmark)
set(_gRPC_BENCHMARK_LIBRARIES benchmark::benchmark) set(_gRPC_BENCHMARK_LIBRARIES benchmark::benchmark)
# extract the include dir from target's properties
get_target_property(_gRPC_BENCHMARK_INCLUDE_DIR benchmark::benchmark INTERFACE_INCLUDE_DIRECTORIES)
endif() endif()
set(_gRPC_FIND_BENCHMARK "if(NOT benchmark_FOUND)\n find_package(benchmark)\nendif()") set(_gRPC_FIND_BENCHMARK "if(NOT benchmark_FOUND)\n find_package(benchmark)\nendif()")
endif() endif()

@ -18,11 +18,13 @@ if("${gRPC_CARES_PROVIDER}" STREQUAL "module")
endif() endif()
set(CARES_SHARED OFF CACHE BOOL "disable shared library") set(CARES_SHARED OFF CACHE BOOL "disable shared library")
set(CARES_STATIC ON CACHE BOOL "link cares statically") set(CARES_STATIC ON CACHE BOOL "link cares statically")
set(CARES_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares")
add_subdirectory(third_party/cares/cares) add_subdirectory(third_party/cares/cares)
if(TARGET c-ares) if(TARGET c-ares)
set(_gRPC_CARES_LIBRARIES c-ares) set(_gRPC_CARES_LIBRARIES c-ares)
set(_gRPC_CARES_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares" "${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares")
endif() endif()
if(gRPC_INSTALL) if(gRPC_INSTALL)
message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_CARES_PROVIDER is \"module\"") message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_CARES_PROVIDER is \"module\"")
set(gRPC_INSTALL FALSE) set(gRPC_INSTALL FALSE)
@ -31,6 +33,7 @@ elseif("${gRPC_CARES_PROVIDER}" STREQUAL "package")
find_package(c-ares REQUIRED CONFIG) find_package(c-ares REQUIRED CONFIG)
if(TARGET c-ares::cares) if(TARGET c-ares::cares)
set(_gRPC_CARES_LIBRARIES c-ares::cares) set(_gRPC_CARES_LIBRARIES c-ares::cares)
set(_gRPC_CARES_INCLUDE_DIR ${c-ares_INCLUDE_DIR})
endif() endif()
set(_gRPC_FIND_CARES "if(NOT c-ares_FOUND)\n find_package(c-ares CONFIG)\nendif()") set(_gRPC_FIND_CARES "if(NOT c-ares_FOUND)\n find_package(c-ares CONFIG)\nendif()")
endif() endif()

@ -17,17 +17,19 @@ if("${gRPC_GFLAGS_PROVIDER}" STREQUAL "module")
set(GFLAGS_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gflags) set(GFLAGS_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gflags)
endif() endif()
if(EXISTS "${GFLAGS_ROOT_DIR}/CMakeLists.txt") if(EXISTS "${GFLAGS_ROOT_DIR}/CMakeLists.txt")
add_subdirectory(${GFLAGS_ROOT_DIR} third_party/gflags) add_subdirectory(${GFLAGS_ROOT_DIR} third_party/gflags)
if(TARGET gflags_static) if(TARGET gflags_static)
set(_gRPC_GFLAGS_LIBRARIES gflags_static) set(_gRPC_GFLAGS_LIBRARIES gflags_static)
endif() set(_gRPC_GFLAGS_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include")
endif()
else() else()
message(WARNING "gRPC_GFLAGS_PROVIDER is \"module\" but GFLAGS_ROOT_DIR is wrong") message(WARNING "gRPC_GFLAGS_PROVIDER is \"module\" but GFLAGS_ROOT_DIR is wrong")
endif() endif()
elseif("${gRPC_GFLAGS_PROVIDER}" STREQUAL "package") elseif("${gRPC_GFLAGS_PROVIDER}" STREQUAL "package")
find_package(gflags) find_package(gflags REQUIRED)
if(TARGET gflags::gflags) if(TARGET gflags::gflags)
set(_gRPC_GFLAGS_LIBRARIES gflags::gflags) set(_gRPC_GFLAGS_LIBRARIES gflags::gflags)
set(_gRPC_GFLAGS_INCLUDE_DIR ${GFLAGS_INCLUDE_DIR})
endif() endif()
set(_gRPC_FIND_GFLAGS "if(NOT gflags_FOUND)\n find_package(gflags)\nendif()") set(_gRPC_FIND_GFLAGS "if(NOT gflags_FOUND)\n find_package(gflags)\nendif()")
endif() endif()

@ -27,7 +27,7 @@ if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module")
if(NOT PROTOBUF_ROOT_DIR) if(NOT PROTOBUF_ROOT_DIR)
set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf) set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf)
endif() endif()
set(PROTOBUF_WELLKNOWN_IMPORT_DIR ${PROTOBUF_ROOT_DIR}/src)
if(EXISTS "${PROTOBUF_ROOT_DIR}/cmake/CMakeLists.txt") if(EXISTS "${PROTOBUF_ROOT_DIR}/cmake/CMakeLists.txt")
set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries") set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries")
add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf) add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf)
@ -41,6 +41,9 @@ if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module")
set(_gRPC_PROTOBUF_PROTOC protoc) set(_gRPC_PROTOBUF_PROTOC protoc)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>) set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
endif() endif()
set(_gRPC_PROTOBUF_INCLUDE_DIR "${PROTOBUF_ROOT_DIR}")
# For well-known .proto files distributed with protobuf
set(_gRPC_PROTOBUF_WELLKNOWN_INCLUDE_DIR "${PROTOBUF_ROOT_DIR}/src")
else() else()
message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong") message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong")
endif() endif()
@ -50,6 +53,11 @@ if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module")
endif() endif()
elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package") elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package")
find_package(Protobuf REQUIRED ${gRPC_PROTOBUF_PACKAGE_TYPE}) find_package(Protobuf REQUIRED ${gRPC_PROTOBUF_PACKAGE_TYPE})
# {Protobuf,PROTOBUF}_FOUND is defined based on find_package type ("MODULE" vs "CONFIG").
# For "MODULE", the case has also changed between cmake 3.5 and 3.6.
# We use the legacy uppercase version for *_LIBRARIES AND *_INCLUDE_DIRS variables
# as newer cmake versions provide them too for backward compatibility.
if(Protobuf_FOUND OR PROTOBUF_FOUND) if(Protobuf_FOUND OR PROTOBUF_FOUND)
if(TARGET protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME}) if(TARGET protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
set(_gRPC_PROTOBUF_LIBRARIES protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME}) set(_gRPC_PROTOBUF_LIBRARIES protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
@ -58,8 +66,11 @@ elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package")
endif() endif()
if(TARGET protobuf::libprotoc) if(TARGET protobuf::libprotoc)
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES protobuf::libprotoc) set(_gRPC_PROTOBUF_PROTOC_LIBRARIES protobuf::libprotoc)
# extract the include dir from target's properties
get_target_property(_gRPC_PROTOBUF_WELLKNOWN_INCLUDE_DIR protobuf::libprotoc INTERFACE_INCLUDE_DIRECTORIES)
else() else()
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ${PROTOBUF_PROTOC_LIBRARIES}) set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ${PROTOBUF_PROTOC_LIBRARIES})
set(_gRPC_PROTOBUF_WELLKNOWN_INCLUDE_DIR ${PROTOBUF_INCLUDE_DIRS})
endif() endif()
if(TARGET protobuf::protoc) if(TARGET protobuf::protoc)
set(_gRPC_PROTOBUF_PROTOC protobuf::protoc) set(_gRPC_PROTOBUF_PROTOC protobuf::protoc)
@ -68,10 +79,7 @@ elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package")
set(_gRPC_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE}) set(_gRPC_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE})
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE}) set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE})
endif() endif()
set(_gRPC_PROTOBUF_INCLUDE_DIR ${PROTOBUF_INCLUDE_DIRS})
set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND AND NOT PROTOBUF_FOUND)\n find_package(Protobuf ${gRPC_PROTOBUF_PACKAGE_TYPE})\nendif()") set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND AND NOT PROTOBUF_FOUND)\n find_package(Protobuf ${gRPC_PROTOBUF_PACKAGE_TYPE})\nendif()")
endif() endif()
if(PROTOBUF_FOUND)
include_directories(${PROTOBUF_INCLUDE_DIRS})
endif()
set(PROTOBUF_WELLKNOWN_IMPORT_DIR /usr/local/include)
endif() endif()

@ -16,15 +16,15 @@ if("${gRPC_ZLIB_PROVIDER}" STREQUAL "module")
if(NOT ZLIB_ROOT_DIR) if(NOT ZLIB_ROOT_DIR)
set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib) set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib)
endif() endif()
set(ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}")
if(EXISTS "${ZLIB_ROOT_DIR}/CMakeLists.txt") if(EXISTS "${ZLIB_ROOT_DIR}/CMakeLists.txt")
# TODO(jtattermusch): workaround for https://github.com/madler/zlib/issues/218 # TODO(jtattermusch): workaround for https://github.com/madler/zlib/issues/218
include_directories(${ZLIB_INCLUDE_DIR}) include_directories("${ZLIB_ROOT_DIR}")
add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib)
add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib) if(TARGET zlibstatic)
if(TARGET zlibstatic) set(_gRPC_ZLIB_LIBRARIES zlibstatic)
set(_gRPC_ZLIB_LIBRARIES zlibstatic) set(_gRPC_ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}" "${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib")
endif() endif()
else() else()
message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong") message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong")
endif() endif()
@ -35,5 +35,6 @@ if("${gRPC_ZLIB_PROVIDER}" STREQUAL "module")
elseif("${gRPC_ZLIB_PROVIDER}" STREQUAL "package") elseif("${gRPC_ZLIB_PROVIDER}" STREQUAL "package")
find_package(ZLIB REQUIRED) find_package(ZLIB REQUIRED)
set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES}) set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES})
set(_gRPC_ZLIB_INCLUDE_DIR ${ZLIB_INCLUDE_DIRS})
set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n find_package(ZLIB)\nendif()") set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n find_package(ZLIB)\nendif()")
endif() endif()

@ -127,3 +127,8 @@ some configuration as environment variables that can be set.
there is no active polling thread. They help reconnect disconnected client there is no active polling thread. They help reconnect disconnected client
channels (mostly due to idleness), so that the next RPC on this channel won't channels (mostly due to idleness), so that the next RPC on this channel won't
fail. Set to 0 to turn off the backup polls. fail. Set to 0 to turn off the backup polls.
* GRPC_EXPERIMENTAL_DISABLE_FLOW_CONTROL
if set, flow control will be effectively disabled. Max out all values and
assume the remote peer does the same. Thus we can ignore any flow control
bookkeeping, error checking, and decision making

@ -16,15 +16,22 @@ endif()
find_package(Protobuf REQUIRED) find_package(Protobuf REQUIRED)
message(STATUS "Using protobuf ${protobuf_VERSION}") message(STATUS "Using protobuf ${protobuf_VERSION}")
if(Protobuf_FOUND) # {Protobuf,PROTOBUF}_FOUND is defined based on find_package type ("MODULE" vs "CONFIG").
# Protobuf_FOUND is set for package type "CONFIG" # For "MODULE", the case has also changed between cmake 3.5 and 3.6.
set(_PROTOBUF_LIBPROTOBUF protobuf::libprotobuf) # We use the legacy uppercase version for *_LIBRARIES AND *_INCLUDE_DIRS variables
set(_PROTOBUF_PROTOC protobuf::protoc) # as newer cmake versions provide them too for backward compatibility.
elseif(PROTOBUF_FOUND) if(Protobuf_FOUND OR PROTOBUF_FOUND)
# PROTOBUF_FOUND is set for package type "MODULE" if(TARGET protobuf::libprotobuf)
set(_PROTOBUF_LIBPROTOBUF ${PROTOBUF_LIBRARIES}) set(_PROTOBUF_LIBPROTOBUF protobuf::libprotobuf)
set(_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE}) else()
include_directories(${PROTOBUF_INCLUDE_DIRS}) set(_PROTOBUF_LIBPROTOBUF ${PROTOBUF_LIBRARIES})
include_directories(${PROTOBUF_INCLUDE_DIRS})
endif()
if(TARGET protobuf::protoc)
set(_PROTOBUF_PROTOC $<TARGET_FILE:protobuf::protoc>)
else()
set(_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE})
endif()
else() else()
message(WARNING "Failed to locate libprotobuf and protoc!") message(WARNING "Failed to locate libprotobuf and protoc!")
endif() endif()

@ -420,6 +420,7 @@ Pod::Spec.new do |s|
'src/core/lib/slice/slice_internal.h', 'src/core/lib/slice/slice_internal.h',
'src/core/lib/slice/slice_string_helpers.h', 'src/core/lib/slice/slice_string_helpers.h',
'src/core/lib/support/debug_location.h', 'src/core/lib/support/debug_location.h',
'src/core/lib/support/orphanable.h',
'src/core/lib/support/ref_counted.h', 'src/core/lib/support/ref_counted.h',
'src/core/lib/support/ref_counted_ptr.h', 'src/core/lib/support/ref_counted_ptr.h',
'src/core/lib/support/vector.h', 'src/core/lib/support/vector.h',
@ -901,6 +902,7 @@ Pod::Spec.new do |s|
'src/core/lib/slice/slice_internal.h', 'src/core/lib/slice/slice_internal.h',
'src/core/lib/slice/slice_string_helpers.h', 'src/core/lib/slice/slice_string_helpers.h',
'src/core/lib/support/debug_location.h', 'src/core/lib/support/debug_location.h',
'src/core/lib/support/orphanable.h',
'src/core/lib/support/ref_counted.h', 'src/core/lib/support/ref_counted.h',
'src/core/lib/support/ref_counted_ptr.h', 'src/core/lib/support/ref_counted_ptr.h',
'src/core/lib/support/vector.h', 'src/core/lib/support/vector.h',
@ -1039,6 +1041,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/filter_call_init_fails.cc', 'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc', 'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc', 'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc', 'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc', 'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc', 'test/core/end2end/tests/hpack_size.cc',
@ -1084,6 +1087,6 @@ Pod::Spec.new do |s|
# TODO (mxyan): Instead of this hack, add include path "third_party" to C core's include path? # TODO (mxyan): Instead of this hack, add include path "third_party" to C core's include path?
s.prepare_command = <<-END_OF_COMMAND s.prepare_command = <<-END_OF_COMMAND
find src/core/ -type f -exec sed -E -i '.back' 's;#include "third_party/nanopb/(.*)";#include <nanopb/\\1>;g' {} \\\; find src/core/ -type f -exec sed -E -i'.back' 's;#include "third_party/nanopb/(.*)";#include <nanopb/\\1>;g' {} \\\;
END_OF_COMMAND END_OF_COMMAND
end end

@ -346,6 +346,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/slice/slice_internal.h ) s.files += %w( src/core/lib/slice/slice_internal.h )
s.files += %w( src/core/lib/slice/slice_string_helpers.h ) s.files += %w( src/core/lib/slice/slice_string_helpers.h )
s.files += %w( src/core/lib/support/debug_location.h ) s.files += %w( src/core/lib/support/debug_location.h )
s.files += %w( src/core/lib/support/orphanable.h )
s.files += %w( src/core/lib/support/ref_counted.h ) s.files += %w( src/core/lib/support/ref_counted.h )
s.files += %w( src/core/lib/support/ref_counted_ptr.h ) s.files += %w( src/core/lib/support/ref_counted_ptr.h )
s.files += %w( src/core/lib/support/vector.h ) s.files += %w( src/core/lib/support/vector.h )

@ -2381,6 +2381,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc', 'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc', 'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc', 'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc', 'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc', 'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc', 'test/core/end2end/tests/hpack_size.cc',
@ -2453,6 +2454,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc', 'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc', 'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc', 'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc', 'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc', 'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc', 'test/core/end2end/tests/hpack_size.cc',

@ -41,8 +41,6 @@ template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler; class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler; class ServerStreamingHandler;
template <StatusCode code>
class ErrorMethodHandler;
template <class R> template <class R>
class DeserializeFuncType; class DeserializeFuncType;
} // namespace internal } // namespace internal
@ -109,8 +107,6 @@ class ByteBuffer final {
friend class internal::RpcMethodHandler; friend class internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class internal::ServerStreamingHandler; friend class internal::ServerStreamingHandler;
template <StatusCode code>
friend class internal::ErrorMethodHandler;
template <class R> template <class R>
friend class internal::DeserializeFuncType; friend class internal::DeserializeFuncType;

@ -78,8 +78,7 @@ template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler; class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler; class BidiStreamingHandler;
template <StatusCode code> class UnknownMethodHandler;
class ErrorMethodHandler;
template <class Streamer, bool WriteNeeded> template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler; class TemplatedBidiStreamingHandler;
template <class InputMessage, class OutputMessage> template <class InputMessage, class OutputMessage>
@ -222,8 +221,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
friend class ::grpc::internal::ServerStreamingHandler; friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded> template <class Streamer, bool WriteNeeded>
friend class ::grpc::internal::TemplatedBidiStreamingHandler; friend class ::grpc::internal::TemplatedBidiStreamingHandler;
template <StatusCode code> friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::internal::ErrorMethodHandler;
friend class ::grpc::Server; friend class ::grpc::Server;
friend class ::grpc::ServerContext; friend class ::grpc::ServerContext;
friend class ::grpc::ServerInterface; friend class ::grpc::ServerInterface;

@ -27,6 +27,27 @@
namespace grpc { namespace grpc {
namespace internal { namespace internal {
// Invoke the method handler, fill in the status, and
// return whether or not we finished safely (without an exception).
// Note that exception handling is 0-cost in most compiler/library
// implementations (except when an exception is actually thrown),
// so this process doesn't require additional overhead in the common case.
// Additionally, we don't need to return if we caught an exception or not;
// the handling is the same in either case.
template <class Callable>
Status CatchingFunctionHandler(Callable&& handler) {
#if GRPC_ALLOW_EXCEPTIONS
try {
return handler();
} catch (...) {
return Status(StatusCode::UNKNOWN, "Unexpected error in RPC handling");
}
#else // GRPC_ALLOW_EXCEPTIONS
return handler();
#endif // GRPC_ALLOW_EXCEPTIONS
}
/// A wrapper class of an application provided rpc method handler. /// A wrapper class of an application provided rpc method handler.
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler : public MethodHandler { class RpcMethodHandler : public MethodHandler {
@ -43,7 +64,9 @@ class RpcMethodHandler : public MethodHandler {
param.request.bbuf_ptr(), &req); param.request.bbuf_ptr(), &req);
ResponseType rsp; ResponseType rsp;
if (status.ok()) { if (status.ok()) {
status = func_(service_, param.server_context, &req, &rsp); status = CatchingFunctionHandler([this, &param, &req, &rsp] {
return func_(service_, param.server_context, &req, &rsp);
});
} }
GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_); GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_);
@ -86,7 +109,9 @@ class ClientStreamingHandler : public MethodHandler {
void RunHandler(const HandlerParameter& param) final { void RunHandler(const HandlerParameter& param) final {
ServerReader<RequestType> reader(param.call, param.server_context); ServerReader<RequestType> reader(param.call, param.server_context);
ResponseType rsp; ResponseType rsp;
Status status = func_(service_, param.server_context, &reader, &rsp); Status status = CatchingFunctionHandler([this, &param, &reader, &rsp] {
return func_(service_, param.server_context, &reader, &rsp);
});
GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_); GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
@ -130,7 +155,9 @@ class ServerStreamingHandler : public MethodHandler {
if (status.ok()) { if (status.ok()) {
ServerWriter<ResponseType> writer(param.call, param.server_context); ServerWriter<ResponseType> writer(param.call, param.server_context);
status = func_(service_, param.server_context, &req, &writer); status = CatchingFunctionHandler([this, &param, &req, &writer] {
return func_(service_, param.server_context, &req, &writer);
});
} }
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops; CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
@ -172,7 +199,9 @@ class TemplatedBidiStreamingHandler : public MethodHandler {
void RunHandler(const HandlerParameter& param) final { void RunHandler(const HandlerParameter& param) final {
Streamer stream(param.call, param.server_context); Streamer stream(param.call, param.server_context);
Status status = func_(param.server_context, &stream); Status status = CatchingFunctionHandler([this, &param, &stream] {
return func_(param.server_context, &stream);
});
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops; CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
if (!param.server_context->sent_initial_metadata_) { if (!param.server_context->sent_initial_metadata_) {
@ -242,14 +271,12 @@ class SplitServerStreamingHandler
ServerSplitStreamer<RequestType, ResponseType>, false>(func) {} ServerSplitStreamer<RequestType, ResponseType>, false>(func) {}
}; };
/// General method handler class for errors that prevent real method use /// Handle unknown method by returning UNIMPLEMENTED error.
/// e.g., handle unknown method by returning UNIMPLEMENTED error. class UnknownMethodHandler : public MethodHandler {
template <StatusCode code>
class ErrorMethodHandler : public MethodHandler {
public: public:
template <class T> template <class T>
static void FillOps(ServerContext* context, T* ops) { static void FillOps(ServerContext* context, T* ops) {
Status status(code, ""); Status status(StatusCode::UNIMPLEMENTED, "");
if (!context->sent_initial_metadata_) { if (!context->sent_initial_metadata_) {
ops->SendInitialMetadata(context->initial_metadata_, ops->SendInitialMetadata(context->initial_metadata_,
context->initial_metadata_flags()); context->initial_metadata_flags());
@ -266,18 +293,9 @@ class ErrorMethodHandler : public MethodHandler {
FillOps(param.server_context, &ops); FillOps(param.server_context, &ops);
param.call->PerformOps(&ops); param.call->PerformOps(&ops);
param.call->cq()->Pluck(&ops); param.call->cq()->Pluck(&ops);
// We also have to destroy any request payload in the handler parameter
ByteBuffer* payload = param.request.bbuf_ptr();
if (payload != nullptr) {
payload->Clear();
}
} }
}; };
typedef ErrorMethodHandler<StatusCode::UNIMPLEMENTED> UnknownMethodHandler;
typedef ErrorMethodHandler<StatusCode::RESOURCE_EXHAUSTED>
ResourceExhaustedHandler;
} // namespace internal } // namespace internal
} // namespace grpc } // namespace grpc

@ -63,8 +63,7 @@ template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler; class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler; class BidiStreamingHandler;
template <StatusCode code> class UnknownMethodHandler;
class ErrorMethodHandler;
template <class Streamer, bool WriteNeeded> template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler; class TemplatedBidiStreamingHandler;
class Call; class Call;
@ -256,8 +255,7 @@ class ServerContext {
friend class ::grpc::internal::ServerStreamingHandler; friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded> template <class Streamer, bool WriteNeeded>
friend class ::grpc::internal::TemplatedBidiStreamingHandler; friend class ::grpc::internal::TemplatedBidiStreamingHandler;
template <StatusCode code> friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::internal::ErrorMethodHandler;
friend class ::grpc::ClientContext; friend class ::grpc::ClientContext;
/// Prevent copying. /// Prevent copying.

@ -35,7 +35,6 @@
#include <grpc++/support/config.h> #include <grpc++/support/config.h>
#include <grpc++/support/status.h> #include <grpc++/support/status.h>
#include <grpc/compression.h> #include <grpc/compression.h>
#include <grpc/support/thd.h>
struct grpc_server; struct grpc_server;
@ -139,20 +138,10 @@ class Server final : public ServerInterface, private GrpcLibraryCodegen {
/// ///
/// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on /// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on
/// server completion queues passed via sync_server_cqs param. /// server completion queues passed via sync_server_cqs param.
///
/// \param thread_creator The thread creation function for the sync
/// server. Typically gpr_thd_new
///
/// \param thread_joiner The thread joining function for the sync
/// server. Typically gpr_thd_join
Server(int max_message_size, ChannelArguments* args, Server(int max_message_size, ChannelArguments* args,
std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>> std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
sync_server_cqs, sync_server_cqs,
int min_pollers, int max_pollers, int sync_cq_timeout_msec, int min_pollers, int max_pollers, int sync_cq_timeout_msec);
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner);
/// Register a service. This call does not take ownership of the service. /// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance. /// The service must exist for the lifetime of the Server instance.
@ -231,14 +220,6 @@ class Server final : public ServerInterface, private GrpcLibraryCodegen {
std::unique_ptr<HealthCheckServiceInterface> health_check_service_; std::unique_ptr<HealthCheckServiceInterface> health_check_service_;
bool health_check_service_disabled_; bool health_check_service_disabled_;
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator_;
std::function<void(gpr_thd_id)> thread_joiner_;
// A special handler for resource exhausted in sync case
std::unique_ptr<internal::MethodHandler> resource_exhausted_handler_;
}; };
} // namespace grpc } // namespace grpc

@ -20,7 +20,6 @@
#define GRPCXX_SERVER_BUILDER_H #define GRPCXX_SERVER_BUILDER_H
#include <climits> #include <climits>
#include <functional>
#include <map> #include <map>
#include <memory> #include <memory>
#include <vector> #include <vector>
@ -31,7 +30,6 @@
#include <grpc++/support/config.h> #include <grpc++/support/config.h>
#include <grpc/compression.h> #include <grpc/compression.h>
#include <grpc/support/cpu.h> #include <grpc/support/cpu.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h> #include <grpc/support/useful.h>
#include <grpc/support/workaround_list.h> #include <grpc/support/workaround_list.h>
@ -49,7 +47,6 @@ class Service;
namespace testing { namespace testing {
class ServerBuilderPluginTest; class ServerBuilderPluginTest;
class ServerBuilderThreadCreatorOverrideTest;
} // namespace testing } // namespace testing
/// A builder class for the creation and startup of \a grpc::Server instances. /// A builder class for the creation and startup of \a grpc::Server instances.
@ -216,17 +213,6 @@ class ServerBuilder {
private: private:
friend class ::grpc::testing::ServerBuilderPluginTest; friend class ::grpc::testing::ServerBuilderPluginTest;
friend class ::grpc::testing::ServerBuilderThreadCreatorOverrideTest;
ServerBuilder& SetThreadFunctions(
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner) {
thread_creator_ = thread_creator;
thread_joiner_ = thread_joiner;
return *this;
}
struct Port { struct Port {
grpc::string addr; grpc::string addr;
@ -286,11 +272,6 @@ class ServerBuilder {
grpc_compression_algorithm algorithm; grpc_compression_algorithm algorithm;
} maybe_default_compression_algorithm_; } maybe_default_compression_algorithm_;
uint32_t enabled_compression_algorithms_bitset_; uint32_t enabled_compression_algorithms_bitset_;
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator_;
std::function<void(gpr_thd_id)> thread_joiner_;
}; };
} // namespace grpc } // namespace grpc

@ -485,6 +485,21 @@ typedef unsigned __int64 uint64_t;
#endif /* GPR_ATTRIBUTE_NO_TSAN (2) */ #endif /* GPR_ATTRIBUTE_NO_TSAN (2) */
#endif /* GPR_ATTRIBUTE_NO_TSAN (1) */ #endif /* GPR_ATTRIBUTE_NO_TSAN (1) */
/* GRPC_ALLOW_EXCEPTIONS should be 0 or 1 if exceptions are allowed or not */
#ifndef GRPC_ALLOW_EXCEPTIONS
/* If not already set, set to 1 on Windows (style guide standard) but to
* 0 on non-Windows platforms unless the compiler defines __EXCEPTIONS */
#ifdef GPR_WINDOWS
#define GRPC_ALLOW_EXCEPTIONS 1
#else /* GPR_WINDOWS */
#ifdef __EXCEPTIONS
#define GRPC_ALLOW_EXCEPTIONS 1
#else /* __EXCEPTIONS */
#define GRPC_ALLOW_EXCEPTIONS 0
#endif /* __EXCEPTIONS */
#endif /* __GPR_WINDOWS */
#endif /* GRPC_ALLOW_EXCEPTIONS */
#ifndef __STDC_FORMAT_MACROS #ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS
#endif #endif

@ -358,6 +358,7 @@
<file baseinstalldir="/" name="src/core/lib/slice/slice_internal.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/slice/slice_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/slice/slice_string_helpers.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/slice/slice_string_helpers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/debug_location.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/debug_location.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/orphanable.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/ref_counted.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/ref_counted.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/ref_counted_ptr.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/ref_counted_ptr.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/vector.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/vector.h" role="src" />

@ -553,6 +553,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
} }
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties, grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties); chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, new_lb_policy);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel"); GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
} }
chand->lb_policy = new_lb_policy; chand->lb_policy = new_lb_policy;
@ -658,6 +659,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
if (chand->lb_policy != nullptr) { if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties, grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties); chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel"); GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
chand->lb_policy = nullptr; chand->lb_policy = nullptr;
} }
@ -792,6 +794,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
if (chand->lb_policy != nullptr) { if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties, grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties); chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel"); GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
} }
gpr_free(chand->info_lb_policy_name); gpr_free(chand->info_lb_policy_name);
@ -852,12 +855,10 @@ typedef struct client_channel_call_data {
grpc_subchannel_call* subchannel_call; grpc_subchannel_call* subchannel_call;
grpc_error* error; grpc_error* error;
grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending. grpc_lb_policy_pick_state pick;
grpc_closure lb_pick_closure; grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure; grpc_closure lb_pick_cancel_closure;
grpc_connected_subchannel* connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity* pollent; grpc_polling_entity* pollent;
grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES]; grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
@ -866,8 +867,6 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch* initial_metadata_batch; grpc_transport_stream_op_batch* initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
grpc_closure on_complete; grpc_closure on_complete;
grpc_closure* original_on_complete; grpc_closure* original_on_complete;
} call_data; } call_data;
@ -1004,17 +1003,17 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
grpc_error* error) { grpc_error* error) {
channel_data* chand = (channel_data*)elem->channel_data; channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data; call_data* calld = (call_data*)elem->call_data;
const grpc_connected_subchannel_call_args call_args = { const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent calld->pollent, // pollent
calld->path, // path calld->path, // path
calld->call_start_time, // start_time calld->call_start_time, // start_time
calld->deadline, // deadline calld->deadline, // deadline
calld->arena, // arena calld->arena, // arena
calld->subchannel_call_context, // context calld->pick.subchannel_call_context, // context
calld->call_combiner // call_combiner calld->call_combiner // call_combiner
}; };
grpc_error* new_error = grpc_connected_subchannel_create_call( grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
calld->connected_subchannel, &call_args, &calld->subchannel_call); call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s", gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error)); chand, calld, calld->subchannel_call, grpc_error_string(new_error));
@ -1032,7 +1031,7 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) { static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
call_data* calld = (call_data*)elem->call_data; call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data; channel_data* chand = (channel_data*)elem->channel_data;
if (calld->connected_subchannel == nullptr) { if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel. // Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error); GRPC_ERROR_UNREF(calld->error);
calld->error = error == GRPC_ERROR_NONE calld->error = error == GRPC_ERROR_NONE
@ -1071,13 +1070,16 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg; grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data; channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data; call_data* calld = (call_data*)elem->call_data;
if (calld->lb_policy != nullptr) { // Note: chand->lb_policy may have changed since we started our pick,
// in which case we will be cancelling the pick on a policy other than
// the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy); chand, calld, chand->lb_policy);
} }
grpc_lb_policy_cancel_pick_locked( grpc_lb_policy_cancel_pick_locked(chand->lb_policy, &calld->pick,
calld->lb_policy, &calld->connected_subchannel, GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel"); GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
} }
@ -1092,9 +1094,6 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld); chand, calld);
} }
GPR_ASSERT(calld->lb_policy != nullptr);
GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
calld->lb_policy = nullptr;
async_pick_done_locked(elem, GRPC_ERROR_REF(error)); async_pick_done_locked(elem, GRPC_ERROR_REF(error));
} }
@ -1128,26 +1127,21 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY; initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} }
} }
const grpc_lb_policy_pick_args inputs = { calld->pick.initial_metadata =
calld->initial_metadata_batch->payload->send_initial_metadata calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata, .send_initial_metadata;
initial_metadata_flags, &calld->lb_token_mdelem}; calld->pick.initial_metadata_flags = initial_metadata_flags;
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
calld->lb_policy = chand->lb_policy;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem, GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner)); grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked( calld->pick.on_complete = &calld->lb_pick_closure;
chand->lb_policy, &inputs, &calld->connected_subchannel, const bool pick_done =
calld->subchannel_call_context, nullptr, &calld->lb_pick_closure); grpc_lb_policy_pick_locked(chand->lb_policy, &calld->pick);
if (pick_done) { if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */ /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously", gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld); chand, calld);
} }
GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
calld->lb_policy = nullptr;
} else { } else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel"); GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel( grpc_call_combiner_set_notify_on_cancel(
@ -1289,7 +1283,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
grpc_call_element* elem = (grpc_call_element*)arg; grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data; call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data; channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(calld->connected_subchannel == nullptr); GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
if (chand->lb_policy != nullptr) { if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick. // We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(elem)) { if (pick_callback_start_locked(elem)) {
@ -1467,15 +1461,14 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call, GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
"client_channel_destroy_call"); "client_channel_destroy_call");
} }
GPR_ASSERT(calld->lb_policy == nullptr);
GPR_ASSERT(calld->waiting_for_pick_batches_count == 0); GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
if (calld->connected_subchannel != nullptr) { if (calld->pick.connected_subchannel != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->connected_subchannel, "picked"); calld->pick.connected_subchannel.reset();
} }
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) { for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (calld->subchannel_call_context[i].value != nullptr) { if (calld->pick.subchannel_call_context[i].value != nullptr) {
calld->subchannel_call_context[i].destroy( calld->pick.subchannel_call_context[i].destroy(
calld->subchannel_call_context[i].value); calld->pick.subchannel_call_context[i].value);
} }
} }
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);

@ -19,8 +19,6 @@
#include "src/core/ext/filters/client_channel/lb_policy.h" #include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#define WEAK_REF_BITS 16
grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount( grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
false, "lb_policy_refcount"); false, "lb_policy_refcount");
@ -28,91 +26,60 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable, const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner) { grpc_combiner* combiner) {
policy->vtable = vtable; policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS); gpr_ref_init(&policy->refs, 1);
policy->interested_parties = grpc_pollset_set_create(); policy->interested_parties = grpc_pollset_set_create();
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy"); policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
} }
#ifndef NDEBUG #ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason void grpc_lb_policy_ref(grpc_lb_policy* lb_policy, const char* file, int line,
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose const char* reason) {
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason if (grpc_trace_lb_policy_refcount.enabled()) {
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
old_refs, old_refs + 1, reason);
}
#else #else
#define REF_FUNC_EXTRA_ARGS void grpc_lb_policy_ref(grpc_lb_policy* lb_policy) {
#define REF_MUTATE_EXTRA_ARGS
#define REF_FUNC_PASS_ARGS(new_reason)
#define REF_MUTATE_PASS_ARGS(x)
#endif #endif
gpr_ref(&lb_policy->refs);
}
static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG #ifndef NDEBUG
void grpc_lb_policy_unref(grpc_lb_policy* lb_policy, const char* file, int line,
const char* reason) {
if (grpc_trace_lb_policy_refcount.enabled()) { if (grpc_trace_lb_policy_refcount.enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c, "LB_POLICY:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
purpose, old_val, old_val + delta, reason); old_refs, old_refs - 1, reason);
} }
#else
void grpc_lb_policy_unref(grpc_lb_policy* lb_policy) {
#endif #endif
return old_val; if (gpr_unref(&lb_policy->refs)) {
} grpc_pollset_set_destroy(lb_policy->interested_parties);
grpc_combiner* combiner = lb_policy->combiner;
void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) { lb_policy->vtable->destroy(lb_policy);
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF")); GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
static void shutdown_locked(void* arg, grpc_error* error) {
grpc_lb_policy* policy = (grpc_lb_policy*)arg;
policy->vtable->shutdown_locked(policy);
GRPC_LB_POLICY_WEAK_UNREF(policy, "strong-unref");
}
void grpc_lb_policy_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(policy REF_FUNC_PASS_ARGS("strong-unref"));
} }
} }
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) { void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF")); grpc_lb_policy* new_policy) {
} policy->vtable->shutdown_locked(policy, new_policy);
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(policy->interested_parties);
grpc_combiner* combiner = policy->combiner;
policy->vtable->destroy(policy);
GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
} }
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy, int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args, grpc_lb_policy_pick_state* pick) {
grpc_connected_subchannel** target, return policy->vtable->pick_locked(policy, pick);
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete) {
return policy->vtable->pick_locked(policy, pick_args, target, context,
user_data, on_complete);
} }
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy, void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
grpc_connected_subchannel** target, grpc_lb_policy_pick_state* pick,
grpc_error* error) { grpc_error* error) {
policy->vtable->cancel_pick_locked(policy, target, error); policy->vtable->cancel_pick_locked(policy, pick, error);
} }
void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy, void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,

@ -21,6 +21,7 @@
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/support/ref_counted_ptr.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
/** A load balancing policy: specified by a vtable and a struct (which /** A load balancing policy: specified by a vtable and a struct (which
@ -33,7 +34,7 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
struct grpc_lb_policy { struct grpc_lb_policy {
const grpc_lb_policy_vtable* vtable; const grpc_lb_policy_vtable* vtable;
gpr_atm ref_pair; gpr_refcount refs;
/* owned pointer to interested parties in load balancing decisions */ /* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set* interested_parties; grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */ /* combiner under which lb_policy actions take place */
@ -42,32 +43,42 @@ struct grpc_lb_policy {
grpc_closure* request_reresolution; grpc_closure* request_reresolution;
}; };
/** Extra arguments for an LB pick */ /// State used for an LB pick.
typedef struct grpc_lb_policy_pick_args { typedef struct grpc_lb_policy_pick_state {
/** Initial metadata associated with the picking call. */ /// Initial metadata associated with the picking call.
grpc_metadata_batch* initial_metadata; grpc_metadata_batch* initial_metadata;
/** Bitmask used for selective cancelling. See \a /// Bitmask used for selective cancelling. See \a
* grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in /// grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
* grpc_types.h */ /// grpc_types.h.
uint32_t initial_metadata_flags; uint32_t initial_metadata_flags;
/** Storage for LB token in \a initial_metadata, or NULL if not used */ /// Storage for LB token in \a initial_metadata, or NULL if not used.
grpc_linked_mdelem* lb_token_mdelem_storage; grpc_linked_mdelem lb_token_mdelem_storage;
} grpc_lb_policy_pick_args; /// Closure to run when pick is complete, if not completed synchronously.
grpc_closure* on_complete;
/// Will be set to the selected subchannel, or nullptr on failure or when
/// the LB policy decides to drop the call.
grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if needed.
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
/// Upon success, \a *user_data will be set to whatever opaque information
/// may need to be propagated from the LB policy, or NULL if not needed.
void** user_data;
/// Next pointer. For internal use by LB policy.
struct grpc_lb_policy_pick_state* next;
} grpc_lb_policy_pick_state;
struct grpc_lb_policy_vtable { struct grpc_lb_policy_vtable {
void (*destroy)(grpc_lb_policy* policy); void (*destroy)(grpc_lb_policy* policy);
void (*shutdown_locked)(grpc_lb_policy* policy);
/// \see grpc_lb_policy_shutdown_locked().
void (*shutdown_locked)(grpc_lb_policy* policy, grpc_lb_policy* new_policy);
/** \see grpc_lb_policy_pick */ /** \see grpc_lb_policy_pick */
int (*pick_locked)(grpc_lb_policy* policy, int (*pick_locked)(grpc_lb_policy* policy, grpc_lb_policy_pick_state* pick);
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete);
/** \see grpc_lb_policy_cancel_pick */ /** \see grpc_lb_policy_cancel_pick */
void (*cancel_pick_locked)(grpc_lb_policy* policy, void (*cancel_pick_locked)(grpc_lb_policy* policy,
grpc_connected_subchannel** target, grpc_lb_policy_pick_state* pick,
grpc_error* error); grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */ /** \see grpc_lb_policy_cancel_picks */
@ -103,37 +114,19 @@ struct grpc_lb_policy_vtable {
}; };
#ifndef NDEBUG #ifndef NDEBUG
/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \ #define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r)) grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(p, r) \ #define GRPC_LB_POLICY_UNREF(p, r) \
grpc_lb_policy_unref((p), __FILE__, __LINE__, (r)) grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
/* Weak references: they don't prevent the shutdown of the LB policy. When no
* strong references are left but there are still weak ones, shutdown is called.
* Once the weak reference also reaches zero, the LB policy is destroyed. */
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(p, r) \
grpc_lb_policy_weak_unref((p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line, void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason); const char* reason);
void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line, void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line,
const char* reason); const char* reason);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line, #else // !NDEBUG
const char* reason);
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy, const char* file,
int line, const char* reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p)) #define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p)) #define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(p, r) grpc_lb_policy_weak_unref((p))
void grpc_lb_policy_ref(grpc_lb_policy* policy); void grpc_lb_policy_ref(grpc_lb_policy* policy);
void grpc_lb_policy_unref(grpc_lb_policy* policy); void grpc_lb_policy_unref(grpc_lb_policy* policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy);
#endif #endif
/** called by concrete implementations to initialize the base struct */ /** called by concrete implementations to initialize the base struct */
@ -141,40 +134,37 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable, const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner); grpc_combiner* combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args. /// Shuts down \a policy.
/// If \a new_policy is non-null, any pending picks will be restarted
\a target will be set to the selected subchannel, or NULL on failure /// on that policy; otherwise, they will be failed.
or when the LB policy decides to drop the call. void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
grpc_lb_policy* new_policy);
Upon success, \a user_data will be set to whatever opaque information /** Finds an appropriate subchannel for a call, based on data in \a pick.
may need to be propagated from the LB policy, or NULL if not needed. \a pick must remain alive until the pick is complete.
\a context will be populated with context to pass to the subchannel
call, if needed.
If the pick succeeds and a result is known immediately, a non-zero If the pick succeeds and a result is known immediately, a non-zero
value will be returned. Otherwise, \a on_complete will be invoked value will be returned. Otherwise, \a pick->on_complete will be invoked
once the pick is complete with its error argument set to indicate once the pick is complete with its error argument set to indicate
success or failure. success or failure.
Any IO should be done under the \a interested_parties \a grpc_pollset_set Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */ in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy, int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args, grpc_lb_policy_pick_state* pick);
grpc_connected_subchannel** target,
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping) /** Perform a connected subchannel ping (see \a
grpc_core::ConnectedSubchannel::Ping)
against one of the connected subchannels managed by \a policy. */ against one of the connected subchannels managed by \a policy. */
void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy, void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
grpc_closure* on_initiate, grpc_closure* on_initiate,
grpc_closure* on_ack); grpc_closure* on_ack);
/** Cancel picks for \a target. /** Cancel picks for \a pick.
The \a on_complete callback of the pending picks will be invoked with \a The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */ *target set to NULL. */
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy, void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
grpc_connected_subchannel** target, grpc_lb_policy_pick_state* pick,
grpc_error* error); grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given /** Cancel all pending picks for which their \a initial_metadata_flags (as given

@ -54,7 +54,7 @@
* operations in progress over the old RR instance. This is done by * operations in progress over the old RR instance. This is done by
* decreasing the reference count on the old policy. The moment no more * decreasing the reference count on the old policy. The moment no more
* references are held on the old RR policy, it'll be destroyed and \a * references are held on the old RR policy, it'll be destroyed and \a
* glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN * on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
* state. At this point we can transition to a new RR instance safely, which * state. At this point we can transition to a new RR instance safely, which
* is done once again via \a rr_handover_locked(). * is done once again via \a rr_handover_locked().
* *
@ -128,187 +128,48 @@
grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb"); grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
/* add lb_token of selected subchannel (address) to the call's initial struct glb_lb_policy;
* metadata */
static grpc_error* initial_metadata_add_lb_token(
grpc_metadata_batch* initial_metadata,
grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
GPR_ASSERT(lb_token_mdelem_storage != nullptr);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
lb_token);
}
static void destroy_client_stats(void* arg) { namespace {
grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
}
typedef struct wrapped_rr_closure_arg {
/* the closure instance using this struct as argument */
grpc_closure wrapper_closure;
/* the original closure. Usually a on_complete/notify cb for pick() and ping()
* calls against the internal RR instance, respectively. */
grpc_closure* wrapped_closure;
/* the pick's initial metadata, kept in order to append the LB token for the
* pick */
grpc_metadata_batch* initial_metadata;
/* the picked target, used to determine which LB token to add to the pick's
* initial metadata */
grpc_connected_subchannel** target;
/* the context to be populated for the subchannel call */
grpc_call_context_element* context;
/* Stats for client-side load reporting. Note that this holds a /// Linked list of pending pick requests. It stores all information needed to
* reference, which must be either passed on via context or unreffed. */ /// eventually call (Round Robin's) pick() on them. They mainly stay pending
/// waiting for the RR policy to be created.
///
/// Note that when a pick is sent to the RR policy, we inject our own
/// on_complete callback, so that we can intercept the result before
/// invoking the original on_complete callback. This allows us to set the
/// LB token metadata and add client_stats to the call context.
/// See \a pending_pick_complete() for details.
struct pending_pick {
// Our on_complete closure and the original one.
grpc_closure on_complete;
grpc_closure* original_on_complete;
// The original pick.
grpc_lb_policy_pick_state* pick;
// Stats for client-side load reporting. Note that this holds a
// reference, which must be either passed on via context or unreffed.
grpc_grpclb_client_stats* client_stats; grpc_grpclb_client_stats* client_stats;
// The LB token associated with the pick. This is set via user_data in
/* the LB token associated with the pick */ // the pick.
grpc_mdelem lb_token; grpc_mdelem lb_token;
// The grpclb instance that created the wrapping. This instance is not owned,
/* storage for the lb token initial metadata mdelem */ // reference counts are untouched. It's used only for logging purposes.
grpc_linked_mdelem* lb_token_mdelem_storage; glb_lb_policy* glb_policy;
// Next pending pick.
/* The RR instance related to the closure */
grpc_lb_policy* rr_policy;
/* The grpclb instance that created the wrapping. This instance is not owned,
* reference counts are untouched. It's used only for logging purposes. */
grpc_lb_policy* glb_policy;
/* heap memory to be freed upon closure execution. */
void* free_when_done;
} wrapped_rr_closure_arg;
/* The \a on_complete closure passed as part of the pick requires keeping a
* reference to its associated round robin instance. We wrap this closure in
* order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(void* arg, grpc_error* error) {
wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
if (wc_arg->rr_policy != nullptr) {
/* if *target is nullptr, no pick has been made by the RR policy (eg, all
* addresses failed to connect). There won't be any user_data/token
* available */
if (*wc_arg->target != nullptr) {
if (!GRPC_MDISNULL(wc_arg->lb_token)) {
initial_metadata_add_lb_token(wc_arg->initial_metadata,
wc_arg->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
} else {
gpr_log(
GPR_ERROR,
"[grpclb %p] No LB token for connected subchannel pick %p (from RR "
"instance %p).",
wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
abort();
}
// Pass on client stats via context. Passes ownership of the reference.
GPR_ASSERT(wc_arg->client_stats != nullptr);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
} else {
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
}
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "wrapped_rr_closure");
}
GPR_ASSERT(wc_arg->free_when_done != nullptr);
gpr_free(wc_arg->free_when_done);
}
namespace {
/* Linked list of pending pick requests. It stores all information needed to
* eventually call (Round Robin's) pick() on them. They mainly stay pending
* waiting for the RR policy to be created/updated.
*
* One particularity is the wrapping of the user-provided \a on_complete closure
* (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
* order to correctly unref the RR policy instance upon completion of the pick.
* See \a wrapped_rr_closure for details. */
struct pending_pick {
struct pending_pick* next; struct pending_pick* next;
/* original pick()'s arguments */
grpc_lb_policy_pick_args pick_args;
/* output argument where to store the pick()ed connected subchannel, or
* nullptr upon error. */
grpc_connected_subchannel** target;
/* args for wrapped_on_complete */
wrapped_rr_closure_arg wrapped_on_complete_arg;
}; };
} // namespace
static void add_pending_pick(pending_pick** root, /// A linked list of pending pings waiting for the RR policy to be created.
const grpc_lb_policy_pick_args* pick_args, struct pending_ping {
grpc_connected_subchannel** target, grpc_closure* on_initiate;
grpc_call_context_element* context, grpc_closure* on_ack;
grpc_closure* on_complete) {
pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
pp->wrapped_on_complete_arg.target = target;
pp->wrapped_on_complete_arg.context = context;
pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
pick_args->lb_token_mdelem_storage;
pp->wrapped_on_complete_arg.free_when_done = pp;
GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
wrapped_rr_closure, &pp->wrapped_on_complete_arg,
grpc_schedule_on_exec_ctx);
*root = pp;
}
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
struct pending_ping* next; struct pending_ping* next;
};
/* args for sending the ping */ } // namespace
wrapped_rr_closure_arg* on_initiate;
wrapped_rr_closure_arg* on_ack;
} pending_ping;
static void add_pending_ping(pending_ping** root, grpc_closure* on_initiate,
grpc_closure* on_ack) {
pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
if (on_initiate != nullptr) {
pping->on_initiate =
(wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_initiate));
pping->on_initiate->wrapped_closure = on_initiate;
pping->on_initiate->free_when_done = pping->on_initiate;
GRPC_CLOSURE_INIT(&pping->on_initiate->wrapper_closure, wrapped_rr_closure,
&pping->on_initiate, grpc_schedule_on_exec_ctx);
}
if (on_ack != nullptr) {
pping->on_ack = (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_ack));
pping->on_ack->wrapped_closure = on_ack;
pping->on_ack->free_when_done = pping->on_ack;
GRPC_CLOSURE_INIT(&pping->on_ack->wrapper_closure, wrapped_rr_closure,
&pping->on_ack, grpc_schedule_on_exec_ctx);
}
pping->next = *root;
*root = pping;
}
/*
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
typedef struct glb_lb_policy { struct glb_lb_policy {
/** base policy: must be first */ /** base policy: must be first */
grpc_lb_policy base; grpc_lb_policy base;
@ -333,6 +194,9 @@ typedef struct glb_lb_policy {
/** the RR policy to use of the backend servers returned by the LB server */ /** the RR policy to use of the backend servers returned by the LB server */
grpc_lb_policy* rr_policy; grpc_lb_policy* rr_policy;
grpc_closure on_rr_connectivity_changed;
grpc_connectivity_state rr_connectivity_state;
bool started_picking; bool started_picking;
/** our connectivity state tracker */ /** our connectivity state tracker */
@ -437,15 +301,87 @@ typedef struct glb_lb_policy {
grpc_closure client_load_report_closure; grpc_closure client_load_report_closure;
/* Client load report message payload. */ /* Client load report message payload. */
grpc_byte_buffer* client_load_report_payload; grpc_byte_buffer* client_load_report_payload;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
grpc_closure on_change;
grpc_connectivity_state state;
glb_lb_policy* glb_policy;
}; };
/* add lb_token of selected subchannel (address) to the call's initial
* metadata */
static grpc_error* initial_metadata_add_lb_token(
grpc_metadata_batch* initial_metadata,
grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
GPR_ASSERT(lb_token_mdelem_storage != nullptr);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
lb_token);
}
static void destroy_client_stats(void* arg) {
grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
}
static void pending_pick_set_metadata_and_context(pending_pick* pp) {
/* if connected_subchannel is nullptr, no pick has been made by the RR
* policy (e.g., all addresses failed to connect). There won't be any
* user_data/token available */
if (pp->pick->connected_subchannel != nullptr) {
if (!GRPC_MDISNULL(pp->lb_token)) {
initial_metadata_add_lb_token(pp->pick->initial_metadata,
&pp->pick->lb_token_mdelem_storage,
GRPC_MDELEM_REF(pp->lb_token));
} else {
gpr_log(GPR_ERROR,
"[grpclb %p] No LB token for connected subchannel pick %p",
pp->glb_policy, pp->pick);
abort();
}
// Pass on client stats via context. Passes ownership of the reference.
GPR_ASSERT(pp->client_stats != nullptr);
pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
pp->client_stats;
pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
destroy_client_stats;
} else {
if (pp->client_stats != nullptr) {
grpc_grpclb_client_stats_unref(pp->client_stats);
}
}
}
/* The \a on_complete closure passed as part of the pick requires keeping a
* reference to its associated round robin instance. We wrap this closure in
* order to unref the round robin instance upon its invocation */
static void pending_pick_complete(void* arg, grpc_error* error) {
pending_pick* pp = (pending_pick*)arg;
pending_pick_set_metadata_and_context(pp);
GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
grpc_lb_policy_pick_state* pick) {
pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
pp->pick = pick;
pp->glb_policy = glb_policy;
GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
grpc_schedule_on_exec_ctx);
pp->original_on_complete = pick->on_complete;
pp->pick->on_complete = &pp->on_complete;
return pp;
}
static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
new_pp->next = *root;
*root = new_pp;
}
static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
grpc_closure* on_ack) {
pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
pping->on_initiate = on_initiate;
pping->on_ack = on_ack;
pping->next = *root;
*root = pping;
}
static bool is_server_valid(const grpc_grpclb_server* server, size_t idx, static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
bool log) { bool log) {
if (server->drop) return false; if (server->drop) return false;
@ -557,7 +493,6 @@ static grpc_lb_addresses* process_serverlist_locked(
gpr_free(uri); gpr_free(uri);
user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload; user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
} }
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len, grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
false /* is_balancer */, false /* is_balancer */,
nullptr /* balancer_name */, user_data); nullptr /* balancer_name */, user_data);
@ -598,7 +533,6 @@ static void update_lb_connectivity_status_locked(
grpc_error* rr_state_error) { grpc_error* rr_state_error) {
const grpc_connectivity_state curr_glb_state = const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&glb_policy->state_tracker); grpc_connectivity_state_check(&glb_policy->state_tracker);
/* The new connectivity status is a function of the previous one and the new /* The new connectivity status is a function of the previous one and the new
* input coming from the status of the RR policy. * input coming from the status of the RR policy.
* *
@ -628,7 +562,6 @@ static void update_lb_connectivity_status_locked(
* *
* (*) This function mustn't be called during shutting down. */ * (*) This function mustn't be called during shutting down. */
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN); GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
switch (rr_state) { switch (rr_state) {
case GRPC_CHANNEL_TRANSIENT_FAILURE: case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN: case GRPC_CHANNEL_SHUTDOWN:
@ -639,7 +572,6 @@ static void update_lb_connectivity_status_locked(
case GRPC_CHANNEL_READY: case GRPC_CHANNEL_READY:
GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE); GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
} }
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log( gpr_log(
GPR_INFO, GPR_INFO,
@ -657,10 +589,8 @@ static void update_lb_connectivity_status_locked(
* cleanups this callback would otherwise be responsible for. * cleanups this callback would otherwise be responsible for.
* If \a force_async is true, then we will manually schedule the * If \a force_async is true, then we will manually schedule the
* completion callback even if the pick is available immediately. */ * completion callback even if the pick is available immediately. */
static bool pick_from_internal_rr_locked( static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
glb_lb_policy* glb_policy, const grpc_lb_policy_pick_args* pick_args, bool force_async, pending_pick* pp) {
bool force_async, grpc_connected_subchannel** target,
wrapped_rr_closure_arg* wc_arg) {
// Check for drops if we are not using fallback backend addresses. // Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != nullptr) { if (glb_policy->serverlist != nullptr) {
// Look at the index into the serverlist to see if we should drop this call. // Look at the index into the serverlist to see if we should drop this call.
@ -670,57 +600,36 @@ static bool pick_from_internal_rr_locked(
glb_policy->serverlist_index = 0; // Wrap-around. glb_policy->serverlist_index = 0; // Wrap-around.
} }
if (server->drop) { if (server->drop) {
// Not using the RR policy, so unref it.
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of // Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in // dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a // the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter) // subchannel call (and therefore no client_load_reporting filter)
// for dropped calls. // for dropped calls.
GPR_ASSERT(wc_arg->client_stats != nullptr); GPR_ASSERT(glb_policy->client_stats != nullptr);
grpc_grpclb_client_stats_add_call_dropped_locked( grpc_grpclb_client_stats_add_call_dropped_locked(
server->load_balance_token, wc_arg->client_stats); server->load_balance_token, glb_policy->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) { if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != nullptr); GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE); gpr_free(pp);
gpr_free(wc_arg->free_when_done);
return false; return false;
} }
gpr_free(wc_arg->free_when_done); gpr_free(pp);
return true; return true;
} }
} }
// Set client_stats and user_data.
pp->client_stats = grpc_grpclb_client_stats_ref(glb_policy->client_stats);
GPR_ASSERT(pp->pick->user_data == nullptr);
pp->pick->user_data = (void**)&pp->lb_token;
// Pick via the RR policy. // Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked( bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
wc_arg->rr_policy, pick_args, target, wc_arg->context,
(void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) { if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */ pending_pick_set_metadata_and_context(pp);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
/* add the load reporting initial metadata */
initial_metadata_add_lb_token(pick_args->initial_metadata,
pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
// Pass on client stats via context. Passes ownership of the reference.
GPR_ASSERT(wc_arg->client_stats != nullptr);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
if (force_async) { if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != nullptr); GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE); pick_done = false;
gpr_free(wc_arg->free_when_done);
return false;
} }
gpr_free(wc_arg->free_when_done); gpr_free(pp);
} }
/* else, the pending pick will be registered and taken care of by the /* else, the pending pick will be registered and taken care of by the
* pending pick list inside the RR policy (glb_policy->rr_policy). * pending pick list inside the RR policy (glb_policy->rr_policy).
@ -762,7 +671,7 @@ static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
gpr_free(args); gpr_free(args);
} }
static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error); static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);
static void create_rr_locked(glb_lb_policy* glb_policy, static void create_rr_locked(glb_lb_policy* glb_policy,
grpc_lb_policy_args* args) { grpc_lb_policy_args* args) {
GPR_ASSERT(glb_policy->rr_policy == nullptr); GPR_ASSERT(glb_policy->rr_policy == nullptr);
@ -784,72 +693,46 @@ static void create_rr_locked(glb_lb_policy* glb_policy,
glb_policy->base.request_reresolution = nullptr; glb_policy->base.request_reresolution = nullptr;
glb_policy->rr_policy = new_rr_policy; glb_policy->rr_policy = new_rr_policy;
grpc_error* rr_state_error = nullptr; grpc_error* rr_state_error = nullptr;
const grpc_connectivity_state rr_state = glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy, glb_policy->rr_policy, &rr_state_error);
&rr_state_error);
/* Connectivity state is a function of the RR policy updated/created */ /* Connectivity state is a function of the RR policy updated/created */
update_lb_connectivity_status_locked(glb_policy, rr_state, rr_state_error); update_lb_connectivity_status_locked(
glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
/* Add the gRPC LB's interested_parties pollset_set to that of the newly /* Add the gRPC LB's interested_parties pollset_set to that of the newly
* created RR policy. This will make the RR policy progress upon activity on * created RR policy. This will make the RR policy progress upon activity on
* gRPC LB, which in turn is tied to the application's call */ * gRPC LB, which in turn is tied to the application's call */
grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties, grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties); glb_policy->base.interested_parties);
GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
/* Allocate the data for the tracking of the new RR policy's connectivity. on_rr_connectivity_changed_locked, glb_policy,
* It'll be deallocated in glb_rr_connectivity_changed() */
rr_connectivity_data* rr_connectivity =
(rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner)); grpc_combiner_scheduler(glb_policy->base.combiner));
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = rr_state;
/* Subscribe to changes to the connectivity of the new RR */ /* Subscribe to changes to the connectivity of the new RR */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb"); GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy, grpc_lb_policy_notify_on_state_change_locked(
&rr_connectivity->state, glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
&rr_connectivity->on_change); &glb_policy->on_rr_connectivity_changed);
grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy); grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
// Send pending picks to RR policy.
/* Update picks and pings in wait */
pending_pick* pp; pending_pick* pp;
while ((pp = glb_policy->pending_picks)) { while ((pp = glb_policy->pending_picks)) {
glb_policy->pending_picks = pp->next; glb_policy->pending_picks = pp->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
pp->wrapped_on_complete_arg.client_stats =
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
"[grpclb %p] Pending pick about to (async) PICK from RR %p", "[grpclb %p] Pending pick about to (async) PICK from RR %p",
glb_policy, glb_policy->rr_policy); glb_policy, glb_policy->rr_policy);
} }
pick_from_internal_rr_locked(glb_policy, &pp->pick_args, pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
true /* force_async */, pp->target,
&pp->wrapped_on_complete_arg);
} }
// Send pending pings to RR policy.
pending_ping* pping; pending_ping* pping;
while ((pping = glb_policy->pending_pings)) { while ((pping = glb_policy->pending_pings)) {
glb_policy->pending_pings = pping->next; glb_policy->pending_pings = pping->next;
grpc_closure* on_initiate = nullptr;
grpc_closure* on_ack = nullptr;
if (pping->on_initiate != nullptr) {
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->on_initiate->rr_policy = glb_policy->rr_policy;
on_initiate = &pping->on_initiate->wrapper_closure;
}
if (pping->on_ack != nullptr) {
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->on_ack->rr_policy = glb_policy->rr_policy;
on_ack = &pping->on_ack->wrapper_closure;
}
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p", gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
glb_policy, glb_policy->rr_policy); glb_policy, glb_policy->rr_policy);
} }
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack); grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
pping->on_ack);
gpr_free(pping); gpr_free(pping);
} }
} }
@ -875,31 +758,28 @@ static void rr_handover_locked(glb_lb_policy* glb_policy) {
lb_policy_args_destroy(args); lb_policy_args_destroy(args);
} }
static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error) { static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg; glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
if (glb_policy->shutting_down) { if (glb_policy->shutting_down) {
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb"); GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
gpr_free(rr_connectivity);
return; return;
} }
if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) { if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
/* An RR policy that has transitioned into the SHUTDOWN connectivity state /* An RR policy that has transitioned into the SHUTDOWN connectivity state
* should not be considered for picks or updates: the SHUTDOWN state is a * should not be considered for picks or updates: the SHUTDOWN state is a
* sink, policies can't transition back from it. .*/ * sink, policies can't transition back from it. .*/
GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown"); GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
glb_policy->rr_policy = nullptr; glb_policy->rr_policy = nullptr;
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb"); GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
gpr_free(rr_connectivity);
return; return;
} }
/* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */ /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
update_lb_connectivity_status_locked(glb_policy, rr_connectivity->state, update_lb_connectivity_status_locked(
GRPC_ERROR_REF(error)); glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
/* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */ /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy, grpc_lb_policy_notify_on_state_change_locked(
&rr_connectivity->state, glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
&rr_connectivity->on_change); &glb_policy->on_rr_connectivity_changed);
} }
static void destroy_balancer_name(void* balancer_name) { static void destroy_balancer_name(void* balancer_name) {
@ -1007,22 +887,17 @@ static void glb_destroy(grpc_lb_policy* pol) {
gpr_free(glb_policy); gpr_free(glb_policy);
} }
static void glb_shutdown_locked(grpc_lb_policy* pol) { static void glb_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol; glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
glb_policy->shutting_down = true; glb_policy->shutting_down = true;
/* We need a copy of the lb_call pointer because we can't cancell the call
* while holding glb_policy->mu: lb_on_server_status_received, invoked due to
* the cancel, needs to acquire that same lock */
grpc_call* lb_call = glb_policy->lb_call;
/* glb_policy->lb_call and this local lb_call must be consistent at this point /* glb_policy->lb_call and this local lb_call must be consistent at this point
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
* of query_for_backends_locked, which can only be invoked while * of query_for_backends_locked, which can only be invoked while
* glb_policy->shutting_down is false. */ * glb_policy->shutting_down is false. */
if (lb_call != nullptr) { if (glb_policy->lb_call != nullptr) {
grpc_call_cancel(lb_call, nullptr); grpc_call_cancel(glb_policy->lb_call, nullptr);
/* lb_on_server_status_received will pick up the cancel and clean up */ /* lb_on_server_status_received will pick up the cancel and clean up */
} }
if (glb_policy->retry_timer_callback_pending) { if (glb_policy->retry_timer_callback_pending) {
@ -1031,12 +906,8 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
if (glb_policy->fallback_timer_callback_pending) { if (glb_policy->fallback_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_fallback_timer); grpc_timer_cancel(&glb_policy->lb_fallback_timer);
} }
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
pending_ping* pping = glb_policy->pending_pings;
glb_policy->pending_pings = nullptr;
if (glb_policy->rr_policy != nullptr) { if (glb_policy->rr_policy != nullptr) {
grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown"); GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
} else { } else {
grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED); grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
@ -1051,28 +922,35 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
} }
grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN, grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "glb_shutdown"); GRPC_ERROR_REF(error), "glb_shutdown");
// Clear pending picks.
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
while (pp != nullptr) { while (pp != nullptr) {
pending_pick* next = pp->next; pending_pick* next = pp->next;
*pp->target = nullptr; if (new_policy != nullptr) {
GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure, // Hand pick over to new policy.
GRPC_ERROR_REF(error)); if (pp->client_stats != nullptr) {
gpr_free(pp); grpc_grpclb_client_stats_unref(pp->client_stats);
}
pp->pick->on_complete = pp->original_on_complete;
if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
// Synchronous return; schedule callback.
GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
}
gpr_free(pp);
} else {
pp->pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
}
pp = next; pp = next;
} }
// Clear pending pings.
pending_ping* pping = glb_policy->pending_pings;
glb_policy->pending_pings = nullptr;
while (pping != nullptr) { while (pping != nullptr) {
pending_ping* next = pping->next; pending_ping* next = pping->next;
if (pping->on_initiate != nullptr) { GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
GRPC_CLOSURE_SCHED(&pping->on_initiate->wrapper_closure, GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
GRPC_ERROR_REF(error));
gpr_free(pping->on_initiate);
}
if (pping->on_ack != nullptr) {
GRPC_CLOSURE_SCHED(&pping->on_ack->wrapper_closure,
GRPC_ERROR_REF(error));
gpr_free(pping->on_ack);
}
gpr_free(pping); gpr_free(pping);
pping = next; pping = next;
} }
@ -1090,16 +968,16 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these, // level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
// we invoke the completion closure and set *target to nullptr right here. // we invoke the completion closure and set *target to nullptr right here.
static void glb_cancel_pick_locked(grpc_lb_policy* pol, static void glb_cancel_pick_locked(grpc_lb_policy* pol,
grpc_connected_subchannel** target, grpc_lb_policy_pick_state* pick,
grpc_error* error) { grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol; glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = glb_policy->pending_picks; pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr; glb_policy->pending_picks = nullptr;
while (pp != nullptr) { while (pp != nullptr) {
pending_pick* next = pp->next; pending_pick* next = pp->next;
if (pp->target == target) { if (pp->pick == pick) {
*target = nullptr; pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure, GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1)); "Pick Cancelled", &error, 1));
} else { } else {
@ -1109,7 +987,7 @@ static void glb_cancel_pick_locked(grpc_lb_policy* pol,
pp = next; pp = next;
} }
if (glb_policy->rr_policy != nullptr) { if (glb_policy->rr_policy != nullptr) {
grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, target, grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
@ -1134,9 +1012,9 @@ static void glb_cancel_picks_locked(grpc_lb_policy* pol,
glb_policy->pending_picks = nullptr; glb_policy->pending_picks = nullptr;
while (pp != nullptr) { while (pp != nullptr) {
pending_pick* next = pp->next; pending_pick* next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) == if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) { initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure, GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1)); "Pick Cancelled", &error, 1));
} else { } else {
@ -1162,7 +1040,7 @@ static void start_picking_locked(glb_lb_policy* glb_policy) {
!glb_policy->fallback_timer_callback_pending) { !glb_policy->fallback_timer_callback_pending) {
grpc_millis deadline = grpc_millis deadline =
grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms; grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer"); GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked, GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner)); grpc_combiner_scheduler(glb_policy->base.combiner));
@ -1184,19 +1062,9 @@ static void glb_exit_idle_locked(grpc_lb_policy* pol) {
} }
static int glb_pick_locked(grpc_lb_policy* pol, static int glb_pick_locked(grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args, grpc_lb_policy_pick_state* pick) {
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
if (pick_args->lb_token_mdelem_storage == nullptr) {
*target = nullptr;
GRPC_CLOSURE_SCHED(on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No mdelem storage for the LB token. Load reporting "
"won't work without it. Failing"));
return 0;
}
glb_lb_policy* glb_policy = (glb_lb_policy*)pol; glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = pending_pick_create(glb_policy, pick);
bool pick_done = false; bool pick_done = false;
if (glb_policy->rr_policy != nullptr) { if (glb_policy->rr_policy != nullptr) {
const grpc_connectivity_state rr_connectivity_state = const grpc_connectivity_state rr_connectivity_state =
@ -1204,7 +1072,7 @@ static int glb_pick_locked(grpc_lb_policy* pol,
nullptr); nullptr);
// The glb_policy->rr_policy may have transitioned to SHUTDOWN but the // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
// callback registered to capture this event // callback registered to capture this event
// (glb_rr_connectivity_changed_locked) may not have been invoked yet. We // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
// need to make sure we aren't trying to pick from a RR policy instance // need to make sure we aren't trying to pick from a RR policy instance
// that's in shutdown. // that's in shutdown.
if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
@ -1214,32 +1082,16 @@ static int glb_pick_locked(grpc_lb_policy* pol,
glb_policy, glb_policy->rr_policy, glb_policy, glb_policy->rr_policy,
grpc_connectivity_state_name(rr_connectivity_state)); grpc_connectivity_state_name(rr_connectivity_state));
} }
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context, pending_pick_add(&glb_policy->pending_picks, pp);
on_complete);
pick_done = false; pick_done = false;
} else { // RR not in shutdown } else { // RR not in shutdown
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy, gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
glb_policy->rr_policy); glb_policy->rr_policy);
} }
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
wrapped_rr_closure_arg* wc_arg =
(wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
wc_arg->rr_policy = glb_policy->rr_policy;
wc_arg->target = target;
wc_arg->context = context;
GPR_ASSERT(glb_policy->client_stats != nullptr); GPR_ASSERT(glb_policy->client_stats != nullptr);
wc_arg->client_stats = pick_done =
grpc_grpclb_client_stats_ref(glb_policy->client_stats); pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
wc_arg->wrapped_closure = on_complete;
wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
wc_arg->initial_metadata = pick_args->initial_metadata;
wc_arg->free_when_done = wc_arg;
wc_arg->glb_policy = pol;
pick_done = pick_from_internal_rr_locked(
glb_policy, pick_args, false /* force_async */, target, wc_arg);
} }
} else { // glb_policy->rr_policy == NULL } else { // glb_policy->rr_policy == NULL
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
@ -1247,8 +1099,7 @@ static int glb_pick_locked(grpc_lb_policy* pol,
"[grpclb %p] No RR policy. Adding to grpclb's pending picks", "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
glb_policy); glb_policy);
} }
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context, pending_pick_add(&glb_policy->pending_picks, pp);
on_complete);
if (!glb_policy->started_picking) { if (!glb_policy->started_picking) {
start_picking_locked(glb_policy); start_picking_locked(glb_policy);
} }
@ -1270,7 +1121,7 @@ static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
if (glb_policy->rr_policy) { if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack); grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
} else { } else {
add_pending_ping(&glb_policy->pending_pings, on_initiate, on_ack); pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
if (!glb_policy->started_picking) { if (!glb_policy->started_picking) {
start_picking_locked(glb_policy); start_picking_locked(glb_policy);
} }
@ -1295,7 +1146,7 @@ static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
} }
query_for_backends_locked(glb_policy); query_for_backends_locked(glb_policy);
} }
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_retry_timer"); GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
} }
static void maybe_restart_lb_call(glb_lb_policy* glb_policy) { static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
@ -1321,7 +1172,7 @@ static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
glb_policy); glb_policy);
} }
} }
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer"); GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry, GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy, lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner)); grpc_combiner_scheduler(glb_policy->base.combiner));
@ -1329,8 +1180,8 @@ static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try, grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry); &glb_policy->lb_on_call_retry);
} }
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, GRPC_LB_POLICY_UNREF(&glb_policy->base,
"lb_on_server_status_received_locked"); "lb_on_server_status_received_locked");
} }
static void send_client_load_report_locked(void* arg, grpc_error* error); static void send_client_load_report_locked(void* arg, grpc_error* error);
@ -1353,7 +1204,7 @@ static void client_load_report_done_locked(void* arg, grpc_error* error) {
glb_policy->client_load_report_payload = nullptr; glb_policy->client_load_report_payload = nullptr;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) { if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
glb_policy->client_load_report_timer_callback_pending = false; glb_policy->client_load_report_timer_callback_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report"); GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
if (glb_policy->lb_call == nullptr) { if (glb_policy->lb_call == nullptr) {
maybe_restart_lb_call(glb_policy); maybe_restart_lb_call(glb_policy);
} }
@ -1394,7 +1245,7 @@ static void send_client_load_report_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg; glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) { if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
glb_policy->client_load_report_timer_callback_pending = false; glb_policy->client_load_report_timer_callback_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report"); GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
if (glb_policy->lb_call == nullptr) { if (glb_policy->lb_call == nullptr) {
maybe_restart_lb_call(glb_policy); maybe_restart_lb_call(glb_policy);
} }
@ -1547,10 +1398,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0; op->flags = 0;
op->reserved = nullptr; op->reserved = nullptr;
op++; op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref /* take a ref to be released in lb_on_sent_initial_request_locked() */
* count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_sent_initial_request_locked");
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
"lb_on_sent_initial_request_locked");
call_error = grpc_call_start_batch_and_execute( call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops), glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_sent_initial_request); &glb_policy->lb_on_sent_initial_request);
@ -1566,10 +1415,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0; op->flags = 0;
op->reserved = nullptr; op->reserved = nullptr;
op++; op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref /* take a ref to be released in lb_on_server_status_received_locked() */
* count goes to zero) to be unref'd in lb_on_server_status_received_locked */ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_server_status_received_locked");
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
"lb_on_server_status_received_locked");
call_error = grpc_call_start_batch_and_execute( call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops), glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_server_status_received); &glb_policy->lb_on_server_status_received);
@ -1581,9 +1428,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0; op->flags = 0;
op->reserved = nullptr; op->reserved = nullptr;
op++; op++;
/* take another weak ref to be unref'd/reused in /* take a ref to be unref'd/reused in lb_on_response_received_locked() */
* lb_on_response_received_locked */ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_response_received_locked");
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
call_error = grpc_call_start_batch_and_execute( call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops), glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); &glb_policy->lb_on_response_received);
@ -1598,8 +1444,7 @@ static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
if (glb_policy->client_load_report_payload != nullptr) { if (glb_policy->client_load_report_payload != nullptr) {
do_send_client_load_report_locked(glb_policy); do_send_client_load_report_locked(glb_policy);
} }
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, GRPC_LB_POLICY_UNREF(&glb_policy->base, "lb_on_sent_initial_request_locked");
"lb_on_sent_initial_request_locked");
} }
static void lb_on_response_received_locked(void* arg, grpc_error* error) { static void lb_on_response_received_locked(void* arg, grpc_error* error) {
@ -1631,11 +1476,9 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
"client load reporting interval = %" PRIdPTR " milliseconds", "client load reporting interval = %" PRIdPTR " milliseconds",
glb_policy, glb_policy->client_stats_report_interval); glb_policy, glb_policy->client_stats_report_interval);
} }
/* take a weak ref (won't prevent calling of \a glb_shutdown() if the /* take a ref to be unref'd in send_client_load_report_locked() */
* strong ref count goes to zero) to be unref'd in
* send_client_load_report_locked() */
glb_policy->client_load_report_timer_callback_pending = true; glb_policy->client_load_report_timer_callback_pending = true;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report"); GRPC_LB_POLICY_REF(&glb_policy->base, "client_load_report");
schedule_next_client_load_report(glb_policy); schedule_next_client_load_report(glb_policy);
} else if (grpc_lb_glb_trace.enabled()) { } else if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
@ -1717,21 +1560,21 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
op->flags = 0; op->flags = 0;
op->reserved = nullptr; op->reserved = nullptr;
op++; op++;
/* reuse the "lb_on_response_received_locked" weak ref taken in /* reuse the "lb_on_response_received_locked" ref taken in
* query_for_backends_locked() */ * query_for_backends_locked() */
const grpc_call_error call_error = grpc_call_start_batch_and_execute( const grpc_call_error call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops), glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */ &glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error); GPR_ASSERT(GRPC_CALL_OK == call_error);
} else { } else {
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, GRPC_LB_POLICY_UNREF(&glb_policy->base,
"lb_on_response_received_locked_shutdown"); "lb_on_response_received_locked_shutdown");
} }
} else { /* empty payload: call cancelled. */ } else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received_locked" weak ref taken in /* dispose of the "lb_on_response_received_locked" ref taken in
* query_for_backends_locked() and reused in every reception loop */ * query_for_backends_locked() and reused in every reception loop */
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, GRPC_LB_POLICY_UNREF(&glb_policy->base,
"lb_on_response_received_locked_empty_payload"); "lb_on_response_received_locked_empty_payload");
} }
} }
@ -1751,7 +1594,7 @@ static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
rr_handover_locked(glb_policy); rr_handover_locked(glb_policy);
} }
} }
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_fallback_timer"); GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
} }
static void lb_on_server_status_received_locked(void* arg, grpc_error* error) { static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
@ -1835,7 +1678,7 @@ static void glb_update_locked(grpc_lb_policy* policy,
grpc_channel_get_channel_stack(glb_policy->lb_channel)); grpc_channel_get_channel_stack(glb_policy->lb_channel));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
glb_policy->watching_lb_channel = true; glb_policy->watching_lb_channel = true;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity"); GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
grpc_client_channel_watch_connectivity_state( grpc_client_channel_watch_connectivity_state(
client_channel_elem, client_channel_elem,
grpc_polling_entity_create_from_pollset_set( grpc_polling_entity_create_from_pollset_set(
@ -1891,8 +1734,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
case GRPC_CHANNEL_SHUTDOWN: case GRPC_CHANNEL_SHUTDOWN:
done: done:
glb_policy->watching_lb_channel = false; glb_policy->watching_lb_channel = false;
GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, GRPC_LB_POLICY_UNREF(&glb_policy->base,
"watch_lb_channel_connectivity_cb_shutdown"); "watch_lb_channel_connectivity_cb_shutdown");
break; break;
} }
} }

@ -31,15 +31,6 @@
grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first"); grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
namespace {
struct pending_pick {
struct pending_pick* next;
uint32_t initial_metadata_flags;
grpc_connected_subchannel** target;
grpc_closure* on_complete;
};
} // namespace
typedef struct { typedef struct {
/** base policy: must be first */ /** base policy: must be first */
grpc_lb_policy base; grpc_lb_policy base;
@ -54,7 +45,7 @@ typedef struct {
/** are we shut down? */ /** are we shut down? */
bool shutdown; bool shutdown;
/** list of picks that are waiting on connectivity */ /** list of picks that are waiting on connectivity */
pending_pick* pending_picks; grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */ /** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker; grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy; } pick_first_lb_policy;
@ -72,19 +63,27 @@ static void pf_destroy(grpc_lb_policy* pol) {
} }
} }
static void pf_shutdown_locked(grpc_lb_policy* pol) { static void pf_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol; pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p); gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
} }
p->shutdown = true; p->shutdown = true;
pending_pick* pp; grpc_lb_policy_pick_state* pick;
while ((pp = p->pending_picks) != nullptr) { while ((pick = p->pending_picks) != nullptr) {
p->pending_picks = pp->next; p->pending_picks = pick->next;
*pp->target = nullptr; if (new_policy != nullptr) {
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error)); // Hand off to new LB policy.
gpr_free(pp); if (grpc_lb_policy_pick_locked(new_policy, pick)) {
// Synchronous return, schedule closure.
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
} else {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
} }
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN, grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown"); GRPC_ERROR_REF(error), "shutdown");
@ -104,19 +103,18 @@ static void pf_shutdown_locked(grpc_lb_policy* pol) {
} }
static void pf_cancel_pick_locked(grpc_lb_policy* pol, static void pf_cancel_pick_locked(grpc_lb_policy* pol,
grpc_connected_subchannel** target, grpc_lb_policy_pick_state* pick,
grpc_error* error) { grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol; pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks; grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr; p->pending_picks = nullptr;
while (pp != nullptr) { while (pp != nullptr) {
pending_pick* next = pp->next; grpc_lb_policy_pick_state* next = pp->next;
if (pp->target == target) { if (pp == pick) {
*target = nullptr; pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1)); "Pick Cancelled", &error, 1));
gpr_free(pp);
} else { } else {
pp->next = p->pending_picks; pp->next = p->pending_picks;
p->pending_picks = pp; p->pending_picks = pp;
@ -131,21 +129,20 @@ static void pf_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq, uint32_t initial_metadata_flags_eq,
grpc_error* error) { grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol; pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks; grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr; p->pending_picks = nullptr;
while (pp != nullptr) { while (pick != nullptr) {
pending_pick* next = pp->next; grpc_lb_policy_pick_state* next = pick->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) == if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) { initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1)); "Pick Cancelled", &error, 1));
gpr_free(pp);
} else { } else {
pp->next = p->pending_picks; pick->next = p->pending_picks;
p->pending_picks = pp; p->pending_picks = pick;
} }
pp = next; pick = next;
} }
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} }
@ -175,27 +172,19 @@ static void pf_exit_idle_locked(grpc_lb_policy* pol) {
} }
static int pf_pick_locked(grpc_lb_policy* pol, static int pf_pick_locked(grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args, grpc_lb_policy_pick_state* pick) {
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol; pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously. // If we have a selected subchannel already, return synchronously.
if (p->selected != nullptr) { if (p->selected != nullptr) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel, pick->connected_subchannel = p->selected->connected_subchannel;
"picked");
return 1; return 1;
} }
// No subchannel selected yet, so handle asynchronously. // No subchannel selected yet, so handle asynchronously.
if (!p->started_picking) { if (!p->started_picking) {
start_picking_locked(p); start_picking_locked(p);
} }
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp)); pick->next = p->pending_picks;
pp->next = p->pending_picks; p->pending_picks = pick;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
p->pending_picks = pp;
return 0; return 0;
} }
@ -227,8 +216,7 @@ static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) { grpc_closure* on_ack) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol; pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (p->selected) { if (p->selected) {
grpc_connected_subchannel_ping(p->selected->connected_subchannel, p->selected->connected_subchannel->Ping(on_initiate, on_ack);
on_initiate, on_ack);
} else { } else {
GRPC_CLOSURE_SCHED(on_initiate, GRPC_CLOSURE_SCHED(on_initiate,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected")); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
@ -307,8 +295,7 @@ static void pf_update_locked(grpc_lb_policy* policy,
subchannel_list->num_subchannels); subchannel_list->num_subchannels);
} }
if (p->selected->connected_subchannel != nullptr) { if (p->selected->connected_subchannel != nullptr) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF( sd->connected_subchannel = p->selected->connected_subchannel;
p->selected->connected_subchannel, "pf_update_includes_selected");
} }
p->selected = sd; p->selected = sd;
if (p->subchannel_list != nullptr) { if (p->subchannel_list != nullptr) {
@ -420,8 +407,8 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// re-resolution is introduced. But we need to investigate whether we // re-resolution is introduced. But we need to investigate whether we
// really want to take any action instead of waiting for the selected // really want to take any action instead of waiting for the selected
// subchannel reconnecting. // subchannel reconnecting.
if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN || GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// If the selected channel goes bad, request a re-resolution. // If the selected channel goes bad, request a re-resolution.
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE, grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE, GRPC_ERROR_NONE,
@ -429,20 +416,19 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
p->started_picking = false; p->started_picking = false;
grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace, grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
// in transient failure. Rely on re-resolution to recover.
p->selected = nullptr;
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_list_unref_for_connectivity_watch(
sd->subchannel_list, "pf_selected_shutdown");
grpc_lb_subchannel_data_unref_subchannel(
sd, "pf_selected_shutdown"); // Unrefs connected subchannel
} else { } else {
grpc_connectivity_state_set(&p->state_tracker, grpc_connectivity_state_set(&p->state_tracker,
sd->curr_connectivity_state, sd->curr_connectivity_state,
GRPC_ERROR_REF(error), "selected_changed"); GRPC_ERROR_REF(error), "selected_changed");
}
if (sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
// Renew notification. // Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd); grpc_lb_subchannel_data_start_connectivity_watch(sd);
} else {
p->selected = nullptr;
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_list_unref_for_connectivity_watch(
sd->subchannel_list, "pf_selected_shutdown");
grpc_lb_subchannel_data_unref_subchannel(sd, "pf_selected_shutdown");
} }
} }
return; return;
@ -460,6 +446,8 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
case GRPC_CHANNEL_READY: { case GRPC_CHANNEL_READY: {
// Case 2. Promote p->latest_pending_subchannel_list to // Case 2. Promote p->latest_pending_subchannel_list to
// p->subchannel_list. // p->subchannel_list.
sd->connected_subchannel =
grpc_subchannel_get_connected_subchannel(sd->subchannel);
if (sd->subchannel_list == p->latest_pending_subchannel_list) { if (sd->subchannel_list == p->latest_pending_subchannel_list) {
GPR_ASSERT(p->subchannel_list != nullptr); GPR_ASSERT(p->subchannel_list != nullptr);
grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list, grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
@ -470,9 +458,6 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// Cases 1 and 2. // Cases 1 and 2.
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY, grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "connecting_ready"); GRPC_ERROR_NONE, "connecting_ready");
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"connected");
p->selected = sd; p->selected = sd;
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p, gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
@ -481,18 +466,16 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// Drop all other subchannels, since we are now connected. // Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(p); destroy_unselected_subchannels_locked(p);
// Update any calls that were waiting for a pick. // Update any calls that were waiting for a pick.
pending_pick* pp; grpc_lb_policy_pick_state* pick;
while ((pp = p->pending_picks)) { while ((pick = p->pending_picks)) {
p->pending_picks = pp->next; p->pending_picks = pick->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF( pick->connected_subchannel = p->selected->connected_subchannel;
p->selected->connected_subchannel, "picked");
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p", "Servicing pending pick with selected subchannel %p",
(void*)p->selected); (void*)p->selected);
} }
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
} }
// Renew notification. // Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd); grpc_lb_subchannel_data_start_connectivity_watch(sd);
@ -531,39 +514,8 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
grpc_lb_subchannel_data_start_connectivity_watch(sd); grpc_lb_subchannel_data_start_connectivity_watch(sd);
break; break;
} }
case GRPC_CHANNEL_SHUTDOWN: { case GRPC_CHANNEL_SHUTDOWN:
grpc_lb_subchannel_data_stop_connectivity_watch(sd); GPR_UNREACHABLE_CODE(break);
grpc_lb_subchannel_data_unref_subchannel(sd, "pf_candidate_shutdown");
// Advance to next subchannel and check its state.
grpc_lb_subchannel_data* original_sd = sd;
do {
sd->subchannel_list->checking_subchannel =
(sd->subchannel_list->checking_subchannel + 1) %
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
} while (sd->subchannel == nullptr && sd != original_sd);
if (sd == original_sd) {
grpc_lb_subchannel_list_unref_for_connectivity_watch(
sd->subchannel_list, "pf_exhausted_subchannels");
if (sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE,
"exhausted_subchannels+reresolve");
p->started_picking = false;
grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
GRPC_ERROR_NONE);
}
} else {
if (sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(
&p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "subchannel_failed");
}
// Reuses the connectivity refs from the previous watch.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
}
}
} }
} }

@ -36,36 +36,12 @@
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/support/ref_counted_ptr.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h" #include "src/core/lib/transport/static_metadata.h"
grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin"); grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
namespace {
/** List of entities waiting for a pick.
*
* Once a pick is available, \a target is updated and \a on_complete called. */
struct pending_pick {
pending_pick* next;
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
void** user_data;
/* bitmask passed to pick() and used for selective cancelling. See
* grpc_lb_policy_cancel_picks() */
uint32_t initial_metadata_flags;
/* output argument where to store the pick()ed connected subchannel, or NULL
* upon error. */
grpc_connected_subchannel** target;
/* to be invoked once the pick() has completed (regardless of success) */
grpc_closure* on_complete;
};
} // namespace
typedef struct round_robin_lb_policy { typedef struct round_robin_lb_policy {
/** base policy: must be first */ /** base policy: must be first */
grpc_lb_policy base; grpc_lb_policy base;
@ -77,7 +53,7 @@ typedef struct round_robin_lb_policy {
/** are we shutting down? */ /** are we shutting down? */
bool shutdown; bool shutdown;
/** List of picks that are waiting on connectivity */ /** List of picks that are waiting on connectivity */
pending_pick* pending_picks; grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */ /** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker; grpc_connectivity_state_tracker state_tracker;
@ -152,7 +128,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
(void*)p, (unsigned long)last_ready_index, (void*)p, (unsigned long)last_ready_index,
(void*)p->subchannel_list->subchannels[last_ready_index].subchannel, (void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
(void*)p->subchannel_list->subchannels[last_ready_index] (void*)p->subchannel_list->subchannels[last_ready_index]
.connected_subchannel); .connected_subchannel.get());
} }
} }
@ -169,19 +145,27 @@ static void rr_destroy(grpc_lb_policy* pol) {
gpr_free(p); gpr_free(p);
} }
static void rr_shutdown_locked(grpc_lb_policy* pol) { static void rr_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol; round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p); gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
} }
p->shutdown = true; p->shutdown = true;
pending_pick* pp; grpc_lb_policy_pick_state* pick;
while ((pp = p->pending_picks) != nullptr) { while ((pick = p->pending_picks) != nullptr) {
p->pending_picks = pp->next; p->pending_picks = pick->next;
*pp->target = nullptr; if (new_policy != nullptr) {
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error)); // Hand off to new LB policy.
gpr_free(pp); if (grpc_lb_policy_pick_locked(new_policy, pick)) {
// Synchronous return; schedule callback.
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
} else {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
} }
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN, grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown"); GRPC_ERROR_REF(error), "rr_shutdown");
@ -201,19 +185,18 @@ static void rr_shutdown_locked(grpc_lb_policy* pol) {
} }
static void rr_cancel_pick_locked(grpc_lb_policy* pol, static void rr_cancel_pick_locked(grpc_lb_policy* pol,
grpc_connected_subchannel** target, grpc_lb_policy_pick_state* pick,
grpc_error* error) { grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol; round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks; grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr; p->pending_picks = nullptr;
while (pp != nullptr) { while (pp != nullptr) {
pending_pick* next = pp->next; grpc_lb_policy_pick_state* next = pp->next;
if (pp->target == target) { if (pp == pick) {
*target = nullptr; pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1)); "Pick cancelled", &error, 1));
gpr_free(pp);
} else { } else {
pp->next = p->pending_picks; pp->next = p->pending_picks;
p->pending_picks = pp; p->pending_picks = pp;
@ -228,22 +211,21 @@ static void rr_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq, uint32_t initial_metadata_flags_eq,
grpc_error* error) { grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol; round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks; grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr; p->pending_picks = nullptr;
while (pp != nullptr) { while (pick != nullptr) {
pending_pick* next = pp->next; grpc_lb_policy_pick_state* next = pick->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) == if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) { initial_metadata_flags_eq) {
*pp->target = nullptr; pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1)); "Pick cancelled", &error, 1));
gpr_free(pp);
} else { } else {
pp->next = p->pending_picks; pick->next = p->pending_picks;
p->pending_picks = pp; p->pending_picks = pick;
} }
pp = next; pick = next;
} }
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} }
@ -268,13 +250,10 @@ static void rr_exit_idle_locked(grpc_lb_policy* pol) {
} }
static int rr_pick_locked(grpc_lb_policy* pol, static int rr_pick_locked(grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args, grpc_lb_policy_pick_state* pick) {
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol; round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol, gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
p->shutdown); p->shutdown);
} }
GPR_ASSERT(!p->shutdown); GPR_ASSERT(!p->shutdown);
@ -284,18 +263,17 @@ static int rr_pick_locked(grpc_lb_policy* pol,
/* readily available, report right away */ /* readily available, report right away */
grpc_lb_subchannel_data* sd = grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index]; &p->subchannel_list->subchannels[next_ready_index];
*target = pick->connected_subchannel = sd->connected_subchannel;
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked"); if (pick->user_data != nullptr) {
if (user_data != nullptr) { *pick->user_data = sd->user_data;
*user_data = sd->user_data;
} }
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, " "[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %lu)", "index %" PRIuPTR ")",
(void*)p, (void*)sd->subchannel, (void*)*target, p, sd->subchannel, pick->connected_subchannel.get(),
(void*)sd->subchannel_list, (unsigned long)next_ready_index); sd->subchannel_list, next_ready_index);
} }
/* only advance the last picked pointer if the selection was used */ /* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index); update_last_ready_subchannel_index_locked(p, next_ready_index);
@ -306,27 +284,21 @@ static int rr_pick_locked(grpc_lb_policy* pol,
if (!p->started_picking) { if (!p->started_picking) {
start_picking_locked(p); start_picking_locked(p);
} }
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp)); pick->next = p->pending_picks;
pp->next = p->pending_picks; p->pending_picks = pick;
pp->target = target;
pp->on_complete = on_complete;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->user_data = user_data;
p->pending_picks = pp;
return 0; return 0;
} }
static void update_state_counters_locked(grpc_lb_subchannel_data* sd) { static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list; grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
GPR_ASSERT(sd->prev_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) { if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0); GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready; --subchannel_list->num_ready;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { } else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(subchannel_list->num_transient_failures > 0); GPR_ASSERT(subchannel_list->num_transient_failures > 0);
--subchannel_list->num_transient_failures; --subchannel_list->num_transient_failures;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
GPR_ASSERT(subchannel_list->num_shutdown > 0);
--subchannel_list->num_shutdown;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) { } else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
GPR_ASSERT(subchannel_list->num_idle > 0); GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle; --subchannel_list->num_idle;
@ -336,8 +308,6 @@ static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
++subchannel_list->num_ready; ++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { } else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
++subchannel_list->num_transient_failures; ++subchannel_list->num_transient_failures;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
++subchannel_list->num_shutdown;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) { } else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) {
++subchannel_list->num_idle; ++subchannel_list->num_idle;
} }
@ -437,6 +407,7 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// either the current or latest pending subchannel lists. // either the current or latest pending subchannel lists.
GPR_ASSERT(sd->subchannel_list == p->subchannel_list || GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
sd->subchannel_list == p->latest_pending_subchannel_list); sd->subchannel_list == p->latest_pending_subchannel_list);
GPR_ASSERT(sd->pending_connectivity_state_unsafe != GRPC_CHANNEL_SHUTDOWN);
// Now that we're inside the combiner, copy the pending connectivity // Now that we're inside the combiner, copy the pending connectivity
// state (which was set by the connectivity state watcher) to // state (which was set by the connectivity state watcher) to
// curr_connectivity_state, which is what we use inside of the combiner. // curr_connectivity_state, which is what we use inside of the combiner.
@ -444,18 +415,17 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// Update state counters and new overall state. // Update state counters and new overall state.
update_state_counters_locked(sd); update_state_counters_locked(sd);
update_lb_connectivity_status_locked(sd, GRPC_ERROR_REF(error)); update_lb_connectivity_status_locked(sd, GRPC_ERROR_REF(error));
// If the sd's new state is SHUTDOWN, unref the subchannel. // If the sd's new state is TRANSIENT_FAILURE, unref the *connected*
if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { // subchannel, if any.
grpc_lb_subchannel_data_stop_connectivity_watch(sd); switch (sd->curr_connectivity_state) {
grpc_lb_subchannel_data_unref_subchannel(sd, "rr_connectivity_shutdown"); case GRPC_CHANNEL_TRANSIENT_FAILURE: {
grpc_lb_subchannel_list_unref_for_connectivity_watch( sd->connected_subchannel.reset();
sd->subchannel_list, "rr_connectivity_shutdown"); break;
} else { // sd not in SHUTDOWN }
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) { case GRPC_CHANNEL_READY: {
if (sd->connected_subchannel == nullptr) { if (sd->connected_subchannel == nullptr) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF( sd->connected_subchannel =
grpc_subchannel_get_connected_subchannel(sd->subchannel), grpc_subchannel_get_connected_subchannel(sd->subchannel);
"connected");
} }
if (sd->subchannel_list != p->subchannel_list) { if (sd->subchannel_list != p->subchannel_list) {
// promote sd->subchannel_list to p->subchannel_list. // promote sd->subchannel_list to p->subchannel_list.
@ -495,13 +465,12 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// picks, update the last picked pointer // picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index); update_last_ready_subchannel_index_locked(p, next_ready_index);
} }
pending_pick* pp; grpc_lb_policy_pick_state* pick;
while ((pp = p->pending_picks)) { while ((pick = p->pending_picks)) {
p->pending_picks = pp->next; p->pending_picks = pick->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF( pick->connected_subchannel = selected->connected_subchannel;
selected->connected_subchannel, "rr_picked"); if (pick->user_data != nullptr) {
if (pp->user_data != nullptr) { *pick->user_data = selected->user_data;
*pp->user_data = selected->user_data;
} }
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
@ -510,13 +479,17 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
(void*)p, (void*)selected->subchannel, (void*)p, (void*)selected->subchannel,
(void*)p->subchannel_list, (unsigned long)next_ready_index); (void*)p->subchannel_list, (unsigned long)next_ready_index);
} }
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
} }
break;
} }
// Renew notification. case GRPC_CHANNEL_SHUTDOWN:
grpc_lb_subchannel_data_start_connectivity_watch(sd); GPR_UNREACHABLE_CODE(return );
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:; // fallthrough
} }
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
} }
static grpc_connectivity_state rr_check_connectivity_locked( static grpc_connectivity_state rr_check_connectivity_locked(
@ -540,10 +513,9 @@ static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
if (next_ready_index < p->subchannel_list->num_subchannels) { if (next_ready_index < p->subchannel_list->num_subchannels) {
grpc_lb_subchannel_data* selected = grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index]; &p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel* target = GRPC_CONNECTED_SUBCHANNEL_REF( grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> target =
selected->connected_subchannel, "rr_ping"); selected->connected_subchannel;
grpc_connected_subchannel_ping(target, on_initiate, on_ack); target->Ping(on_initiate, on_ack);
GRPC_CONNECTED_SUBCHANNEL_UNREF(target, "rr_ping");
} else { } else {
GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected")); "Round Robin not connected"));

@ -42,10 +42,7 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
} }
GRPC_SUBCHANNEL_UNREF(sd->subchannel, reason); GRPC_SUBCHANNEL_UNREF(sd->subchannel, reason);
sd->subchannel = nullptr; sd->subchannel = nullptr;
if (sd->connected_subchannel != nullptr) { sd->connected_subchannel.reset();
GRPC_CONNECTED_SUBCHANNEL_UNREF(sd->connected_subchannel, reason);
sd->connected_subchannel = nullptr;
}
if (sd->user_data != nullptr) { if (sd->user_data != nullptr) {
GPR_ASSERT(sd->user_data_vtable != nullptr); GPR_ASSERT(sd->user_data_vtable != nullptr);
sd->user_data_vtable->destroy(sd->user_data); sd->user_data_vtable->destroy(sd->user_data);
@ -213,13 +210,13 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
void grpc_lb_subchannel_list_ref_for_connectivity_watch( void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) { grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason); GRPC_LB_POLICY_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason); grpc_lb_subchannel_list_ref(subchannel_list, reason);
} }
void grpc_lb_subchannel_list_unref_for_connectivity_watch( void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) { grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_UNREF(subchannel_list->policy, reason); GRPC_LB_POLICY_UNREF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(subchannel_list, reason); grpc_lb_subchannel_list_unref(subchannel_list, reason);
} }

@ -22,6 +22,7 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/support/ref_counted_ptr.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
// TODO(roth): This code is intended to be shared between pick_first and // TODO(roth): This code is intended to be shared between pick_first and
@ -43,7 +44,7 @@ typedef struct {
grpc_lb_subchannel_list* subchannel_list; grpc_lb_subchannel_list* subchannel_list;
/** subchannel itself */ /** subchannel itself */
grpc_subchannel* subchannel; grpc_subchannel* subchannel;
grpc_connected_subchannel* connected_subchannel; grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> connected_subchannel;
/** Is a connectivity notification pending? */ /** Is a connectivity notification pending? */
bool connectivity_notification_pending; bool connectivity_notification_pending;
/** notification that connectivity has changed on subchannel */ /** notification that connectivity has changed on subchannel */

@ -41,6 +41,7 @@
#include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h" #include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/debug_location.h"
#include "src/core/lib/support/manual_constructor.h" #include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/surface/channel.h" #include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_init.h" #include "src/core/lib/surface/channel_init.h"
@ -55,10 +56,6 @@
#define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120 #define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2 #define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \
((grpc_connected_subchannel*)(gpr_atm_##barrier##_load( \
&(subchannel)->connected_subchannel)))
namespace { namespace {
struct state_watcher { struct state_watcher {
grpc_closure closure; grpc_closure closure;
@ -98,7 +95,7 @@ struct grpc_subchannel {
grpc_connect_out_args connecting_result; grpc_connect_out_args connecting_result;
/** callback for connection finishing */ /** callback for connection finishing */
grpc_closure connected; grpc_closure on_connected;
/** callback for our alarm */ /** callback for our alarm */
grpc_closure on_alarm; grpc_closure on_alarm;
@ -107,12 +104,13 @@ struct grpc_subchannel {
being setup */ being setup */
grpc_pollset_set* pollset_set; grpc_pollset_set* pollset_set;
/** active connection, or null; of type grpc_connected_subchannel */
gpr_atm connected_subchannel;
/** mutex protecting remaining elements */ /** mutex protecting remaining elements */
gpr_mu mu; gpr_mu mu;
/** active connection, or null; of type grpc_core::ConnectedSubchannel
*/
grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> connected_subchannel;
/** have we seen a disconnection? */ /** have we seen a disconnection? */
bool disconnected; bool disconnected;
/** are we connecting */ /** are we connecting */
@ -136,16 +134,15 @@ struct grpc_subchannel {
}; };
struct grpc_subchannel_call { struct grpc_subchannel_call {
grpc_connected_subchannel* connection; grpc_core::ConnectedSubchannel* connection;
grpc_closure* schedule_closure_after_destroy; grpc_closure* schedule_closure_after_destroy;
}; };
#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack*)((call) + 1)) #define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack*)((call) + 1))
#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack*)(con))
#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \ #define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \
(((grpc_subchannel_call*)(callstack)) - 1) (((grpc_subchannel_call*)(callstack)) - 1)
static void subchannel_connected(void* subchannel, grpc_error* error); static void on_subchannel_connected(void* subchannel, grpc_error* error);
#ifndef NDEBUG #ifndef NDEBUG
#define REF_REASON reason #define REF_REASON reason
@ -163,20 +160,9 @@ static void subchannel_connected(void* subchannel, grpc_error* error);
*/ */
static void connection_destroy(void* arg, grpc_error* error) { static void connection_destroy(void* arg, grpc_error* error) {
grpc_connected_subchannel* c = (grpc_connected_subchannel*)arg; grpc_channel_stack* stk = (grpc_channel_stack*)arg;
grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CONNECTION(c)); grpc_channel_stack_destroy(stk);
gpr_free(c); gpr_free(stk);
}
grpc_connected_subchannel* grpc_connected_subchannel_ref(
grpc_connected_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
return c;
}
void grpc_connected_subchannel_unref(
grpc_connected_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
} }
/* /*
@ -243,18 +229,13 @@ grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
} }
static void disconnect(grpc_subchannel* c) { static void disconnect(grpc_subchannel* c) {
grpc_connected_subchannel* con;
grpc_subchannel_index_unregister(c->key, c); grpc_subchannel_index_unregister(c->key, c);
gpr_mu_lock(&c->mu); gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->disconnected); GPR_ASSERT(!c->disconnected);
c->disconnected = true; c->disconnected = true;
grpc_connector_shutdown(c->connector, GRPC_ERROR_CREATE_FROM_STATIC_STRING( grpc_connector_shutdown(c->connector, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Subchannel disconnected")); "Subchannel disconnected"));
con = GET_CONNECTED_SUBCHANNEL(c, no_barrier); c->connected_subchannel.reset();
if (con != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(con, "connection");
gpr_atm_no_barrier_store(&c->connected_subchannel, (gpr_atm)0xdeadbeef);
}
gpr_mu_unlock(&c->mu); gpr_mu_unlock(&c->mu);
} }
@ -374,7 +355,7 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
if (new_args != nullptr) grpc_channel_args_destroy(new_args); if (new_args != nullptr) grpc_channel_args_destroy(new_args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev = c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher; &c->root_external_state_watcher;
GRPC_CLOSURE_INIT(&c->connected, subchannel_connected, c, GRPC_CLOSURE_INIT(&c->on_connected, on_subchannel_connected, c,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE, grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
"subchannel"); "subchannel");
@ -397,7 +378,7 @@ static void continue_connect_locked(grpc_subchannel* c) {
grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_CONNECTING, grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_NONE, "state_change"); GRPC_ERROR_NONE, "state_change");
grpc_connector_connect(c->connector, &args, &c->connecting_result, grpc_connector_connect(c->connector, &args, &c->connecting_result,
&c->connected); &c->on_connected);
} }
grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel* c, grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel* c,
@ -458,7 +439,7 @@ static void maybe_start_connecting_locked(grpc_subchannel* c) {
return; return;
} }
if (GET_CONNECTED_SUBCHANNEL(c, no_barrier) != nullptr) { if (c->connected_subchannel != nullptr) {
/* Already connected: don't restart */ /* Already connected: don't restart */
return; return;
} }
@ -481,9 +462,10 @@ static void maybe_start_connecting_locked(grpc_subchannel* c) {
const grpc_millis time_til_next = const grpc_millis time_til_next =
c->next_attempt_deadline - grpc_core::ExecCtx::Get()->Now(); c->next_attempt_deadline - grpc_core::ExecCtx::Get()->Now();
if (time_til_next <= 0) { if (time_til_next <= 0) {
gpr_log(GPR_INFO, "Retry immediately"); gpr_log(GPR_INFO, "Subchannel %p: Retry immediately", c);
} else { } else {
gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next); gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRIdPTR " milliseconds", c,
time_til_next);
} }
GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
grpc_timer_init(&c->alarm, c->next_attempt_deadline, &c->on_alarm); grpc_timer_init(&c->alarm, c->next_attempt_deadline, &c->on_alarm);
@ -527,75 +509,56 @@ void grpc_subchannel_notify_on_state_change(
} }
} }
void grpc_connected_subchannel_process_transport_op( static void on_connected_subchannel_connectivity_changed(void* p,
grpc_connected_subchannel* con, grpc_transport_op* op) { grpc_error* error) {
grpc_channel_stack* channel_stack = CHANNEL_STACK_FROM_CONNECTION(con); state_watcher* connected_subchannel_watcher = (state_watcher*)p;
grpc_channel_element* top_elem = grpc_channel_stack_element(channel_stack, 0); grpc_subchannel* c = connected_subchannel_watcher->subchannel;
top_elem->filter->start_transport_op(top_elem, op);
}
static void subchannel_on_child_state_changed(void* p, grpc_error* error) {
state_watcher* sw = (state_watcher*)p;
grpc_subchannel* c = sw->subchannel;
gpr_mu* mu = &c->mu; gpr_mu* mu = &c->mu;
gpr_mu_lock(mu); gpr_mu_lock(mu);
/* if we failed just leave this closure */ switch (connected_subchannel_watcher->connectivity_state) {
if (sw->connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { case GRPC_CHANNEL_TRANSIENT_FAILURE:
/* any errors on a subchannel ==> we're done, create a new one */ case GRPC_CHANNEL_SHUTDOWN: {
sw->connectivity_state = GRPC_CHANNEL_SHUTDOWN; if (!c->disconnected && c->connected_subchannel != nullptr) {
} if (grpc_trace_stream_refcount.enabled()) {
grpc_connectivity_state_set(&c->state_tracker, sw->connectivity_state, gpr_log(GPR_INFO,
GRPC_ERROR_REF(error), "reflect_child"); "Connected subchannel %p of subchannel %p has gone into %s. "
if (sw->connectivity_state != GRPC_CHANNEL_SHUTDOWN) { "Attempting to reconnect.",
grpc_connected_subchannel_notify_on_state_change( c->connected_subchannel.get(), c,
GET_CONNECTED_SUBCHANNEL(c, no_barrier), nullptr, grpc_connectivity_state_name(
&sw->connectivity_state, &sw->closure); connected_subchannel_watcher->connectivity_state));
GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher"); }
sw = nullptr; c->connected_subchannel.reset();
grpc_connectivity_state_set(&c->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "reflect_child");
c->backoff_begun = false;
c->backoff->Reset();
maybe_start_connecting_locked(c);
} else {
connected_subchannel_watcher->connectivity_state =
GRPC_CHANNEL_SHUTDOWN;
}
break;
}
default: {
grpc_connectivity_state_set(
&c->state_tracker, connected_subchannel_watcher->connectivity_state,
GRPC_ERROR_REF(error), "reflect_child");
GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
c->connected_subchannel->NotifyOnStateChange(
nullptr, &connected_subchannel_watcher->connectivity_state,
&connected_subchannel_watcher->closure);
connected_subchannel_watcher = nullptr;
}
} }
gpr_mu_unlock(mu); gpr_mu_unlock(mu);
GRPC_SUBCHANNEL_WEAK_UNREF(c, "state_watcher"); GRPC_SUBCHANNEL_WEAK_UNREF(c, "state_watcher");
gpr_free(sw); gpr_free(connected_subchannel_watcher);
}
static void connected_subchannel_state_op(grpc_connected_subchannel* con,
grpc_pollset_set* interested_parties,
grpc_connectivity_state* state,
grpc_closure* closure) {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
grpc_channel_element* elem;
op->connectivity_state = state;
op->on_connectivity_state_change = closure;
op->bind_pollset_set = interested_parties;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
elem->filter->start_transport_op(elem, op);
}
void grpc_connected_subchannel_notify_on_state_change(
grpc_connected_subchannel* con, grpc_pollset_set* interested_parties,
grpc_connectivity_state* state, grpc_closure* closure) {
connected_subchannel_state_op(con, interested_parties, state, closure);
}
void grpc_connected_subchannel_ping(grpc_connected_subchannel* con,
grpc_closure* on_initiate,
grpc_closure* on_ack) {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
grpc_channel_element* elem;
op->send_ping.on_initiate = on_initiate;
op->send_ping.on_ack = on_ack;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
elem->filter->start_transport_op(elem, op);
} }
static bool publish_transport_locked(grpc_subchannel* c) { static bool publish_transport_locked(grpc_subchannel* c) {
grpc_connected_subchannel* con;
grpc_channel_stack* stk;
state_watcher* sw_subchannel;
/* construct channel stack */ /* construct channel stack */
grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
grpc_channel_stack_builder_set_channel_arguments( grpc_channel_stack_builder_set_channel_arguments(
@ -607,8 +570,9 @@ static bool publish_transport_locked(grpc_subchannel* c) {
grpc_channel_stack_builder_destroy(builder); grpc_channel_stack_builder_destroy(builder);
return false; return false;
} }
grpc_channel_stack* stk;
grpc_error* error = grpc_channel_stack_builder_finish( grpc_error* error = grpc_channel_stack_builder_finish(
builder, 0, 1, connection_destroy, nullptr, (void**)&con); builder, 0, 1, connection_destroy, nullptr, (void**)&stk);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
grpc_transport_destroy(c->connecting_result.transport); grpc_transport_destroy(c->connecting_result.transport);
gpr_log(GPR_ERROR, "error initializing subchannel stack: %s", gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
@ -616,38 +580,37 @@ static bool publish_transport_locked(grpc_subchannel* c) {
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
return false; return false;
} }
stk = CHANNEL_STACK_FROM_CONNECTION(con);
memset(&c->connecting_result, 0, sizeof(c->connecting_result)); memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */ /* initialize state watcher */
sw_subchannel = (state_watcher*)gpr_malloc(sizeof(*sw_subchannel)); state_watcher* connected_subchannel_watcher =
sw_subchannel->subchannel = c; (state_watcher*)gpr_zalloc(sizeof(*connected_subchannel_watcher));
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY; connected_subchannel_watcher->subchannel = c;
GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed, connected_subchannel_watcher->connectivity_state = GRPC_CHANNEL_READY;
sw_subchannel, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&connected_subchannel_watcher->closure,
on_connected_subchannel_connectivity_changed,
connected_subchannel_watcher, grpc_schedule_on_exec_ctx);
if (c->disconnected) { if (c->disconnected) {
gpr_free(sw_subchannel); gpr_free(connected_subchannel_watcher);
grpc_channel_stack_destroy(stk); grpc_channel_stack_destroy(stk);
gpr_free(con); gpr_free(stk);
return false; return false;
} }
/* publish */ /* publish */
/* TODO(ctiller): this full barrier seems to clear up a TSAN failure. c->connected_subchannel.reset(
I'd have expected the rel_cas below to be enough, but grpc_core::New<grpc_core::ConnectedSubchannel>(stk));
seemingly it's not. gpr_log(GPR_INFO, "New connected subchannel at %p for subchannel %p",
Re-evaluate if we really need this. */ c->connected_subchannel.get(), c);
gpr_atm_full_barrier();
GPR_ASSERT(gpr_atm_rel_cas(&c->connected_subchannel, 0, (gpr_atm)con));
/* setup subchannel watching connected subchannel for changes; subchannel /* setup subchannel watching connected subchannel for changes; subchannel
ref for connecting is donated to the state watcher */ ref for connecting is donated to the state watcher */
GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher"); GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
grpc_connected_subchannel_notify_on_state_change( c->connected_subchannel->NotifyOnStateChange(
con, c->pollset_set, &sw_subchannel->connectivity_state, c->pollset_set, &connected_subchannel_watcher->connectivity_state,
&sw_subchannel->closure); &connected_subchannel_watcher->closure);
/* signal completion */ /* signal completion */
grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_READY, grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_READY,
@ -655,11 +618,11 @@ static bool publish_transport_locked(grpc_subchannel* c) {
return true; return true;
} }
static void subchannel_connected(void* arg, grpc_error* error) { static void on_subchannel_connected(void* arg, grpc_error* error) {
grpc_subchannel* c = (grpc_subchannel*)arg; grpc_subchannel* c = (grpc_subchannel*)arg;
grpc_channel_args* delete_channel_args = c->connecting_result.channel_args; grpc_channel_args* delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected"); GRPC_SUBCHANNEL_WEAK_REF(c, "on_subchannel_connected");
gpr_mu_lock(&c->mu); gpr_mu_lock(&c->mu);
c->connecting = false; c->connecting = false;
if (c->connecting_result.transport != nullptr && if (c->connecting_result.transport != nullptr &&
@ -694,10 +657,10 @@ static void subchannel_call_destroy(void* call, grpc_error* error) {
grpc_subchannel_call* c = (grpc_subchannel_call*)call; grpc_subchannel_call* c = (grpc_subchannel_call*)call;
GPR_ASSERT(c->schedule_closure_after_destroy != nullptr); GPR_ASSERT(c->schedule_closure_after_destroy != nullptr);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0); GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel* connection = c->connection; grpc_core::ConnectedSubchannel* connection = c->connection;
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr, grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
c->schedule_closure_after_destroy); c->schedule_closure_after_destroy);
GRPC_CONNECTED_SUBCHANNEL_UNREF(connection, "subchannel_call"); connection->Unref(DEBUG_LOCATION, "subchannel_call");
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0); GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
} }
@ -728,9 +691,12 @@ void grpc_subchannel_call_process_op(grpc_subchannel_call* call,
GPR_TIMER_END("grpc_subchannel_call_process_op", 0); GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
} }
grpc_connected_subchannel* grpc_subchannel_get_connected_subchannel( grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel>
grpc_subchannel* c) { grpc_subchannel_get_connected_subchannel(grpc_subchannel* c) {
return GET_CONNECTED_SUBCHANNEL(c, acq); gpr_mu_lock(&c->mu);
auto copy = c->connected_subchannel;
gpr_mu_unlock(&c->mu);
return copy;
} }
const grpc_subchannel_key* grpc_subchannel_get_key( const grpc_subchannel_key* grpc_subchannel_get_key(
@ -738,36 +704,6 @@ const grpc_subchannel_key* grpc_subchannel_get_key(
return subchannel->key; return subchannel->key;
} }
grpc_error* grpc_connected_subchannel_create_call(
grpc_connected_subchannel* con,
const grpc_connected_subchannel_call_args* args,
grpc_subchannel_call** call) {
grpc_channel_stack* chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = (grpc_subchannel_call*)gpr_arena_alloc(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
const grpc_call_element_args call_args = {
callstk, /* call_stack */
nullptr, /* server_transport_data */
args->context, /* context */
args->path, /* path */
args->start_time, /* start_time */
args->deadline, /* deadline */
args->arena, /* arena */
args->call_combiner /* call_combiner */
};
grpc_error* error = grpc_call_stack_init(chanstk, 1, subchannel_call_destroy,
*call, &call_args);
if (error != GRPC_ERROR_NONE) {
const char* error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
return error;
}
grpc_call_stack_set_pollset_or_pollset_set(callstk, args->pollent);
return GRPC_ERROR_NONE;
}
grpc_call_stack* grpc_subchannel_call_get_call_stack( grpc_call_stack* grpc_subchannel_call_get_call_stack(
grpc_subchannel_call* subchannel_call) { grpc_subchannel_call* subchannel_call) {
return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call); return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
@ -803,3 +739,64 @@ grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr) {
(char*)GRPC_ARG_SUBCHANNEL_ADDRESS, (char*)GRPC_ARG_SUBCHANNEL_ADDRESS,
addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup("")); addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
} }
namespace grpc_core {
ConnectedSubchannel::ConnectedSubchannel(grpc_channel_stack* channel_stack)
: grpc_core::RefCountedWithTracing(&grpc_trace_stream_refcount),
channel_stack_(channel_stack) {}
ConnectedSubchannel::~ConnectedSubchannel() {
GRPC_CHANNEL_STACK_UNREF(channel_stack_, "connected_subchannel_dtor");
}
void ConnectedSubchannel::NotifyOnStateChange(
grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
grpc_closure* closure) {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
grpc_channel_element* elem;
op->connectivity_state = state;
op->on_connectivity_state_change = closure;
op->bind_pollset_set = interested_parties;
elem = grpc_channel_stack_element(channel_stack_, 0);
elem->filter->start_transport_op(elem, op);
}
void ConnectedSubchannel::Ping(grpc_closure* on_initiate,
grpc_closure* on_ack) {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
grpc_channel_element* elem;
op->send_ping.on_initiate = on_initiate;
op->send_ping.on_ack = on_ack;
elem = grpc_channel_stack_element(channel_stack_, 0);
elem->filter->start_transport_op(elem, op);
}
grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_subchannel_call** call) {
*call = (grpc_subchannel_call*)gpr_arena_alloc(
args.arena,
sizeof(grpc_subchannel_call) + channel_stack_->call_stack_size);
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
Ref(DEBUG_LOCATION, "subchannel_call");
(*call)->connection = this;
const grpc_call_element_args call_args = {
callstk, /* call_stack */
nullptr, /* server_transport_data */
args.context, /* context */
args.path, /* path */
args.start_time, /* start_time */
args.deadline, /* deadline */
args.arena, /* arena */
args.call_combiner /* call_combiner */
};
grpc_error* error = grpc_call_stack_init(
channel_stack_, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
const char* error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
return error;
}
grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
return GRPC_ERROR_NONE;
}
} // namespace grpc_core

@ -23,6 +23,8 @@
#include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/support/arena.h" #include "src/core/lib/support/arena.h"
#include "src/core/lib/support/ref_counted.h"
#include "src/core/lib/support/ref_counted_ptr.h"
#include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h" #include "src/core/lib/transport/metadata.h"
@ -32,7 +34,6 @@
/** A (sub-)channel that knows how to connect to exactly one target /** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */ address. Provides a target for load balancing. */
typedef struct grpc_subchannel grpc_subchannel; typedef struct grpc_subchannel grpc_subchannel;
typedef struct grpc_connected_subchannel grpc_connected_subchannel;
typedef struct grpc_subchannel_call grpc_subchannel_call; typedef struct grpc_subchannel_call grpc_subchannel_call;
typedef struct grpc_subchannel_args grpc_subchannel_args; typedef struct grpc_subchannel_args grpc_subchannel_args;
typedef struct grpc_subchannel_key grpc_subchannel_key; typedef struct grpc_subchannel_key grpc_subchannel_key;
@ -48,10 +49,6 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r)) grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) \ #define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) \
grpc_subchannel_weak_unref((p), __FILE__, __LINE__, (r)) grpc_subchannel_weak_unref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
grpc_connected_subchannel_unref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) \ #define GRPC_SUBCHANNEL_CALL_REF(p, r) \
grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r)) grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \ #define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \
@ -65,14 +62,39 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p)) #define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p)) #define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) grpc_subchannel_weak_unref((p)) #define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) grpc_subchannel_weak_unref((p))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
grpc_connected_subchannel_unref((p))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p)) #define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p)) #define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p))
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS #define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
#endif #endif
namespace grpc_core {

// Refcounted wrapper around the channel stack of an established connection
// to a single target address.  Replaces the former C-style
// grpc_connected_subchannel type (removed in this change).
class ConnectedSubchannel : public grpc_core::RefCountedWithTracing {
 public:
  // Everything needed to construct a subchannel call on this connection.
  struct CallArgs {
    grpc_polling_entity* pollent;  // polling entity bound to the new call
    grpc_slice path;               // method path for the call
    gpr_timespec start_time;
    grpc_millis deadline;
    gpr_arena* arena;  // arena the call object is allocated from
    grpc_call_context_element* context;
    grpc_call_combiner* call_combiner;
  };

  // Does not take ownership semantics visible here -- the channel stack is
  // unreffed in the destructor; presumably a ref is taken at construction.
  explicit ConnectedSubchannel(grpc_channel_stack* channel_stack);
  ~ConnectedSubchannel();

  grpc_channel_stack* channel_stack() { return channel_stack_; }
  // Asks the transport to notify |closure| when connectivity state changes.
  void NotifyOnStateChange(grpc_pollset_set* interested_parties,
                           grpc_connectivity_state* state,
                           grpc_closure* closure);
  // Sends a ping; |on_initiate| fires on write, |on_ack| on peer ack.
  void Ping(grpc_closure* on_initiate, grpc_closure* on_ack);
  // Creates a subchannel call from |args|; takes a ref on this object.
  grpc_error* CreateCall(const CallArgs& args, grpc_subchannel_call** call);

 private:
  grpc_channel_stack* channel_stack_;
};
} // namespace grpc_core
grpc_subchannel* grpc_subchannel_ref( grpc_subchannel* grpc_subchannel_ref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
grpc_subchannel* grpc_subchannel_ref_from_weak_ref( grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
@ -83,35 +105,11 @@ grpc_subchannel* grpc_subchannel_weak_ref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref( void grpc_subchannel_weak_unref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
grpc_connected_subchannel* grpc_connected_subchannel_ref(
grpc_connected_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_unref(
grpc_connected_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref( void grpc_subchannel_call_ref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS); grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_unref( void grpc_subchannel_call_unref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS); grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a subchannel call */
typedef struct {
grpc_polling_entity* pollent;
grpc_slice path;
gpr_timespec start_time;
grpc_millis deadline;
gpr_arena* arena;
grpc_call_context_element* context;
grpc_call_combiner* call_combiner;
} grpc_connected_subchannel_call_args;
grpc_error* grpc_connected_subchannel_create_call(
grpc_connected_subchannel* connected_subchannel,
const grpc_connected_subchannel_call_args* args,
grpc_subchannel_call** subchannel_call);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(
grpc_connected_subchannel* subchannel, grpc_transport_op* op);
/** poll the current connectivity state of a channel */ /** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity( grpc_connectivity_state grpc_subchannel_check_connectivity(
grpc_subchannel* channel, grpc_error** error); grpc_subchannel* channel, grpc_error** error);
@ -121,17 +119,12 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(
void grpc_subchannel_notify_on_state_change( void grpc_subchannel_notify_on_state_change(
grpc_subchannel* channel, grpc_pollset_set* interested_parties, grpc_subchannel* channel, grpc_pollset_set* interested_parties,
grpc_connectivity_state* state, grpc_closure* notify); grpc_connectivity_state* state, grpc_closure* notify);
void grpc_connected_subchannel_notify_on_state_change(
grpc_connected_subchannel* channel, grpc_pollset_set* interested_parties, /** retrieve the grpc_core::ConnectedSubchannel - or nullptr if not connected
grpc_connectivity_state* state, grpc_closure* notify); * (which may happen before it initially connects or during transient failures)
void grpc_connected_subchannel_ping(grpc_connected_subchannel* channel, * */
grpc_closure* on_initiate, grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel>
grpc_closure* on_ack); grpc_subchannel_get_connected_subchannel(grpc_subchannel* c);
/** retrieve the grpc_connected_subchannel - or NULL if called before
the subchannel becomes connected */
grpc_connected_subchannel* grpc_subchannel_get_connected_subchannel(
grpc_subchannel* subchannel);
/** return the subchannel index key for \a subchannel */ /** return the subchannel index key for \a subchannel */
const grpc_subchannel_key* grpc_subchannel_get_key( const grpc_subchannel_key* grpc_subchannel_get_key(

@ -18,8 +18,16 @@
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/transport/metadata.h" #include "src/core/lib/transport/metadata.h"
void grpc_chttp2_plugin_init(void) {} void grpc_chttp2_plugin_init(void) {
g_flow_control_enabled = true;
char* env_variable = gpr_getenv("GRPC_EXPERIMENTAL_DISABLE_FLOW_CONTROL");
if (env_variable != nullptr) {
g_flow_control_enabled = false;
gpr_free(env_variable);
}
}
void grpc_chttp2_plugin_shutdown(void) {} void grpc_chttp2_plugin_shutdown(void) {}

@ -152,6 +152,10 @@ static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error);
static void reset_byte_stream(void* arg, grpc_error* error); static void reset_byte_stream(void* arg, grpc_error* error);
// Flow control default enabled. Can be disabled by setting
// GRPC_EXPERIMENTAL_DISABLE_FLOW_CONTROL
bool g_flow_control_enabled = true;
/******************************************************************************* /*******************************************************************************
* CONSTRUCTION/DESTRUCTION/REFCOUNTING * CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/ */
@ -517,7 +521,13 @@ static void init_transport(grpc_chttp2_transport* t,
} }
} }
t->flow_control.Init(t, enable_bdp); if (g_flow_control_enabled) {
t->flow_control.Init<grpc_core::chttp2::TransportFlowControl>(t,
enable_bdp);
} else {
t->flow_control.Init<grpc_core::chttp2::TransportFlowControlDisabled>(t);
enable_bdp = false;
}
/* No pings allowed before receiving a header or data frame. */ /* No pings allowed before receiving a header or data frame. */
t->ping_state.pings_before_data_required = 0; t->ping_state.pings_before_data_required = 0;
@ -682,7 +692,14 @@ static int init_stream(grpc_transport* gt, grpc_stream* gs,
post_destructive_reclaimer(t); post_destructive_reclaimer(t);
} }
s->flow_control.Init(t->flow_control.get(), s); if (t->flow_control->flow_control_enabled()) {
s->flow_control.Init<grpc_core::chttp2::StreamFlowControl>(
static_cast<grpc_core::chttp2::TransportFlowControl*>(
t->flow_control.get()),
s);
} else {
s->flow_control.Init<grpc_core::chttp2::StreamFlowControlDisabled>();
}
GPR_TIMER_END("init_stream", 0); GPR_TIMER_END("init_stream", 0);
return 0; return 0;
@ -2402,8 +2419,11 @@ static void read_action_locked(void* tp, grpc_error* error) {
grpc_error* errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE, grpc_error* errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
GRPC_ERROR_NONE}; GRPC_ERROR_NONE};
for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) { for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
t->flow_control->bdp_estimator()->AddIncomingBytes( grpc_core::BdpEstimator* bdp_est = t->flow_control->bdp_estimator();
(int64_t)GRPC_SLICE_LENGTH(t->read_buffer.slices[i])); if (bdp_est) {
bdp_est->AddIncomingBytes(
(int64_t)GRPC_SLICE_LENGTH(t->read_buffer.slices[i]));
}
errors[1] = grpc_chttp2_perform_read(t, t->read_buffer.slices[i]); errors[1] = grpc_chttp2_perform_read(t, t->read_buffer.slices[i]);
} }
if (errors[1] != GRPC_ERROR_NONE) { if (errors[1] != GRPC_ERROR_NONE) {

@ -27,6 +27,8 @@ extern grpc_core::TraceFlag grpc_http_trace;
extern grpc_core::TraceFlag grpc_trace_http2_stream_state; extern grpc_core::TraceFlag grpc_trace_http2_stream_state;
extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount; extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount;
extern bool g_flow_control_enabled;
grpc_transport* grpc_create_chttp2_transport( grpc_transport* grpc_create_chttp2_transport(
const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client); const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client);

@ -149,6 +149,25 @@ void FlowControlAction::Trace(grpc_chttp2_transport* t) const {
gpr_free(mf_str); gpr_free(mf_str);
} }
// "Disables" flow control by maxing out every window so bookkeeping never
// forces a stall, and by pinning frame-size/initial-window settings for all
// settings generations (peer/sent/acked) so no renegotiation is attempted.
TransportFlowControlDisabled::TransportFlowControlDisabled(
    grpc_chttp2_transport* t) {
  remote_window_ = kMaxWindow;
  target_initial_window_size_ = kMaxWindow;
  announced_window_ = kMaxWindow;
  // Pretend both sides have already agreed on the maximum frame size...
  t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE] =
      kFrameSize;
  t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE] =
      kFrameSize;
  t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE] =
      kFrameSize;
  // ...and on the maximum initial window size.
  t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] =
      kMaxWindow;
  t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] =
      kMaxWindow;
  t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] =
      kMaxWindow;
}
TransportFlowControl::TransportFlowControl(const grpc_chttp2_transport* t, TransportFlowControl::TransportFlowControl(const grpc_chttp2_transport* t,
bool enable_bdp_probe) bool enable_bdp_probe)
: t_(t), : t_(t),
@ -318,7 +337,7 @@ double TransportFlowControl::SmoothLogBdp(double value) {
} }
FlowControlAction::Urgency TransportFlowControl::DeltaUrgency( FlowControlAction::Urgency TransportFlowControl::DeltaUrgency(
int32_t value, grpc_chttp2_setting_id setting_id) { int64_t value, grpc_chttp2_setting_id setting_id) {
int64_t delta = int64_t delta =
(int64_t)value - (int64_t)t_->settings[GRPC_LOCAL_SETTINGS][setting_id]; (int64_t)value - (int64_t)t_->settings[GRPC_LOCAL_SETTINGS][setting_id];
// TODO(ncteisen): tune this // TODO(ncteisen): tune this
@ -344,7 +363,7 @@ FlowControlAction TransportFlowControl::PeriodicUpdate() {
action.set_send_initial_window_update( action.set_send_initial_window_update(
DeltaUrgency(target_initial_window_size_, DeltaUrgency(target_initial_window_size_,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE), GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE),
target_initial_window_size_); (uint32_t)target_initial_window_size_);
// get bandwidth estimate and update max_frame accordingly. // get bandwidth estimate and update max_frame accordingly.
double bw_dbl = bdp_estimator_.EstimateBandwidth(); double bw_dbl = bdp_estimator_.EstimateBandwidth();
@ -354,7 +373,7 @@ FlowControlAction TransportFlowControl::PeriodicUpdate() {
target_initial_window_size_), target_initial_window_size_),
16384, 16777215); 16384, 16777215);
action.set_send_max_frame_size_update( action.set_send_max_frame_size_update(
DeltaUrgency(frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE), DeltaUrgency((int64_t)frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE),
frame_size); frame_size);
} }
return UpdateAction(action); return UpdateAction(action);

@ -24,6 +24,7 @@
#include <grpc/support/useful.h> #include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/http2_settings.h" #include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/lib/support/abstract.h"
#include "src/core/lib/support/manual_constructor.h" #include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/transport/bdp_estimator.h" #include "src/core/lib/transport/bdp_estimator.h"
#include "src/core/lib/transport/pid_controller.h" #include "src/core/lib/transport/pid_controller.h"
@ -43,10 +44,16 @@ namespace grpc_core {
namespace chttp2 { namespace chttp2 {
static constexpr uint32_t kDefaultWindow = 65535; static constexpr uint32_t kDefaultWindow = 65535;
static constexpr int64_t kMaxWindow = (int64_t)((1u << 31) - 1);
// TODO(ncteisen): Tune this
static constexpr uint32_t kFrameSize = 1024 * 1024;
class TransportFlowControl; class TransportFlowControl;
class StreamFlowControl; class StreamFlowControl;
// Encapsulates a collections of actions the transport needs to take with
// regard to flow control. Each action comes with urgencies that tell the
// transport how quickly the action must take place.
class FlowControlAction { class FlowControlAction {
public: public:
enum class Urgency : uint8_t { enum class Urgency : uint8_t {
@ -132,36 +139,122 @@ class FlowControlTrace {
int64_t announced_window_delta_; int64_t announced_window_delta_;
}; };
class TransportFlowControl { // Fat interface with all methods a flow control implementation needs to
// support. gRPC C Core does not support pure virtual functions, so instead
// we abort in any methods which require implementation in the base class.
// Fat interface with all methods a transport flow control implementation
// needs to support.  gRPC C Core does not use pure virtual functions, so
// instead we abort() in any method that requires an override.
class TransportFlowControlBase {
 public:
  TransportFlowControlBase() {}
  virtual ~TransportFlowControlBase() {}

  // Is flow control enabled? This is needed in other codepaths like the
  // checks in parsing and in writing.
  virtual bool flow_control_enabled() const { abort(); }

  // Called to check if the transport needs to send a WINDOW_UPDATE frame.
  virtual uint32_t MaybeSendUpdate(bool writing_anyway) { abort(); }

  // Using the protected members, returns an Action to be taken by the
  // transport.
  virtual FlowControlAction MakeAction() { abort(); }

  // Using the protected members, returns an Action to be taken by the
  // transport.  Also checks for updates to our BDP estimate and acts
  // accordingly.
  virtual FlowControlAction PeriodicUpdate() { abort(); }

  // Called to do bookkeeping when a stream owned by this transport sends
  // data on the wire.
  virtual void StreamSentData(int64_t size) { abort(); }

  // Called to do bookkeeping when a stream owned by this transport receives
  // data from the wire.  Also does error checking for frame size.
  virtual grpc_error* RecvData(int64_t incoming_frame_size) { abort(); }

  // Called to do bookkeeping when we receive a WINDOW_UPDATE frame.
  virtual void RecvUpdate(uint32_t size) { abort(); }

  // Returns the BdpEstimator held by this object.  Caller is responsible for
  // checking for nullptr (the disabled implementation has none).
  // TODO(ncteisen): consider fully encapsulating all bdp estimator actions
  // inside TransportFlowControl.
  virtual BdpEstimator* bdp_estimator() { return nullptr; }

  // Getters
  int64_t remote_window() const { return remote_window_; }
  virtual int64_t target_window() const { return target_initial_window_size_; }
  int64_t announced_window() const { return announced_window_; }

  // Used in certain benchmarks in which we don't want FlowControl to be a
  // factor.
  virtual void TestOnlyForceHugeWindow() {}

  GRPC_ABSTRACT_BASE_CLASS

 protected:
  friend class ::grpc::testing::TrickledCHTTP2;
  int64_t remote_window_ = kDefaultWindow;
  int64_t target_initial_window_size_ = kDefaultWindow;
  int64_t announced_window_ = kDefaultWindow;
};
// Implementation of flow control that does NOTHING.  Always returns maximum
// values, never initiates writes, and assumes that the remote peer is doing
// the same.  To be used to narrow down on flow control as the cause of
// negative performance.
class TransportFlowControlDisabled final : public TransportFlowControlBase {
 public:
  // Maxes out all window/frame-size values (see the .cc constructor).
  TransportFlowControlDisabled(grpc_chttp2_transport* t);

  bool flow_control_enabled() const override { return false; }

  // Never do anything: no updates, empty actions, no bookkeeping, no errors.
  uint32_t MaybeSendUpdate(bool writing_anyway) override { return 0; }
  FlowControlAction MakeAction() override { return FlowControlAction(); }
  FlowControlAction PeriodicUpdate() override { return FlowControlAction(); }
  void StreamSentData(int64_t size) override {}
  grpc_error* RecvData(int64_t incoming_frame_size) override {
    return GRPC_ERROR_NONE;
  }
  void RecvUpdate(uint32_t size) override {}
};
// Implementation of flow control that abides to HTTP/2 spec and attempts
// to be as performant as possible.
class TransportFlowControl final : public TransportFlowControlBase {
public: public:
TransportFlowControl(const grpc_chttp2_transport* t, bool enable_bdp_probe); TransportFlowControl(const grpc_chttp2_transport* t, bool enable_bdp_probe);
~TransportFlowControl() {} ~TransportFlowControl() {}
bool flow_control_enabled() const override { return true; }
bool bdp_probe() const { return enable_bdp_probe_; } bool bdp_probe() const { return enable_bdp_probe_; }
// returns an announce if we should send a transport update to our peer, // returns an announce if we should send a transport update to our peer,
// else returns zero; writing_anyway indicates if a write would happen // else returns zero; writing_anyway indicates if a write would happen
// regardless of the send - if it is false and this function returns non-zero, // regardless of the send - if it is false and this function returns non-zero,
// this announce will cause a write to occur // this announce will cause a write to occur
uint32_t MaybeSendUpdate(bool writing_anyway); uint32_t MaybeSendUpdate(bool writing_anyway) override;
// Reads the flow control data and returns and actionable struct that will // Reads the flow control data and returns and actionable struct that will
// tell chttp2 exactly what it needs to do // tell chttp2 exactly what it needs to do
FlowControlAction MakeAction() { return UpdateAction(FlowControlAction()); } FlowControlAction MakeAction() override {
return UpdateAction(FlowControlAction());
}
// Call periodically (at a low-ish rate, 100ms - 10s makes sense) // Call periodically (at a low-ish rate, 100ms - 10s makes sense)
// to perform more complex flow control calculations and return an action // to perform more complex flow control calculations and return an action
// to let chttp2 change its parameters // to let chttp2 change its parameters
FlowControlAction PeriodicUpdate(); FlowControlAction PeriodicUpdate() override;
void StreamSentData(int64_t size) { remote_window_ -= size; } void StreamSentData(int64_t size) override { remote_window_ -= size; }
grpc_error* ValidateRecvData(int64_t incoming_frame_size); grpc_error* ValidateRecvData(int64_t incoming_frame_size);
void CommitRecvData(int64_t incoming_frame_size) { void CommitRecvData(int64_t incoming_frame_size) {
announced_window_ -= incoming_frame_size; announced_window_ -= incoming_frame_size;
} }
grpc_error* RecvData(int64_t incoming_frame_size) { grpc_error* RecvData(int64_t incoming_frame_size) override {
FlowControlTrace trace(" data recv", this, nullptr); FlowControlTrace trace(" data recv", this, nullptr);
grpc_error* error = ValidateRecvData(incoming_frame_size); grpc_error* error = ValidateRecvData(incoming_frame_size);
if (error != GRPC_ERROR_NONE) return error; if (error != GRPC_ERROR_NONE) return error;
@ -170,18 +263,18 @@ class TransportFlowControl {
} }
// we have received a WINDOW_UPDATE frame for a transport // we have received a WINDOW_UPDATE frame for a transport
void RecvUpdate(uint32_t size) { void RecvUpdate(uint32_t size) override {
FlowControlTrace trace("t updt recv", this, nullptr); FlowControlTrace trace("t updt recv", this, nullptr);
remote_window_ += size; remote_window_ += size;
} }
int64_t remote_window() const { return remote_window_; } // See comment above announced_stream_total_over_incoming_window_ for the
int64_t target_window() const { // logic behind this decision.
int64_t target_window() const override {
return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1), return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
announced_stream_total_over_incoming_window_ + announced_stream_total_over_incoming_window_ +
target_initial_window_size_); target_initial_window_size_);
} }
int64_t announced_window() const { return announced_window_; }
const grpc_chttp2_transport* transport() const { return t_; } const grpc_chttp2_transport* transport() const { return t_; }
@ -201,18 +294,17 @@ class TransportFlowControl {
} }
} }
BdpEstimator* bdp_estimator() { return &bdp_estimator_; } BdpEstimator* bdp_estimator() override { return &bdp_estimator_; }
void TestOnlyForceHugeWindow() { void TestOnlyForceHugeWindow() override {
announced_window_ = 1024 * 1024 * 1024; announced_window_ = 1024 * 1024 * 1024;
remote_window_ = 1024 * 1024 * 1024; remote_window_ = 1024 * 1024 * 1024;
} }
private: private:
friend class ::grpc::testing::TrickledCHTTP2;
double TargetLogBdp(); double TargetLogBdp();
double SmoothLogBdp(double value); double SmoothLogBdp(double value);
FlowControlAction::Urgency DeltaUrgency(int32_t value, FlowControlAction::Urgency DeltaUrgency(int64_t value,
grpc_chttp2_setting_id setting_id); grpc_chttp2_setting_id setting_id);
FlowControlAction UpdateAction(FlowControlAction action) { FlowControlAction UpdateAction(FlowControlAction action) {
@ -225,9 +317,6 @@ class TransportFlowControl {
const grpc_chttp2_transport* const t_; const grpc_chttp2_transport* const t_;
/** Our bookkeeping for the remote peer's available window */
int64_t remote_window_ = kDefaultWindow;
/** calculating what we should give for local window: /** calculating what we should give for local window:
we track the total amount of flow control over initial window size we track the total amount of flow control over initial window size
across all streams: this is data that we want to receive right now (it across all streams: this is data that we want to receive right now (it
@ -239,13 +328,6 @@ class TransportFlowControl {
int64_t announced_stream_total_over_incoming_window_ = 0; int64_t announced_stream_total_over_incoming_window_ = 0;
int64_t announced_stream_total_under_incoming_window_ = 0; int64_t announced_stream_total_under_incoming_window_ = 0;
/** This is out window according to what we have sent to our remote peer. The
* difference between this and target window is what we use to decide when
* to send WINDOW_UPDATE frames. */
int64_t announced_window_ = kDefaultWindow;
int32_t target_initial_window_size_ = kDefaultWindow;
/** should we probe bdp? */ /** should we probe bdp? */
const bool enable_bdp_probe_; const bool enable_bdp_probe_;
@ -257,39 +339,117 @@ class TransportFlowControl {
grpc_millis last_pid_update_ = 0; grpc_millis last_pid_update_ = 0;
}; };
class StreamFlowControl { // Fat interface with all methods a stream flow control implementation needs
// to support. gRPC C Core does not support pure virtual functions, so instead
// we abort in any methods which require implementation in the base class.
// Fat interface with all methods a stream flow control implementation needs
// to support.  gRPC C Core does not use pure virtual functions, so instead
// we abort() in any method that requires an override.
class StreamFlowControlBase {
 public:
  StreamFlowControlBase() {}
  virtual ~StreamFlowControlBase() {}

  // Updates an action using the protected members.
  virtual FlowControlAction UpdateAction(FlowControlAction action) { abort(); }

  // Using the protected members, returns an Action for this stream to be
  // taken by the transport.
  virtual FlowControlAction MakeAction() { abort(); }

  // Bookkeeping for when data is sent on this stream.
  virtual void SentData(int64_t outgoing_frame_size) { abort(); }

  // Bookkeeping and error checking for when data is received by this stream.
  virtual grpc_error* RecvData(int64_t incoming_frame_size) { abort(); }

  // Called to check if this stream needs to send a WINDOW_UPDATE frame.
  virtual uint32_t MaybeSendUpdate() { abort(); }

  // Bookkeeping for receiving a WINDOW_UPDATE for this stream.
  virtual void RecvUpdate(uint32_t size) { abort(); }

  // Bookkeeping for when a call pulls bytes out of the transport.  At this
  // point we consider the data 'used' and can thus let our peer know we are
  // ready for more data.
  virtual void IncomingByteStreamUpdate(size_t max_size_hint,
                                        size_t have_already) {
    abort();
  }

  // Used in certain benchmarks in which we don't want FlowControl to be a
  // factor.
  virtual void TestOnlyForceHugeWindow() {}

  // Getters
  int64_t remote_window_delta() { return remote_window_delta_; }
  int64_t local_window_delta() { return local_window_delta_; }
  int64_t announced_window_delta() { return announced_window_delta_; }

  GRPC_ABSTRACT_BASE_CLASS

 protected:
  friend class ::grpc::testing::TrickledCHTTP2;
  // Deltas are relative to the transport's initial window size; see the
  // concrete StreamFlowControl implementation for exact semantics.
  int64_t remote_window_delta_ = 0;
  int64_t local_window_delta_ = 0;
  int64_t announced_window_delta_ = 0;
};
// Implementation of stream flow control that does NOTHING.  Always returns
// maximum values, never initiates writes, and assumes that the remote peer
// is doing the same.  To be used to narrow down on flow control as the cause
// of negative performance.
class StreamFlowControlDisabled : public StreamFlowControlBase {
 public:
  // Pass actions through untouched and never do any bookkeeping.
  FlowControlAction UpdateAction(FlowControlAction action) override {
    return action;
  }
  FlowControlAction MakeAction() override { return FlowControlAction(); }
  void SentData(int64_t outgoing_frame_size) override {}
  grpc_error* RecvData(int64_t incoming_frame_size) override {
    return GRPC_ERROR_NONE;
  }
  uint32_t MaybeSendUpdate() override { return 0; }
  void RecvUpdate(uint32_t size) override {}
  void IncomingByteStreamUpdate(size_t max_size_hint,
                                size_t have_already) override {}
};
// Implementation of flow control that abides to HTTP/2 spec and attempts
// to be as performant as possible.
class StreamFlowControl final : public StreamFlowControlBase {
public: public:
StreamFlowControl(TransportFlowControl* tfc, const grpc_chttp2_stream* s); StreamFlowControl(TransportFlowControl* tfc, const grpc_chttp2_stream* s);
~StreamFlowControl() { ~StreamFlowControl() {
tfc_->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_); tfc_->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
} }
FlowControlAction UpdateAction(FlowControlAction action); FlowControlAction UpdateAction(FlowControlAction action) override;
FlowControlAction MakeAction() { return UpdateAction(tfc_->MakeAction()); } FlowControlAction MakeAction() override {
return UpdateAction(tfc_->MakeAction());
}
// we have sent data on the wire, we must track this in our bookkeeping for // we have sent data on the wire, we must track this in our bookkeeping for
// the remote peer's flow control. // the remote peer's flow control.
void SentData(int64_t outgoing_frame_size) { void SentData(int64_t outgoing_frame_size) override {
FlowControlTrace tracer(" data sent", tfc_, this); FlowControlTrace tracer(" data sent", tfc_, this);
tfc_->StreamSentData(outgoing_frame_size); tfc_->StreamSentData(outgoing_frame_size);
remote_window_delta_ -= outgoing_frame_size; remote_window_delta_ -= outgoing_frame_size;
} }
// we have received data from the wire // we have received data from the wire
grpc_error* RecvData(int64_t incoming_frame_size); grpc_error* RecvData(int64_t incoming_frame_size) override;
// returns an announce if we should send a stream update to our peer, else // returns an announce if we should send a stream update to our peer, else
// returns zero // returns zero
uint32_t MaybeSendUpdate(); uint32_t MaybeSendUpdate() override;
// we have received a WINDOW_UPDATE frame for a stream // we have received a WINDOW_UPDATE frame for a stream
void RecvUpdate(uint32_t size) { void RecvUpdate(uint32_t size) override {
FlowControlTrace trace("s updt recv", tfc_, this); FlowControlTrace trace("s updt recv", tfc_, this);
remote_window_delta_ += size; remote_window_delta_ += size;
} }
// the application is asking for a certain amount of bytes // the application is asking for a certain amount of bytes
void IncomingByteStreamUpdate(size_t max_size_hint, size_t have_already); void IncomingByteStreamUpdate(size_t max_size_hint,
size_t have_already) override;
int64_t remote_window_delta() const { return remote_window_delta_; } int64_t remote_window_delta() const { return remote_window_delta_; }
int64_t local_window_delta() const { return local_window_delta_; } int64_t local_window_delta() const { return local_window_delta_; }
@ -297,14 +457,13 @@ class StreamFlowControl {
const grpc_chttp2_stream* stream() const { return s_; } const grpc_chttp2_stream* stream() const { return s_; }
void TestOnlyForceHugeWindow() { void TestOnlyForceHugeWindow() override {
announced_window_delta_ = 1024 * 1024 * 1024; announced_window_delta_ = 1024 * 1024 * 1024;
local_window_delta_ = 1024 * 1024 * 1024; local_window_delta_ = 1024 * 1024 * 1024;
remote_window_delta_ = 1024 * 1024 * 1024; remote_window_delta_ = 1024 * 1024 * 1024;
} }
private: private:
friend class ::grpc::testing::TrickledCHTTP2;
TransportFlowControl* const tfc_; TransportFlowControl* const tfc_;
const grpc_chttp2_stream* const s_; const grpc_chttp2_stream* const s_;
@ -313,21 +472,6 @@ class StreamFlowControl {
announced_window_delta_ += change; announced_window_delta_ += change;
tfc->PostUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_); tfc->PostUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
} }
/** window available for us to send to peer, over or under the initial
* window
* size of the transport... ie:
* remote_window = remote_window_delta + transport.initial_window_size */
int64_t remote_window_delta_ = 0;
/** window available for peer to send to us (as a delta on
* transport.initial_window_size)
* local_window = local_window_delta + transport.initial_window_size */
int64_t local_window_delta_ = 0;
/** window available for peer to send to us over this stream that we have
* announced to the peer */
int64_t announced_window_delta_ = 0;
}; };
} // namespace chttp2 } // namespace chttp2

@ -186,6 +186,12 @@ grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
if (grpc_wire_id_to_setting_id(parser->id, &id)) { if (grpc_wire_id_to_setting_id(parser->id, &id)) {
const grpc_chttp2_setting_parameters* sp = const grpc_chttp2_setting_parameters* sp =
&grpc_chttp2_settings_parameters[id]; &grpc_chttp2_settings_parameters[id];
// If flow control is disabled we skip these.
if (!t->flow_control->flow_control_enabled() &&
(id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE ||
id == GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE)) {
continue;
}
if (parser->value < sp->min_value || parser->value > sp->max_value) { if (parser->value < sp->min_value || parser->value > sp->max_value) {
switch (sp->invalid_value_behavior) { switch (sp->invalid_value_behavior) {
case GRPC_CHTTP2_CLAMP_INVALID_VALUE: case GRPC_CHTTP2_CLAMP_INVALID_VALUE:

@ -351,7 +351,10 @@ struct grpc_chttp2_transport {
/** parser for goaway frames */ /** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser; grpc_chttp2_goaway_parser goaway_parser;
grpc_core::ManualConstructor<grpc_core::chttp2::TransportFlowControl> grpc_core::PolymorphicManualConstructor<
grpc_core::chttp2::TransportFlowControlBase,
grpc_core::chttp2::TransportFlowControl,
grpc_core::chttp2::TransportFlowControlDisabled>
flow_control; flow_control;
/** initial window change. This is tracked as we parse settings frames from /** initial window change. This is tracked as we parse settings frames from
* the remote peer. If there is a positive delta, then we will make all * the remote peer. If there is a positive delta, then we will make all
@ -525,7 +528,10 @@ struct grpc_chttp2_stream {
bool sent_initial_metadata; bool sent_initial_metadata;
bool sent_trailing_metadata; bool sent_trailing_metadata;
grpc_core::ManualConstructor<grpc_core::chttp2::StreamFlowControl> grpc_core::PolymorphicManualConstructor<
grpc_core::chttp2::StreamFlowControlBase,
grpc_core::chttp2::StreamFlowControl,
grpc_core::chttp2::StreamFlowControlDisabled>
flow_control; flow_control;
grpc_slice_buffer flow_controlled_buffer; grpc_slice_buffer flow_controlled_buffer;

@ -186,9 +186,10 @@ grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t,
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
goto dts_fh_0; /* loop */ goto dts_fh_0; /* loop */
} else if (t->incoming_frame_size > } else if (t->flow_control->flow_control_enabled() &&
t->settings[GRPC_ACKED_SETTINGS] t->incoming_frame_size >
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]) { t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]) {
char* msg; char* msg;
gpr_asprintf(&msg, "Frame size %d is larger than max frame size %d", gpr_asprintf(&msg, "Frame size %d is larger than max frame size %d",
t->incoming_frame_size, t->incoming_frame_size,

@ -183,6 +183,7 @@ void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport* t,
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport* t, void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport* t,
grpc_chttp2_stream* s) { grpc_chttp2_stream* s) {
GPR_ASSERT(t->flow_control->flow_control_enabled());
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
} }
@ -198,6 +199,7 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport* t,
void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport* t, void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport* t,
grpc_chttp2_stream* s) { grpc_chttp2_stream* s) {
GPR_ASSERT(t->flow_control->flow_control_enabled());
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
} }

@ -20,12 +20,14 @@
#include <grpc/support/log.h> #include <grpc/support/log.h>
/* This polling engine is only relevant on linux kernels supporting epoll() */ /* This polling engine is only relevant on linux kernels supporting epoll
epoll_create() or epoll_create1() */
#ifdef GRPC_LINUX_EPOLL #ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h" #include "src/core/lib/iomgr/ev_epoll1_linux.h"
#include <assert.h> #include <assert.h>
#include <errno.h> #include <errno.h>
#include <fcntl.h>
#include <limits.h> #include <limits.h>
#include <poll.h> #include <poll.h>
#include <pthread.h> #include <pthread.h>
@ -84,11 +86,28 @@ typedef struct epoll_set {
/* The global singleton epoll set */ /* The global singleton epoll set */
static epoll_set g_epoll_set; static epoll_set g_epoll_set;
static int epoll_create_and_cloexec() {
#ifdef GRPC_LINUX_EPOLL_CREATE1
int fd = epoll_create1(EPOLL_CLOEXEC);
if (fd < 0) {
gpr_log(GPR_ERROR, "epoll_create1 unavailable");
}
#else
int fd = epoll_create(MAX_EPOLL_EVENTS);
if (fd < 0) {
gpr_log(GPR_ERROR, "epoll_create unavailable");
} else if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
gpr_log(GPR_ERROR, "fcntl following epoll_create failed");
return -1;
}
#endif
return fd;
}
/* Must be called *only* once */ /* Must be called *only* once */
static bool epoll_set_init() { static bool epoll_set_init() {
g_epoll_set.epfd = epoll_create1(EPOLL_CLOEXEC); g_epoll_set.epfd = epoll_create_and_cloexec();
if (g_epoll_set.epfd < 0) { if (g_epoll_set.epfd < 0) {
gpr_log(GPR_ERROR, "epoll unavailable");
return false; return false;
} }

@ -21,7 +21,7 @@
#include <grpc/support/log.h> #include <grpc/support/log.h>
/* This polling engine is only relevant on linux kernels supporting epoll() */ /* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL #ifdef GRPC_LINUX_EPOLL_CREATE1
#include "src/core/lib/iomgr/ev_epollex_linux.h" #include "src/core/lib/iomgr/ev_epollex_linux.h"
@ -1442,15 +1442,15 @@ const grpc_event_engine_vtable* grpc_init_epollex_linux(
return &vtable; return &vtable;
} }
#else /* defined(GRPC_LINUX_EPOLL) */ #else /* defined(GRPC_LINUX_EPOLL_CREATE1) */
#if defined(GRPC_POSIX_SOCKET) #if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_epollex_linux.h" #include "src/core/lib/iomgr/ev_epollex_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return /* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means
* NULL */ epoll_create1 is not available. Return NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux( const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested) { bool explicitly_requested) {
return nullptr; return nullptr;
} }
#endif /* defined(GRPC_POSIX_SOCKET) */ #endif /* defined(GRPC_POSIX_SOCKET) */
#endif /* !defined(GRPC_LINUX_EPOLL) */ #endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */

@ -22,7 +22,7 @@
#include <grpc/support/log.h> #include <grpc/support/log.h>
/* This polling engine is only relevant on linux kernels supporting epoll() */ /* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL #ifdef GRPC_LINUX_EPOLL_CREATE1
#include "src/core/lib/iomgr/ev_epollsig_linux.h" #include "src/core/lib/iomgr/ev_epollsig_linux.h"
@ -1725,11 +1725,11 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
return &vtable; return &vtable;
} }
#else /* defined(GRPC_LINUX_EPOLL) */ #else /* defined(GRPC_LINUX_EPOLL_CREATE1) */
#if defined(GRPC_POSIX_SOCKET) #if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_epollsig_linux.h" #include "src/core/lib/iomgr/ev_epollsig_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return /* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means
* NULL */ epoll_create1 is not available. Return NULL */
const grpc_event_engine_vtable* grpc_init_epollsig_linux( const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) { bool explicit_request) {
return nullptr; return nullptr;
@ -1737,4 +1737,4 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
#endif /* defined(GRPC_POSIX_SOCKET) */ #endif /* defined(GRPC_POSIX_SOCKET) */
void grpc_use_signal(int signum) {} void grpc_use_signal(int signum) {}
#endif /* !defined(GRPC_LINUX_EPOLL) */ #endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */

@ -24,10 +24,10 @@
const grpc_event_engine_vtable* grpc_init_epollsig_linux(bool explicit_request); const grpc_event_engine_vtable* grpc_init_epollsig_linux(bool explicit_request);
#ifdef GRPC_LINUX_EPOLL #ifdef GRPC_LINUX_EPOLL_CREATE1
void* grpc_fd_get_polling_island(grpc_fd* fd); void* grpc_fd_get_polling_island(grpc_fd* fd);
void* grpc_pollset_get_polling_island(grpc_pollset* ps); void* grpc_pollset_get_polling_island(grpc_pollset* ps);
bool grpc_are_polling_islands_equal(void* p, void* q); bool grpc_are_polling_islands_equal(void* p, void* q);
#endif /* defined(GRPC_LINUX_EPOLL) */ #endif /* defined(GRPC_LINUX_EPOLL_CREATE1) */
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H */ #endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H */

@ -20,7 +20,7 @@
#include "src/core/lib/iomgr/is_epollexclusive_available.h" #include "src/core/lib/iomgr/is_epollexclusive_available.h"
#ifdef GRPC_LINUX_EPOLL #ifdef GRPC_LINUX_EPOLL_CREATE1
#include <grpc/support/log.h> #include <grpc/support/log.h>

@ -37,6 +37,7 @@
#define GRPC_POSIX_SOCKETUTILS 1 #define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_WAKEUP_FD 1 #define GRPC_POSIX_WAKEUP_FD 1
#define GRPC_TIMER_USE_GENERIC 1 #define GRPC_TIMER_USE_GENERIC 1
#define GRPC_LINUX_EPOLL 1
#elif defined(GPR_WINDOWS) #elif defined(GPR_WINDOWS)
#define GRPC_TIMER_USE_GENERIC 1 #define GRPC_TIMER_USE_GENERIC 1
#define GRPC_WINSOCK_SOCKET 1 #define GRPC_WINSOCK_SOCKET 1
@ -67,8 +68,11 @@
#define GRPC_POSIX_WAKEUP_FD 1 #define GRPC_POSIX_WAKEUP_FD 1
#define GRPC_TIMER_USE_GENERIC 1 #define GRPC_TIMER_USE_GENERIC 1
#ifdef __GLIBC_PREREQ #ifdef __GLIBC_PREREQ
#if __GLIBC_PREREQ(2, 9) #if __GLIBC_PREREQ(2, 4)
#define GRPC_LINUX_EPOLL 1 #define GRPC_LINUX_EPOLL 1
#endif
#if __GLIBC_PREREQ(2, 9)
#define GRPC_LINUX_EPOLL_CREATE1 1
#define GRPC_LINUX_EVENTFD 1 #define GRPC_LINUX_EVENTFD 1
#endif #endif
#if __GLIBC_PREREQ(2, 10) #if __GLIBC_PREREQ(2, 10)
@ -77,6 +81,7 @@
#endif #endif
#ifndef __GLIBC__ #ifndef __GLIBC__
#define GRPC_LINUX_EPOLL 1 #define GRPC_LINUX_EPOLL 1
#define GRPC_LINUX_EPOLL_CREATE1 1
#define GRPC_LINUX_EVENTFD 1 #define GRPC_LINUX_EVENTFD 1
#define GRPC_MSG_IOVLEN_TYPE int #define GRPC_MSG_IOVLEN_TYPE int
#endif #endif

@ -21,6 +21,10 @@
#define _GNU_SOURCE #define _GNU_SOURCE
#endif #endif
#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40
#endif
#include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/port.h"
#ifdef GRPC_POSIX_SOCKET #ifdef GRPC_POSIX_SOCKET
@ -280,11 +284,10 @@ static int bind_socket(grpc_socket_factory* socket_factory, int sockfd,
/* Prepare a recently-created socket for listening. */ /* Prepare a recently-created socket for listening. */
static int prepare_socket(grpc_socket_factory* socket_factory, int fd, static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
const grpc_resolved_address* addr) { const grpc_resolved_address* addr, int rcv_buf_size,
int snd_buf_size) {
grpc_resolved_address sockname_temp; grpc_resolved_address sockname_temp;
struct sockaddr* addr_ptr = (struct sockaddr*)addr->addr; struct sockaddr* addr_ptr = (struct sockaddr*)addr->addr;
/* Set send/receive socket buffers to 1 MB */
int buffer_size_bytes = 1024 * 1024;
if (fd < 0) { if (fd < 0) {
goto error; goto error;
@ -325,18 +328,25 @@ static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
goto error; goto error;
} }
if (grpc_set_socket_sndbuf(fd, buffer_size_bytes) != GRPC_ERROR_NONE) { if (grpc_set_socket_sndbuf(fd, snd_buf_size) != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes", gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes",
buffer_size_bytes); snd_buf_size);
goto error; goto error;
} }
if (grpc_set_socket_rcvbuf(fd, buffer_size_bytes) != GRPC_ERROR_NONE) { if (grpc_set_socket_rcvbuf(fd, rcv_buf_size) != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes", gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes",
buffer_size_bytes); rcv_buf_size);
goto error; goto error;
} }
{
int get_overflow = 1;
if (0 != setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &get_overflow,
sizeof(get_overflow))) {
gpr_log(GPR_INFO, "Failed to set socket overflow support");
}
}
return grpc_sockaddr_get_port(&sockname_temp); return grpc_sockaddr_get_port(&sockname_temp);
error: error:
@ -451,6 +461,7 @@ static void on_write(void* arg, grpc_error* error) {
static int add_socket_to_server(grpc_udp_server* s, int fd, static int add_socket_to_server(grpc_udp_server* s, int fd,
const grpc_resolved_address* addr, const grpc_resolved_address* addr,
int rcv_buf_size, int snd_buf_size,
grpc_udp_server_start_cb start_cb, grpc_udp_server_start_cb start_cb,
grpc_udp_server_read_cb read_cb, grpc_udp_server_read_cb read_cb,
grpc_udp_server_write_cb write_cb, grpc_udp_server_write_cb write_cb,
@ -460,7 +471,8 @@ static int add_socket_to_server(grpc_udp_server* s, int fd,
char* addr_str; char* addr_str;
char* name; char* name;
port = prepare_socket(s->socket_factory, fd, addr); port =
prepare_socket(s->socket_factory, fd, addr, rcv_buf_size, snd_buf_size);
if (port >= 0) { if (port >= 0) {
grpc_sockaddr_to_string(&addr_str, addr, 1); grpc_sockaddr_to_string(&addr_str, addr, 1);
gpr_asprintf(&name, "udp-server-listener:%s", addr_str); gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
@ -495,6 +507,7 @@ static int add_socket_to_server(grpc_udp_server* s, int fd,
int grpc_udp_server_add_port(grpc_udp_server* s, int grpc_udp_server_add_port(grpc_udp_server* s,
const grpc_resolved_address* addr, const grpc_resolved_address* addr,
int rcv_buf_size, int snd_buf_size,
grpc_udp_server_start_cb start_cb, grpc_udp_server_start_cb start_cb,
grpc_udp_server_read_cb read_cb, grpc_udp_server_read_cb read_cb,
grpc_udp_server_write_cb write_cb, grpc_udp_server_write_cb write_cb,
@ -545,8 +558,9 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
// TODO(rjshade): Test and propagate the returned grpc_error*: // TODO(rjshade): Test and propagate the returned grpc_error*:
GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory( GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory(
s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd)); s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd));
allocated_port1 = add_socket_to_server(s, fd, addr, start_cb, read_cb, allocated_port1 =
write_cb, orphan_cb); add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size, start_cb,
read_cb, write_cb, orphan_cb);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) { if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done; goto done;
} }
@ -569,7 +583,8 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
addr = &addr4_copy; addr = &addr4_copy;
} }
allocated_port2 = allocated_port2 =
add_socket_to_server(s, fd, addr, start_cb, read_cb, write_cb, orphan_cb); add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size, start_cb,
read_cb, write_cb, orphan_cb);
done: done:
gpr_free(allocated_addr); gpr_free(allocated_addr);

@ -68,6 +68,7 @@ int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index);
all of the multiple socket port matching logic in one place */ all of the multiple socket port matching logic in one place */
int grpc_udp_server_add_port(grpc_udp_server* s, int grpc_udp_server_add_port(grpc_udp_server* s,
const grpc_resolved_address* addr, const grpc_resolved_address* addr,
int rcv_buf_size, int snd_buf_size,
grpc_udp_server_start_cb start_cb, grpc_udp_server_start_cb start_cb,
grpc_udp_server_read_cb read_cb, grpc_udp_server_read_cb read_cb,
grpc_udp_server_write_cb write_cb, grpc_udp_server_write_cb write_cb,

@ -114,7 +114,7 @@ static void on_credentials_metadata(void* arg, grpc_error* input_error) {
grpc_call_next_op(elem, batch); grpc_call_next_op(elem, batch);
} else { } else {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED); GRPC_STATUS_UNAVAILABLE);
grpc_transport_stream_op_batch_finish_with_failure(batch, error, grpc_transport_stream_op_batch_finish_with_failure(batch, error,
calld->call_combiner); calld->call_combiner);
} }

@ -26,4 +26,9 @@
#define GRPC_ABSTRACT_BASE_CLASS \ #define GRPC_ABSTRACT_BASE_CLASS \
static void operator delete(void* p) { abort(); } static void operator delete(void* p) { abort(); }
// gRPC currently can't depend on libstdc++, so we can't use "= 0" for
// pure virtual methods. Instead, we use this macro.
#define GRPC_ABSTRACT \
{ GPR_ASSERT(false); }
#endif /* GRPC_CORE_LIB_SUPPORT_ABSTRACT_H */ #endif /* GRPC_CORE_LIB_SUPPORT_ABSTRACT_H */

@ -0,0 +1,171 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_SUPPORT_ORPHANABLE_H
#define GRPC_CORE_LIB_SUPPORT_ORPHANABLE_H
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <memory>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/support/abstract.h"
#include "src/core/lib/support/debug_location.h"
#include "src/core/lib/support/memory.h"
namespace grpc_core {
// A base class for orphanable objects, which have one external owner
// but are not necessarily destroyed immediately when the external owner
// gives up ownership. Instead, the owner calls the object's Orphan()
// method, and the object then takes responsibility for its own cleanup
// and destruction.
class Orphanable {
public:
// Gives up ownership of the object. The implementation must arrange
// to eventually destroy the object without further interaction from the
// caller.
virtual void Orphan() GRPC_ABSTRACT;
// Not copyable or movable.
Orphanable(const Orphanable&) = delete;
Orphanable& operator=(const Orphanable&) = delete;
GRPC_ABSTRACT_BASE_CLASS
protected:
Orphanable() {}
virtual ~Orphanable() {}
};
template <typename T>
class OrphanableDelete {
public:
void operator()(T* p) { p->Orphan(); }
};
template <typename T, typename Deleter = OrphanableDelete<T>>
using OrphanablePtr = std::unique_ptr<T, Deleter>;
template <typename T, typename... Args>
inline OrphanablePtr<T> MakeOrphanable(Args&&... args) {
return OrphanablePtr<T>(New<T>(std::forward<Args>(args)...));
}
// A type of Orphanable with internal ref-counting.
class InternallyRefCounted : public Orphanable {
public:
// Not copyable nor movable.
InternallyRefCounted(const InternallyRefCounted&) = delete;
InternallyRefCounted& operator=(const InternallyRefCounted&) = delete;
GRPC_ABSTRACT_BASE_CLASS
protected:
InternallyRefCounted() { gpr_ref_init(&refs_, 1); }
virtual ~InternallyRefCounted() {}
void Ref() { gpr_ref(&refs_); }
void Unref() {
if (gpr_unref(&refs_)) {
Delete(this);
}
}
// Allow Delete() to access destructor.
template <typename T>
friend void Delete(T*);
private:
gpr_refcount refs_;
};
// An alternative version of the InternallyRefCounted base class that
// supports tracing. This is intended to be used in cases where the
// object will be handled both by idiomatic C++ code using smart
// pointers and legacy code that is manually calling Ref() and Unref().
// Once all of our code is converted to idiomatic C++, we may be able to
// eliminate this class.
class InternallyRefCountedWithTracing : public Orphanable {
public:
// Not copyable nor movable.
InternallyRefCountedWithTracing(const InternallyRefCountedWithTracing&) =
delete;
InternallyRefCountedWithTracing& operator=(
const InternallyRefCountedWithTracing&) = delete;
GRPC_ABSTRACT_BASE_CLASS
protected:
// Allow Delete() to access destructor.
template <typename T>
friend void Delete(T*);
InternallyRefCountedWithTracing()
: InternallyRefCountedWithTracing(static_cast<TraceFlag*>(nullptr)) {}
explicit InternallyRefCountedWithTracing(TraceFlag* trace_flag)
: trace_flag_(trace_flag) {
gpr_ref_init(&refs_, 1);
}
#ifdef NDEBUG
explicit InternallyRefCountedWithTracing(DebugOnlyTraceFlag* trace_flag)
: InternallyRefCountedWithTracing() {}
#endif
virtual ~InternallyRefCountedWithTracing() {}
void Ref() { gpr_ref(&refs_); }
void Ref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs + 1, reason);
}
Ref();
}
void Unref() {
if (gpr_unref(&refs_)) {
Delete(this);
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs - 1, reason);
}
Unref();
}
private:
TraceFlag* trace_flag_ = nullptr;
gpr_refcount refs_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_SUPPORT_ORPHANABLE_H */

@ -23,6 +23,7 @@
#include <grpc/support/sync.h> #include <grpc/support/sync.h>
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/support/abstract.h"
#include "src/core/lib/support/debug_location.h" #include "src/core/lib/support/debug_location.h"
#include "src/core/lib/support/memory.h" #include "src/core/lib/support/memory.h"
@ -45,6 +46,8 @@ class RefCounted {
RefCounted(const RefCounted&) = delete; RefCounted(const RefCounted&) = delete;
RefCounted& operator=(const RefCounted&) = delete; RefCounted& operator=(const RefCounted&) = delete;
GRPC_ABSTRACT_BASE_CLASS
protected: protected:
// Allow Delete() to access destructor. // Allow Delete() to access destructor.
template <typename T> template <typename T>
@ -98,18 +101,26 @@ class RefCountedWithTracing {
RefCountedWithTracing(const RefCountedWithTracing&) = delete; RefCountedWithTracing(const RefCountedWithTracing&) = delete;
RefCountedWithTracing& operator=(const RefCountedWithTracing&) = delete; RefCountedWithTracing& operator=(const RefCountedWithTracing&) = delete;
GRPC_ABSTRACT_BASE_CLASS
protected: protected:
// Allow Delete() to access destructor. // Allow Delete() to access destructor.
template <typename T> template <typename T>
friend void Delete(T*); friend void Delete(T*);
RefCountedWithTracing() : RefCountedWithTracing(nullptr) {} RefCountedWithTracing()
: RefCountedWithTracing(static_cast<TraceFlag*>(nullptr)) {}
explicit RefCountedWithTracing(TraceFlag* trace_flag) explicit RefCountedWithTracing(TraceFlag* trace_flag)
: trace_flag_(trace_flag) { : trace_flag_(trace_flag) {
gpr_ref_init(&refs_, 1); gpr_ref_init(&refs_, 1);
} }
#ifdef NDEBUG
explicit RefCountedWithTracing(DebugOnlyTraceFlag* trace_flag)
: RefCountedWithTracing() {}
#endif
virtual ~RefCountedWithTracing() {} virtual ~RefCountedWithTracing() {}
private: private:

@ -76,6 +76,15 @@ class RefCountedPtr {
T& operator*() const { return *value_; } T& operator*() const { return *value_; }
T* operator->() const { return value_; } T* operator->() const { return value_; }
bool operator==(const RefCountedPtr& other) const {
return value_ == other.value_;
}
bool operator==(const T* other) const { return value_ == other; }
bool operator!=(const RefCountedPtr& other) const {
return value_ != other.value_;
}
bool operator!=(const T* other) const { return value_ != other; }
private: private:
T* value_ = nullptr; T* value_ = nullptr;
}; };

@ -1851,8 +1851,9 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
{ {
grpc_error* override_error = GRPC_ERROR_NONE; grpc_error* override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) { if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( override_error =
"Error from server send status"); error_from_status(op->data.send_status_from_server.status,
"Returned non-ok status");
} }
if (op->data.send_status_from_server.status_details != nullptr) { if (op->data.send_status_from_server.status_details != nullptr) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices( call->send_extra_metadata[1].md = grpc_mdelem_from_slices(

@ -189,16 +189,10 @@ int MetadataCredentialsPluginWrapper::GetMetadata(
} }
if (w->plugin_->IsBlocking()) { if (w->plugin_->IsBlocking()) {
// Asynchronous return. // Asynchronous return.
if (w->thread_pool_->Add(std::bind( w->thread_pool_->Add(
&MetadataCredentialsPluginWrapper::InvokePlugin, w, context, cb, std::bind(&MetadataCredentialsPluginWrapper::InvokePlugin, w, context,
user_data, nullptr, nullptr, nullptr, nullptr))) { cb, user_data, nullptr, nullptr, nullptr, nullptr));
return 0; return 0;
} else {
*num_creds_md = 0;
*status = GRPC_STATUS_RESOURCE_EXHAUSTED;
*error_details = nullptr;
return true;
}
} else { } else {
// Synchronous return. // Synchronous return.
w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status, w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status,

@ -28,7 +28,7 @@ namespace {
ThreadPoolInterface* CreateDefaultThreadPoolImpl() { ThreadPoolInterface* CreateDefaultThreadPoolImpl() {
int cores = gpr_cpu_num_cores(); int cores = gpr_cpu_num_cores();
if (!cores) cores = 4; if (!cores) cores = 4;
return new DynamicThreadPool(cores, gpr_thd_new, gpr_thd_join); return new DynamicThreadPool(cores);
} }
CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPoolImpl; CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPoolImpl;

@ -19,32 +19,19 @@
#include "src/cpp/server/dynamic_thread_pool.h" #include "src/cpp/server/dynamic_thread_pool.h"
#include <mutex> #include <mutex>
#include <thread>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/thd.h>
namespace grpc { namespace grpc {
DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool, DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
bool* valid) : pool_(pool),
: pool_(pool) { thd_(new std::thread(&DynamicThreadPool::DynamicThread::ThreadFunc,
gpr_thd_options opt = gpr_thd_options_default(); this)) {}
gpr_thd_options_set_joinable(&opt);
std::lock_guard<std::mutex> l(dt_mu_);
valid_ = *valid = pool->thread_creator_(
&thd_, "dynamic thread",
[](void* th) {
reinterpret_cast<DynamicThreadPool::DynamicThread*>(th)->ThreadFunc();
},
this, &opt);
}
DynamicThreadPool::DynamicThread::~DynamicThread() { DynamicThreadPool::DynamicThread::~DynamicThread() {
std::lock_guard<std::mutex> l(dt_mu_); thd_->join();
if (valid_) { thd_.reset();
pool_->thread_joiner_(thd_);
}
} }
void DynamicThreadPool::DynamicThread::ThreadFunc() { void DynamicThreadPool::DynamicThread::ThreadFunc() {
@ -86,26 +73,15 @@ void DynamicThreadPool::ThreadFunc() {
} }
} }
DynamicThreadPool::DynamicThreadPool( DynamicThreadPool::DynamicThreadPool(int reserve_threads)
int reserve_threads,
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner)
: shutdown_(false), : shutdown_(false),
reserve_threads_(reserve_threads), reserve_threads_(reserve_threads),
nthreads_(0), nthreads_(0),
threads_waiting_(0), threads_waiting_(0) {
thread_creator_(thread_creator),
thread_joiner_(thread_joiner) {
for (int i = 0; i < reserve_threads_; i++) { for (int i = 0; i < reserve_threads_; i++) {
std::lock_guard<std::mutex> lock(mu_); std::lock_guard<std::mutex> lock(mu_);
nthreads_++; nthreads_++;
bool valid; new DynamicThread(this);
auto* th = new DynamicThread(this, &valid);
if (!valid) {
delete th;
}
} }
} }
@ -125,7 +101,7 @@ DynamicThreadPool::~DynamicThreadPool() {
ReapThreads(&dead_threads_); ReapThreads(&dead_threads_);
} }
bool DynamicThreadPool::Add(const std::function<void()>& callback) { void DynamicThreadPool::Add(const std::function<void()>& callback) {
std::lock_guard<std::mutex> lock(mu_); std::lock_guard<std::mutex> lock(mu_);
// Add works to the callbacks list // Add works to the callbacks list
callbacks_.push(callback); callbacks_.push(callback);
@ -133,12 +109,7 @@ bool DynamicThreadPool::Add(const std::function<void()>& callback) {
if (threads_waiting_ == 0) { if (threads_waiting_ == 0) {
// Kick off a new thread // Kick off a new thread
nthreads_++; nthreads_++;
bool valid; new DynamicThread(this);
auto* th = new DynamicThread(this, &valid);
if (!valid) {
delete th;
return false;
}
} else { } else {
cv_.notify_one(); cv_.notify_one();
} }
@ -146,7 +117,6 @@ bool DynamicThreadPool::Add(const std::function<void()>& callback) {
if (!dead_threads_.empty()) { if (!dead_threads_.empty()) {
ReapThreads(&dead_threads_); ReapThreads(&dead_threads_);
} }
return true;
} }
} // namespace grpc } // namespace grpc

@ -24,9 +24,9 @@
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include <queue> #include <queue>
#include <thread>
#include <grpc++/support/config.h> #include <grpc++/support/config.h>
#include <grpc/support/thd.h>
#include "src/cpp/server/thread_pool_interface.h" #include "src/cpp/server/thread_pool_interface.h"
@ -34,26 +34,20 @@ namespace grpc {
class DynamicThreadPool final : public ThreadPoolInterface { class DynamicThreadPool final : public ThreadPoolInterface {
public: public:
DynamicThreadPool(int reserve_threads, explicit DynamicThreadPool(int reserve_threads);
std::function<int(gpr_thd_id*, const char*, void (*)(void*),
void*, const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner);
~DynamicThreadPool(); ~DynamicThreadPool();
bool Add(const std::function<void()>& callback) override; void Add(const std::function<void()>& callback) override;
private: private:
class DynamicThread { class DynamicThread {
public: public:
DynamicThread(DynamicThreadPool* pool, bool* valid); DynamicThread(DynamicThreadPool* pool);
~DynamicThread(); ~DynamicThread();
private: private:
DynamicThreadPool* pool_; DynamicThreadPool* pool_;
std::mutex dt_mu_; std::unique_ptr<std::thread> thd_;
gpr_thd_id thd_;
bool valid_;
void ThreadFunc(); void ThreadFunc();
}; };
std::mutex mu_; std::mutex mu_;
@ -65,10 +59,6 @@ class DynamicThreadPool final : public ThreadPoolInterface {
int nthreads_; int nthreads_;
int threads_waiting_; int threads_waiting_;
std::list<DynamicThread*> dead_threads_; std::list<DynamicThread*> dead_threads_;
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator_;
std::function<void(gpr_thd_id)> thread_joiner_;
void ThreadFunc(); void ThreadFunc();
static void ReapThreads(std::list<DynamicThread*>* tlist); static void ReapThreads(std::list<DynamicThread*>* tlist);

@ -43,14 +43,9 @@ void AuthMetadataProcessorAyncWrapper::Process(
return; return;
} }
if (w->processor_->IsBlocking()) { if (w->processor_->IsBlocking()) {
bool added = w->thread_pool_->Add( w->thread_pool_->Add(
std::bind(&AuthMetadataProcessorAyncWrapper::InvokeProcessor, w, std::bind(&AuthMetadataProcessorAyncWrapper::InvokeProcessor, w,
context, md, num_md, cb, user_data)); context, md, num_md, cb, user_data));
if (!added) {
// no thread available, so fail with temporary resource unavailability
cb(user_data, nullptr, 0, nullptr, 0, GRPC_STATUS_UNAVAILABLE, nullptr);
return;
}
} else { } else {
// invoke directly. // invoke directly.
w->InvokeProcessor(context, md, num_md, cb, user_data); w->InvokeProcessor(context, md, num_md, cb, user_data);

@ -23,7 +23,6 @@
#include <grpc++/server.h> #include <grpc++/server.h>
#include <grpc/support/cpu.h> #include <grpc/support/cpu.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h> #include <grpc/support/useful.h>
#include "src/cpp/server/thread_pool_interface.h" #include "src/cpp/server/thread_pool_interface.h"
@ -44,9 +43,7 @@ ServerBuilder::ServerBuilder()
max_send_message_size_(-1), max_send_message_size_(-1),
sync_server_settings_(SyncServerSettings()), sync_server_settings_(SyncServerSettings()),
resource_quota_(nullptr), resource_quota_(nullptr),
generic_service_(nullptr), generic_service_(nullptr) {
thread_creator_(gpr_thd_new),
thread_joiner_(gpr_thd_join) {
gpr_once_init(&once_init_plugin_list, do_plugin_list_init); gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
for (auto it = g_plugin_factory_list->begin(); for (auto it = g_plugin_factory_list->begin();
it != g_plugin_factory_list->end(); it++) { it != g_plugin_factory_list->end(); it++) {
@ -265,7 +262,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
std::unique_ptr<Server> server(new Server( std::unique_ptr<Server> server(new Server(
max_receive_message_size_, &args, sync_server_cqs, max_receive_message_size_, &args, sync_server_cqs,
sync_server_settings_.min_pollers, sync_server_settings_.max_pollers, sync_server_settings_.min_pollers, sync_server_settings_.max_pollers,
sync_server_settings_.cq_timeout_msec, thread_creator_, thread_joiner_)); sync_server_settings_.cq_timeout_msec));
if (has_sync_methods) { if (has_sync_methods) {
// This is a Sync server // This is a Sync server

@ -36,7 +36,6 @@
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include "src/core/ext/transport/inproc/inproc_transport.h" #include "src/core/ext/transport/inproc/inproc_transport.h"
#include "src/core/lib/profiling/timers.h" #include "src/core/lib/profiling/timers.h"
@ -196,10 +195,8 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
call_(mrd->call_, server, &cq_, server->max_receive_message_size()), call_(mrd->call_, server, &cq_, server->max_receive_message_size()),
ctx_(mrd->deadline_, &mrd->request_metadata_), ctx_(mrd->deadline_, &mrd->request_metadata_),
has_request_payload_(mrd->has_request_payload_), has_request_payload_(mrd->has_request_payload_),
request_payload_(has_request_payload_ ? mrd->request_payload_ request_payload_(mrd->request_payload_),
: nullptr), method_(mrd->method_) {
method_(mrd->method_),
server_(server) {
ctx_.set_call(mrd->call_); ctx_.set_call(mrd->call_);
ctx_.cq_ = &cq_; ctx_.cq_ = &cq_;
GPR_ASSERT(mrd->in_flight_); GPR_ASSERT(mrd->in_flight_);
@ -213,13 +210,10 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
} }
} }
void Run(std::shared_ptr<GlobalCallbacks> global_callbacks, void Run(std::shared_ptr<GlobalCallbacks> global_callbacks) {
bool resources) {
ctx_.BeginCompletionOp(&call_); ctx_.BeginCompletionOp(&call_);
global_callbacks->PreSynchronousRequest(&ctx_); global_callbacks->PreSynchronousRequest(&ctx_);
auto* handler = resources ? method_->handler() method_->handler()->RunHandler(internal::MethodHandler::HandlerParameter(
: server_->resource_exhausted_handler_.get();
handler->RunHandler(internal::MethodHandler::HandlerParameter(
&call_, &ctx_, request_payload_)); &call_, &ctx_, request_payload_));
global_callbacks->PostSynchronousRequest(&ctx_); global_callbacks->PostSynchronousRequest(&ctx_);
request_payload_ = nullptr; request_payload_ = nullptr;
@ -241,7 +235,6 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
const bool has_request_payload_; const bool has_request_payload_;
grpc_byte_buffer* request_payload_; grpc_byte_buffer* request_payload_;
internal::RpcServiceMethod* const method_; internal::RpcServiceMethod* const method_;
Server* server_;
}; };
private: private:
@ -262,15 +255,11 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
// appropriate RPC handlers // appropriate RPC handlers
class Server::SyncRequestThreadManager : public ThreadManager { class Server::SyncRequestThreadManager : public ThreadManager {
public: public:
SyncRequestThreadManager( SyncRequestThreadManager(Server* server, CompletionQueue* server_cq,
Server* server, CompletionQueue* server_cq, std::shared_ptr<GlobalCallbacks> global_callbacks,
std::shared_ptr<GlobalCallbacks> global_callbacks, int min_pollers, int min_pollers, int max_pollers,
int max_pollers, int cq_timeout_msec, int cq_timeout_msec)
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*, : ThreadManager(min_pollers, max_pollers),
const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner)
: ThreadManager(min_pollers, max_pollers, thread_creator, thread_joiner),
server_(server), server_(server),
server_cq_(server_cq), server_cq_(server_cq),
cq_timeout_msec_(cq_timeout_msec), cq_timeout_msec_(cq_timeout_msec),
@ -296,7 +285,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
GPR_UNREACHABLE_CODE(return TIMEOUT); GPR_UNREACHABLE_CODE(return TIMEOUT);
} }
void DoWork(void* tag, bool ok, bool resources) override { void DoWork(void* tag, bool ok) override {
SyncRequest* sync_req = static_cast<SyncRequest*>(tag); SyncRequest* sync_req = static_cast<SyncRequest*>(tag);
if (!sync_req) { if (!sync_req) {
@ -316,7 +305,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
} }
GPR_TIMER_SCOPE("cd.Run()", 0); GPR_TIMER_SCOPE("cd.Run()", 0);
cd.Run(global_callbacks_, resources); cd.Run(global_callbacks_);
} }
// TODO (sreek) If ok is false here (which it isn't in case of // TODO (sreek) If ok is false here (which it isn't in case of
// grpc_request_registered_call), we should still re-queue the request // grpc_request_registered_call), we should still re-queue the request
@ -378,11 +367,7 @@ Server::Server(
int max_receive_message_size, ChannelArguments* args, int max_receive_message_size, ChannelArguments* args,
std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>> std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
sync_server_cqs, sync_server_cqs,
int min_pollers, int max_pollers, int sync_cq_timeout_msec, int min_pollers, int max_pollers, int sync_cq_timeout_msec)
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner)
: max_receive_message_size_(max_receive_message_size), : max_receive_message_size_(max_receive_message_size),
sync_server_cqs_(sync_server_cqs), sync_server_cqs_(sync_server_cqs),
started_(false), started_(false),
@ -391,9 +376,7 @@ Server::Server(
has_generic_service_(false), has_generic_service_(false),
server_(nullptr), server_(nullptr),
server_initializer_(new ServerInitializer(this)), server_initializer_(new ServerInitializer(this)),
health_check_service_disabled_(false), health_check_service_disabled_(false) {
thread_creator_(thread_creator),
thread_joiner_(thread_joiner) {
g_gli_initializer.summon(); g_gli_initializer.summon();
gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks); gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks);
global_callbacks_ = g_callbacks; global_callbacks_ = g_callbacks;
@ -403,7 +386,7 @@ Server::Server(
it++) { it++) {
sync_req_mgrs_.emplace_back(new SyncRequestThreadManager( sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
this, (*it).get(), global_callbacks_, min_pollers, max_pollers, this, (*it).get(), global_callbacks_, min_pollers, max_pollers,
sync_cq_timeout_msec, thread_creator_, thread_joiner_)); sync_cq_timeout_msec));
} }
grpc_channel_args channel_args; grpc_channel_args channel_args;
@ -566,10 +549,6 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
} }
} }
if (!sync_server_cqs_->empty()) {
resource_exhausted_handler_.reset(new internal::ResourceExhaustedHandler);
}
for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
(*it)->Start(); (*it)->Start();
} }

@ -29,9 +29,7 @@ class ThreadPoolInterface {
virtual ~ThreadPoolInterface() {} virtual ~ThreadPoolInterface() {}
// Schedule the given callback for execution. // Schedule the given callback for execution.
// Return true on success, false on failure virtual void Add(const std::function<void()>& callback) = 0;
virtual bool Add(const std::function<void()>& callback)
GRPC_MUST_USE_RESULT = 0;
}; };
// Allows different codebases to use their own thread pool impls // Allows different codebases to use their own thread pool impls

@ -20,26 +20,18 @@
#include <climits> #include <climits>
#include <mutex> #include <mutex>
#include <thread>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/thd.h>
namespace grpc { namespace grpc {
ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr, bool* valid) ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr)
: thd_mgr_(thd_mgr) { : thd_mgr_(thd_mgr) {
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
// Make thread creation exclusive with respect to its join happening in // Make thread creation exclusive with respect to its join happening in
// ~WorkerThread(). // ~WorkerThread().
std::lock_guard<std::mutex> lock(wt_mu_); std::lock_guard<std::mutex> lock(wt_mu_);
*valid = valid_ = thd_mgr->thread_creator_( thd_ = std::thread(&ThreadManager::WorkerThread::Run, this);
&thd_, "worker thread",
[](void* th) {
reinterpret_cast<ThreadManager::WorkerThread*>(th)->Run();
},
this, &opt);
} }
void ThreadManager::WorkerThread::Run() { void ThreadManager::WorkerThread::Run() {
@ -50,24 +42,15 @@ void ThreadManager::WorkerThread::Run() {
ThreadManager::WorkerThread::~WorkerThread() { ThreadManager::WorkerThread::~WorkerThread() {
// Don't join until the thread is fully constructed. // Don't join until the thread is fully constructed.
std::lock_guard<std::mutex> lock(wt_mu_); std::lock_guard<std::mutex> lock(wt_mu_);
if (valid_) { thd_.join();
thd_mgr_->thread_joiner_(thd_);
}
} }
ThreadManager::ThreadManager( ThreadManager::ThreadManager(int min_pollers, int max_pollers)
int min_pollers, int max_pollers,
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner)
: shutdown_(false), : shutdown_(false),
num_pollers_(0), num_pollers_(0),
min_pollers_(min_pollers), min_pollers_(min_pollers),
max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers), max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers),
num_threads_(0), num_threads_(0) {}
thread_creator_(thread_creator),
thread_joiner_(thread_joiner) {}
ThreadManager::~ThreadManager() { ThreadManager::~ThreadManager() {
{ {
@ -128,9 +111,7 @@ void ThreadManager::Initialize() {
for (int i = 0; i < min_pollers_; i++) { for (int i = 0; i < min_pollers_; i++) {
// Create a new thread (which ends up calling the MainWorkLoop() function // Create a new thread (which ends up calling the MainWorkLoop() function
bool valid; new WorkerThread(this);
new WorkerThread(this, &valid);
GPR_ASSERT(valid); // we need to have at least this minimum
} }
} }
@ -157,27 +138,18 @@ void ThreadManager::MainWorkLoop() {
case WORK_FOUND: case WORK_FOUND:
// If we got work and there are now insufficient pollers, start a new // If we got work and there are now insufficient pollers, start a new
// one // one
bool resources;
if (!shutdown_ && num_pollers_ < min_pollers_) { if (!shutdown_ && num_pollers_ < min_pollers_) {
bool valid; num_pollers_++;
num_threads_++;
// Drop lock before spawning thread to avoid contention // Drop lock before spawning thread to avoid contention
lock.unlock(); lock.unlock();
auto* th = new WorkerThread(this, &valid); new WorkerThread(this);
lock.lock();
if (valid) {
num_pollers_++;
num_threads_++;
} else {
delete th;
}
resources = (num_pollers_ > 0);
} else { } else {
resources = true; // Drop lock for consistency with above branch
lock.unlock();
} }
// Drop lock before any application work
lock.unlock();
// Lock is always released at this point - do the application work // Lock is always released at this point - do the application work
DoWork(tag, ok, resources); DoWork(tag, ok);
// Take the lock again to check post conditions // Take the lock again to check post conditions
lock.lock(); lock.lock();
// If we're shutdown, we should finish at this point. // If we're shutdown, we should finish at this point.

@ -20,23 +20,18 @@
#define GRPC_INTERNAL_CPP_THREAD_MANAGER_H #define GRPC_INTERNAL_CPP_THREAD_MANAGER_H
#include <condition_variable> #include <condition_variable>
#include <functional>
#include <list> #include <list>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include <thread>
#include <grpc++/support/config.h> #include <grpc++/support/config.h>
#include <grpc/support/thd.h>
namespace grpc { namespace grpc {
class ThreadManager { class ThreadManager {
public: public:
ThreadManager(int min_pollers, int max_pollers, explicit ThreadManager(int min_pollers, int max_pollers);
std::function<int(gpr_thd_id*, const char*, void (*)(void*),
void*, const gpr_thd_options*)>
thread_creator,
std::function<void(gpr_thd_id)> thread_joiner);
virtual ~ThreadManager(); virtual ~ThreadManager();
// Initializes and Starts the Rpc Manager threads // Initializes and Starts the Rpc Manager threads
@ -55,8 +50,6 @@ class ThreadManager {
// - ThreadManager does not interpret the values of 'tag' and 'ok' // - ThreadManager does not interpret the values of 'tag' and 'ok'
// - ThreadManager WILL call DoWork() and pass '*tag' and 'ok' as input to // - ThreadManager WILL call DoWork() and pass '*tag' and 'ok' as input to
// DoWork() // DoWork()
// - ThreadManager will also pass DoWork a bool saying if there are actually
// resources to do the work
// //
// If the return value is SHUTDOWN:, // If the return value is SHUTDOWN:,
// - ThreadManager WILL NOT call DoWork() and terminates the thead // - ThreadManager WILL NOT call DoWork() and terminates the thead
@ -76,7 +69,7 @@ class ThreadManager {
// The implementation of DoWork() should also do any setup needed to ensure // The implementation of DoWork() should also do any setup needed to ensure
// that the next call to PollForWork() (not necessarily by the current thread) // that the next call to PollForWork() (not necessarily by the current thread)
// actually finds some work // actually finds some work
virtual void DoWork(void* tag, bool ok, bool resources) = 0; virtual void DoWork(void* tag, bool ok) = 0;
// Mark the ThreadManager as shutdown and begin draining the work. This is a // Mark the ThreadManager as shutdown and begin draining the work. This is a
// non-blocking call and the caller should call Wait(), a blocking call which // non-blocking call and the caller should call Wait(), a blocking call which
@ -91,15 +84,15 @@ class ThreadManager {
virtual void Wait(); virtual void Wait();
private: private:
// Helper wrapper class around thread. This takes a ThreadManager object // Helper wrapper class around std::thread. This takes a ThreadManager object
// and starts a new thread to calls the Run() function. // and starts a new std::thread to calls the Run() function.
// //
// The Run() function calls ThreadManager::MainWorkLoop() function and once // The Run() function calls ThreadManager::MainWorkLoop() function and once
// that completes, it marks the WorkerThread completed by calling // that completes, it marks the WorkerThread completed by calling
// ThreadManager::MarkAsCompleted() // ThreadManager::MarkAsCompleted()
class WorkerThread { class WorkerThread {
public: public:
WorkerThread(ThreadManager* thd_mgr, bool* valid); WorkerThread(ThreadManager* thd_mgr);
~WorkerThread(); ~WorkerThread();
private: private:
@ -109,8 +102,7 @@ class ThreadManager {
ThreadManager* const thd_mgr_; ThreadManager* const thd_mgr_;
std::mutex wt_mu_; std::mutex wt_mu_;
gpr_thd_id thd_; std::thread thd_;
bool valid_;
}; };
// The main funtion in ThreadManager // The main funtion in ThreadManager
@ -137,13 +129,6 @@ class ThreadManager {
// currently polling i.e num_pollers_) // currently polling i.e num_pollers_)
int num_threads_; int num_threads_;
// Functions for creating/joining threads. Normally, these should
// be gpr_thd_new/gpr_thd_join but they are overridable
std::function<int(gpr_thd_id*, const char*, void (*)(void*), void*,
const gpr_thd_options*)>
thread_creator_;
std::function<void(gpr_thd_id)> thread_joiner_;
std::mutex list_mu_; std::mutex list_mu_;
std::list<WorkerThread*> completed_threads_; std::list<WorkerThread*> completed_threads_;
}; };

@ -27,7 +27,8 @@ namespace Grpc.Core.Internal
internal class NativeMetadataCredentialsPlugin internal class NativeMetadataCredentialsPlugin
{ {
const string GetMetadataExceptionMsg = "Exception occured in metadata credentials plugin."; const string GetMetadataExceptionStatusMsg = "Exception occurred in metadata credentials plugin.";
const string GetMetadataExceptionLogMsg = GetMetadataExceptionStatusMsg + " This is likely not a problem with gRPC itself. Please verify that the code supplying the metadata (usually an authentication token) works correctly.";
static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<NativeMetadataCredentialsPlugin>(); static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<NativeMetadataCredentialsPlugin>();
static readonly NativeMethods Native = NativeMethods.Get(); static readonly NativeMethods Native = NativeMethods.Get();
@ -67,8 +68,8 @@ namespace Grpc.Core.Internal
} }
catch (Exception e) catch (Exception e)
{ {
Native.grpcsharp_metadata_credentials_notify_from_plugin(callbackPtr, userDataPtr, MetadataArraySafeHandle.Create(Metadata.Empty), StatusCode.Unknown, GetMetadataExceptionMsg); Native.grpcsharp_metadata_credentials_notify_from_plugin(callbackPtr, userDataPtr, MetadataArraySafeHandle.Create(Metadata.Empty), StatusCode.Unknown, GetMetadataExceptionStatusMsg);
Logger.Error(e, GetMetadataExceptionMsg); Logger.Error(e, GetMetadataExceptionLogMsg);
} }
} }
@ -86,8 +87,8 @@ namespace Grpc.Core.Internal
} }
catch (Exception e) catch (Exception e)
{ {
Native.grpcsharp_metadata_credentials_notify_from_plugin(callbackPtr, userDataPtr, MetadataArraySafeHandle.Create(Metadata.Empty), StatusCode.Unknown, GetMetadataExceptionMsg); Native.grpcsharp_metadata_credentials_notify_from_plugin(callbackPtr, userDataPtr, MetadataArraySafeHandle.Create(Metadata.Empty), StatusCode.Unknown, GetMetadataExceptionStatusMsg);
Logger.Error(e, GetMetadataExceptionMsg); Logger.Error(e, GetMetadataExceptionLogMsg);
} }
} }
} }

@ -162,7 +162,7 @@ namespace Grpc.IntegrationTesting
client = new TestService.TestServiceClient(channel); client = new TestService.TestServiceClient(channel);
var ex = Assert.Throws<RpcException>(() => client.UnaryCall(new SimpleRequest { })); var ex = Assert.Throws<RpcException>(() => client.UnaryCall(new SimpleRequest { }));
Assert.AreEqual(StatusCode.Unauthenticated, ex.Status.StatusCode); Assert.AreEqual(StatusCode.Unavailable, ex.Status.StatusCode);
} }
private class FakeTestService : TestService.TestServiceBase private class FakeTestService : TestService.TestServiceBase

@ -136,12 +136,12 @@ Pod::Spec.new do |s|
# Replace "const BIGNUM *I" in rsa.h with a lowercase i, as the former fails when including # Replace "const BIGNUM *I" in rsa.h with a lowercase i, as the former fails when including
# OpenSSL in a Swift bridging header (complex.h defines "I", and it's as if the compiler # OpenSSL in a Swift bridging header (complex.h defines "I", and it's as if the compiler
# included it in every bridged header). # included it in every bridged header).
sed -E -i '.back' 's/\\*I,/*i,/g' include/openssl/rsa.h sed -E -i'.back' 's/\\*I,/*i,/g' include/openssl/rsa.h
# Replace `#include "../crypto/internal.h"` in e_tls.c with `#include "../internal.h"`. The # Replace `#include "../crypto/internal.h"` in e_tls.c with `#include "../internal.h"`. The
# former assumes crypto/ is in the headers search path, which is hard to enforce when using # former assumes crypto/ is in the headers search path, which is hard to enforce when using
# dynamic frameworks. The latters always works, being relative to the current file. # dynamic frameworks. The latters always works, being relative to the current file.
sed -E -i '.back' 's/crypto\\///g' crypto/cipher/e_tls.c sed -E -i'.back' 's/crypto\\///g' crypto/cipher/e_tls.c
# Add a module map and an umbrella header # Add a module map and an umbrella header
cat > include/openssl/umbrella.h <<EOF cat > include/openssl/umbrella.h <<EOF
@ -197,11 +197,11 @@ Pod::Spec.new do |s|
# https://github.com/libgit2/libgit2/commit/1ddada422caf8e72ba97dca2568d2bf879fed5f2 and libvpx # https://github.com/libgit2/libgit2/commit/1ddada422caf8e72ba97dca2568d2bf879fed5f2 and libvpx
# in https://chromium.googlesource.com/webm/libvpx/+/1bec0c5a7e885ec792f6bb658eb3f34ad8f37b15 # in https://chromium.googlesource.com/webm/libvpx/+/1bec0c5a7e885ec792f6bb658eb3f34ad8f37b15
# work around it by removing the include. We need four of its macros, so we expand them here. # work around it by removing the include. We need four of its macros, so we expand them here.
sed -E -i '.back' '/<inttypes.h>/d' include/openssl/bn.h sed -E -i'.back' '/<inttypes.h>/d' include/openssl/bn.h
sed -E -i '.back' 's/PRIu32/"u"/g' include/openssl/bn.h sed -E -i'.back' 's/PRIu32/"u"/g' include/openssl/bn.h
sed -E -i '.back' 's/PRIx32/"x"/g' include/openssl/bn.h sed -E -i'.back' 's/PRIx32/"x"/g' include/openssl/bn.h
sed -E -i '.back' 's/PRIu64/"llu"/g' include/openssl/bn.h sed -E -i'.back' 's/PRIu64/"llu"/g' include/openssl/bn.h
sed -E -i '.back' 's/PRIx64/"llx"/g' include/openssl/bn.h sed -E -i'.back' 's/PRIx64/"llx"/g' include/openssl/bn.h
# This is a bit ridiculous, but requiring people to install Go in order to build is slightly # This is a bit ridiculous, but requiring people to install Go in order to build is slightly
# more ridiculous IMO. To save you from scrolling, this is the last part of the podspec. # more ridiculous IMO. To save you from scrolling, this is the last part of the podspec.

@ -1,5 +1,12 @@
[![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC) [![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC)
# gRPC for Objective-C # gRPC for Objective-C
gRPC Objective C library provides Objective C API for users to make gRPC calls on iOS or OS X
platforms. Currently, the minimum supported iOS version is 7.0 and OS X version is 10.9 (Mavericks).
While gRPC doesn't require the use of an IDL to describe the API of services, using one simplifies
usage and adds some interoperability guarantees. Here we use [Protocol Buffers][], and provide a
plugin for the Protobuf Compiler (_protoc_) to generate client libraries to communicate with gRPC
services.
- [Write your API declaration in proto format](#write-protos) - [Write your API declaration in proto format](#write-protos)
- [Integrate a proto library in your project](#cocoapods) - [Integrate a proto library in your project](#cocoapods)
@ -10,11 +17,6 @@
- [Install protoc and the gRPC plugin without using Homebrew](#no-homebrew) - [Install protoc and the gRPC plugin without using Homebrew](#no-homebrew)
- [Integrate the generated gRPC library without using Cocoapods](#no-cocoapods) - [Integrate the generated gRPC library without using Cocoapods](#no-cocoapods)
While gRPC doesn't require the use of an IDL to describe the API of services, using one simplifies
usage and adds some interoperability guarantees. Here we use [Protocol Buffers][], and provide a
plugin for the Protobuf Compiler (_protoc_) to generate client libraries to communicate with gRPC
services.
<a name="write-protos"></a> <a name="write-protos"></a>
## Write your API declaration in proto format ## Write your API declaration in proto format

@ -242,6 +242,7 @@ PHP_METHOD(Channel, __construct) {
// parse the rest of the channel args array // parse the rest of the channel args array
if (php_grpc_read_args_array(args_array, &args TSRMLS_CC) == FAILURE) { if (php_grpc_read_args_array(args_array, &args TSRMLS_CC) == FAILURE) {
efree(args.args);
return; return;
} }
@ -301,6 +302,7 @@ PHP_METHOD(Channel, __construct) {
create_and_add_channel_to_persistent_list( create_and_add_channel_to_persistent_list(
channel, target, args, creds, key, key_len TSRMLS_CC); channel, target, args, creds, key, key_len TSRMLS_CC);
} else { } else {
efree(args.args);
channel->wrapper = le->channel; channel->wrapper = le->channel;
} }
} }

@ -147,7 +147,7 @@ class CallCredentials2Test extends PHPUnit_Framework_TestCase
$this->assertTrue($event->send_metadata); $this->assertTrue($event->send_metadata);
$this->assertTrue($event->send_close); $this->assertTrue($event->send_close);
$this->assertTrue($event->status->code == Grpc\STATUS_UNAUTHENTICATED); $this->assertTrue($event->status->code == Grpc\STATUS_UNAVAILABLE);
} }
public function invalidReturnCallbackFunc($context) public function invalidReturnCallbackFunc($context)
@ -179,6 +179,6 @@ class CallCredentials2Test extends PHPUnit_Framework_TestCase
$this->assertTrue($event->send_metadata); $this->assertTrue($event->send_metadata);
$this->assertTrue($event->send_close); $this->assertTrue($event->send_close);
$this->assertTrue($event->status->code == Grpc\STATUS_UNAUTHENTICATED); $this->assertTrue($event->status->code == Grpc\STATUS_UNAVAILABLE);
} }
} }

File diff suppressed because it is too large Load Diff

@ -277,6 +277,12 @@ class _Context(grpc.ServicerContext):
self._state.trailing_metadata = trailing_metadata self._state.trailing_metadata = trailing_metadata
def abort(self, code, details): def abort(self, code, details):
# treat OK like other invalid arguments: fail the RPC
if code == grpc.StatusCode.OK:
logging.error(
'abort() called with StatusCode.OK; returning UNKNOWN')
code = grpc.StatusCode.UNKNOWN
details = ''
with self._state.condition: with self._state.condition:
self._state.code = code self._state.code = code
self._state.details = _common.encode(details) self._state.details = _common.encode(details)

@ -50,6 +50,12 @@ _SERVER_TRAILING_METADATA = (('server-trailing-md-key',
_NON_OK_CODE = grpc.StatusCode.NOT_FOUND _NON_OK_CODE = grpc.StatusCode.NOT_FOUND
_DETAILS = 'Test details!' _DETAILS = 'Test details!'
# calling abort should always fail an RPC, even for "invalid" codes
_ABORT_CODES = (_NON_OK_CODE, 3, grpc.StatusCode.OK)
_EXPECTED_CLIENT_CODES = (_NON_OK_CODE, grpc.StatusCode.UNKNOWN,
grpc.StatusCode.UNKNOWN)
_EXPECTED_DETAILS = (_DETAILS, _DETAILS, '')
class _Servicer(object): class _Servicer(object):
@ -302,99 +308,119 @@ class MetadataCodeDetailsTest(unittest.TestCase):
self.assertEqual(_DETAILS, response_iterator_call.details()) self.assertEqual(_DETAILS, response_iterator_call.details())
def testAbortedUnaryUnary(self): def testAbortedUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE) test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
self._servicer.set_details(_DETAILS) _EXPECTED_DETAILS)
self._servicer.set_abort_call() for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
with self.assertRaises(grpc.RpcError) as exception_context: self._servicer.set_details(_DETAILS)
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA) self._servicer.set_abort_call()
self.assertTrue( with self.assertRaises(grpc.RpcError) as exception_context:
test_common.metadata_transmitted( self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue( self.assertTrue(
test_common.metadata_transmitted( test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, _CLIENT_METADATA,
exception_context.exception.initial_metadata())) self._servicer.received_client_metadata()))
self.assertTrue( self.assertTrue(
test_common.metadata_transmitted( test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, _SERVER_INITIAL_METADATA,
exception_context.exception.trailing_metadata())) exception_context.exception.initial_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code()) self.assertTrue(
self.assertEqual(_DETAILS, exception_context.exception.details()) test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(expected_code, exception_context.exception.code())
self.assertEqual(expected_details,
exception_context.exception.details())
def testAbortedUnaryStream(self): def testAbortedUnaryStream(self):
self._servicer.set_code(_NON_OK_CODE) test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
self._servicer.set_details(_DETAILS) _EXPECTED_DETAILS)
self._servicer.set_abort_call() for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
response_iterator_call = self._unary_stream( self._servicer.set_details(_DETAILS)
_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA) self._servicer.set_abort_call()
received_initial_metadata = response_iterator_call.initial_metadata()
with self.assertRaises(grpc.RpcError): response_iterator_call = self._unary_stream(
self.assertEqual(len(list(response_iterator_call)), 0) _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = \
self.assertTrue( response_iterator_call.initial_metadata()
test_common.metadata_transmitted( with self.assertRaises(grpc.RpcError):
_CLIENT_METADATA, self._servicer.received_client_metadata())) self.assertEqual(len(list(response_iterator_call)), 0)
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA, self.assertTrue(
received_initial_metadata)) test_common.metadata_transmitted(
self.assertTrue( _CLIENT_METADATA,
test_common.metadata_transmitted( self._servicer.received_client_metadata()))
_SERVER_TRAILING_METADATA, self.assertTrue(
response_iterator_call.trailing_metadata())) test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
self.assertIs(_NON_OK_CODE, response_iterator_call.code()) received_initial_metadata))
self.assertEqual(_DETAILS, response_iterator_call.details()) self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(expected_code, response_iterator_call.code())
self.assertEqual(expected_details, response_iterator_call.details())
def testAbortedStreamUnary(self): def testAbortedStreamUnary(self):
self._servicer.set_code(_NON_OK_CODE) test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
self._servicer.set_details(_DETAILS) _EXPECTED_DETAILS)
self._servicer.set_abort_call() for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
with self.assertRaises(grpc.RpcError) as exception_context: self._servicer.set_details(_DETAILS)
self._stream_unary.with_call( self._servicer.set_abort_call()
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA) with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
self.assertTrue( iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
test_common.metadata_transmitted( metadata=_CLIENT_METADATA)
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue( self.assertTrue(
test_common.metadata_transmitted( test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, _CLIENT_METADATA,
exception_context.exception.initial_metadata())) self._servicer.received_client_metadata()))
self.assertTrue( self.assertTrue(
test_common.metadata_transmitted( test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, _SERVER_INITIAL_METADATA,
exception_context.exception.trailing_metadata())) exception_context.exception.initial_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code()) self.assertTrue(
self.assertEqual(_DETAILS, exception_context.exception.details()) test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(expected_code, exception_context.exception.code())
self.assertEqual(expected_details,
exception_context.exception.details())
def testAbortedStreamStream(self): def testAbortedStreamStream(self):
self._servicer.set_code(_NON_OK_CODE) test_cases = zip(_ABORT_CODES, _EXPECTED_CLIENT_CODES,
self._servicer.set_details(_DETAILS) _EXPECTED_DETAILS)
self._servicer.set_abort_call() for abort_code, expected_code, expected_details in test_cases:
self._servicer.set_code(abort_code)
response_iterator_call = self._stream_stream( self._servicer.set_details(_DETAILS)
iter([object()] * test_constants.STREAM_LENGTH), self._servicer.set_abort_call()
metadata=_CLIENT_METADATA)
received_initial_metadata = response_iterator_call.initial_metadata() response_iterator_call = self._stream_stream(
with self.assertRaises(grpc.RpcError): iter([object()] * test_constants.STREAM_LENGTH),
self.assertEqual(len(list(response_iterator_call)), 0) metadata=_CLIENT_METADATA)
received_initial_metadata = \
self.assertTrue( response_iterator_call.initial_metadata()
test_common.metadata_transmitted( with self.assertRaises(grpc.RpcError):
_CLIENT_METADATA, self._servicer.received_client_metadata())) self.assertEqual(len(list(response_iterator_call)), 0)
self.assertTrue(
test_common.metadata_transmitted(_SERVER_INITIAL_METADATA, self.assertTrue(
received_initial_metadata)) test_common.metadata_transmitted(
self.assertTrue( _CLIENT_METADATA,
test_common.metadata_transmitted( self._servicer.received_client_metadata()))
_SERVER_TRAILING_METADATA, self.assertTrue(
response_iterator_call.trailing_metadata())) test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
self.assertIs(_NON_OK_CODE, response_iterator_call.code()) received_initial_metadata))
self.assertEqual(_DETAILS, response_iterator_call.details()) self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
response_iterator_call.trailing_metadata()))
self.assertIs(expected_code, response_iterator_call.code())
self.assertEqual(expected_details, response_iterator_call.details())
def testCustomCodeUnaryUnary(self): def testCustomCodeUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE) self._servicer.set_code(_NON_OK_CODE)

@ -228,7 +228,7 @@ describe 'ClientStub' do
th.join th.join
end end
it 'should receive UNAUTHENTICATED if call credentials plugin fails' do it 'should receive UNAVAILABLE if call credentials plugin fails' do
server_port = create_secure_test_server server_port = create_secure_test_server
th = run_request_response(@sent_msg, @resp, @pass) th = run_request_response(@sent_msg, @resp, @pass)
@ -252,7 +252,7 @@ describe 'ClientStub' do
unauth_error_occured = false unauth_error_occured = false
begin begin
get_response(stub, credentials: creds) get_response(stub, credentials: creds)
rescue GRPC::Unauthenticated => e rescue GRPC::Unavailable => e
unauth_error_occured = true unauth_error_occured = true
expect(e.details.include?(error_message)).to be true expect(e.details.include?(error_message)).to be true
end end

@ -19,15 +19,8 @@ gRPC C Core:
1. Port gRPC to one of the major BSD platforms ([FreeBSD](https://freebsd.org), [NetBSD](https://netbsd.org), and [OpenBSD](https://openbsd.org)) and create packages for them. Add [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) support in the process. 1. Port gRPC to one of the major BSD platforms ([FreeBSD](https://freebsd.org), [NetBSD](https://netbsd.org), and [OpenBSD](https://openbsd.org)) and create packages for them. Add [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) support in the process.
* **Required skills:** C programming language, BSD operating system. * **Required skills:** C programming language, BSD operating system.
* **Likely mentors:** [Craig Tiller](https://github.com/ctiller), * **Likely mentors:** [Nicolas Noble](https://github.com/nicolasnoble),
[Nicolas Noble](https://github.com/nicolasnoble),
[Vijay Pai](https://github.com/vjpai). [Vijay Pai](https://github.com/vjpai).
1. Fix gRPC C-core's URI parser. The current parser does not qualify as a standard parser according to [RFC3986]( https://tools.ietf.org/html/rfc3986). Write test suites to verify this and make changes necessary to make the URI parser compliant.
* **Required skills:** C programming language, HTTP standard compliance.
* **Likely mentors:** [Craig Tiller](https://github.com/ctiller).
1. HPACK compression efficiency evaluation - Figure out how to benchmark gRPC's compression efficiency (both in terms of bytes on the wire and cpu cycles). Implement benchmarks. Potentially extend this to other full-stack gRPC implementations (Java and Go).
* **Required skills:** C programming language, software performance benchmarking, potentially Java and Go.
* **Likely mentors:** [Craig Tiller](https://github.com/ctiller).
gRPC Python: gRPC Python:
@ -38,7 +31,7 @@ gRPC Python:
1. Develop and test Python 3.5 Support for gRPC. Make necessary changes to port gRPC and package it for supported platforms. 1. Develop and test Python 3.5 Support for gRPC. Make necessary changes to port gRPC and package it for supported platforms.
* **Required skills:** Python programming language, Python 3.5 interpreter. * **Required skills:** Python programming language, Python 3.5 interpreter.
* **Likely mentors:** [Nathaniel Manista](https://github.com/nathanielmanistaatgoogle), [Masood Malekghassemi](https://github.com/soltanmm). * **Likely mentors:** [Nathaniel Manista](https://github.com/nathanielmanistaatgoogle), [Masood Malekghassemi](https://github.com/soltanmm).
gRPC Ruby/Java: gRPC Ruby/Java:
1. [jRuby](http://jruby.org) support for gRPC. Develop a jRuby wrapper for gRPC based on grpc-java and ensure that it is API compatible with the existing Ruby implementation and passes all tests. 1. [jRuby](http://jruby.org) support for gRPC. Develop a jRuby wrapper for gRPC based on grpc-java and ensure that it is API compatible with the existing Ruby implementation and passes all tests.

@ -48,7 +48,10 @@
deps.append("${_gRPC_CARES_LIBRARIES}") deps.append("${_gRPC_CARES_LIBRARIES}")
deps.append("${_gRPC_ALLTARGETS_LIBRARIES}") deps.append("${_gRPC_ALLTARGETS_LIBRARIES}")
for d in target_dict.get('deps', []): for d in target_dict.get('deps', []):
deps.append(d) if d == 'benchmark':
deps.append("${_gRPC_BENCHMARK_LIBRARIES}")
else:
deps.append(d)
if target_dict.build == 'test' and target_dict.language == 'c++': if target_dict.build == 'test' and target_dict.language == 'c++':
deps.append("${_gRPC_GFLAGS_LIBRARIES}") deps.append("${_gRPC_GFLAGS_LIBRARIES}")
return deps return deps
@ -90,6 +93,10 @@
set(gRPC_INSTALL <%text>${gRPC_INSTALL_default}</%text> CACHE BOOL set(gRPC_INSTALL <%text>${gRPC_INSTALL_default}</%text> CACHE BOOL
"Generate installation target: gRPC_ZLIB_PROVIDER, gRPC_CARES_PROVIDER, gRPC_SSL_PROVIDER and gRPC_PROTOBUF_PROVIDER must all be \"package\"") "Generate installation target: gRPC_ZLIB_PROVIDER, gRPC_CARES_PROVIDER, gRPC_SSL_PROVIDER and gRPC_PROTOBUF_PROVIDER must all be \"package\"")
# Providers for third-party dependencies (gRPC_*_PROVIDER properties):
# "module": build the dependency using sources from git submodule (under third_party)
# "package": use cmake's find_package functionality to locate a pre-installed dependency
set(gRPC_ZLIB_PROVIDER "module" CACHE STRING "Provider of zlib library") set(gRPC_ZLIB_PROVIDER "module" CACHE STRING "Provider of zlib library")
set_property(CACHE gRPC_ZLIB_PROVIDER PROPERTY STRINGS "module" "package") set_property(CACHE gRPC_ZLIB_PROVIDER PROPERTY STRINGS "module" "package")
@ -190,7 +197,7 @@
return() return()
endif() endif()
set(_protobuf_include_path -I . -I <%text>${PROTOBUF_WELLKNOWN_IMPORT_DIR}</%text>) set(_protobuf_include_path -I . -I <%text>${_gRPC_PROTOBUF_WELLKNOWN_INCLUDE_DIR}</%text>)
foreach(FIL <%text>${ARGN}</%text>) foreach(FIL <%text>${ARGN}</%text>)
get_filename_component(ABS_FIL <%text>${FIL}</%text> ABSOLUTE) get_filename_component(ABS_FIL <%text>${FIL}</%text> ABSOLUTE)
get_filename_component(FIL_WE <%text>${FIL}</%text> NAME_WE) get_filename_component(FIL_WE <%text>${FIL}</%text> NAME_WE)
@ -346,13 +353,11 @@
PUBLIC <%text>$<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include></%text> PUBLIC <%text>$<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include></%text>
PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text> PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text> PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text>
PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src PRIVATE <%text>${_gRPC_PROTOBUF_INCLUDE_DIR}</%text>
PRIVATE <%text>${ZLIB_INCLUDE_DIR}</%text> PRIVATE <%text>${_gRPC_ZLIB_INCLUDE_DIR}</%text>
PRIVATE <%text>${BENCHMARK}</%text>/include PRIVATE <%text>${_gRPC_BENCHMARK_INCLUDE_DIR}</%text>
PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/zlib PRIVATE <%text>${_gRPC_CARES_INCLUDE_DIR}</%text>
PRIVATE <%text>${CARES_INCLUDE_DIR}</%text> PRIVATE <%text>${_gRPC_GFLAGS_INCLUDE_DIR}</%text>
PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/cares/cares
PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/gflags/include
% if lib.build in ['test', 'private'] and lib.language == 'c++': % if lib.build in ['test', 'private'] and lib.language == 'c++':
PRIVATE third_party/googletest/googletest/include PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest PRIVATE third_party/googletest/googletest
@ -417,13 +422,11 @@
PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text> PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/include PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/include
PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text> PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text>
PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src PRIVATE <%text>${_gRPC_PROTOBUF_INCLUDE_DIR}</%text>
PRIVATE <%text>${BENCHMARK_ROOT_DIR}</%text>/include PRIVATE <%text>${_gRPC_ZLIB_INCLUDE_DIR}</%text>
PRIVATE <%text>${ZLIB_ROOT_DIR}</%text> PRIVATE <%text>${_gRPC_BENCHMARK_INCLUDE_DIR}</%text>
PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/zlib PRIVATE <%text>${_gRPC_CARES_INCLUDE_DIR}</%text>
PRIVATE <%text>${CARES_INCLUDE_DIR}</%text> PRIVATE <%text>${_gRPC_GFLAGS_INCLUDE_DIR}</%text>
PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/cares/cares
PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/gflags/include
% if tgt.build in ['test', 'private'] and tgt.language == 'c++': % if tgt.build in ['test', 'private'] and tgt.language == 'c++':
PRIVATE third_party/googletest/googletest/include PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest PRIVATE third_party/googletest/googletest

@ -563,7 +563,6 @@
ZLIB_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libz.a ZLIB_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libz.a
ZLIB_MERGE_OBJS = $(LIBZ_OBJS) ZLIB_MERGE_OBJS = $(LIBZ_OBJS)
CPPFLAGS += -Ithird_party/zlib CPPFLAGS += -Ithird_party/zlib
LDFLAGS += -L$(LIBDIR)/$(CONFIG)/zlib
else else
ifeq ($(HAS_PKG_CONFIG),true) ifeq ($(HAS_PKG_CONFIG),true)
CPPFLAGS += $(shell $(PKG_CONFIG) --cflags zlib) CPPFLAGS += $(shell $(PKG_CONFIG) --cflags zlib)
@ -594,7 +593,6 @@
CARES_MERGE_OBJS = $(LIBARES_OBJS) CARES_MERGE_OBJS = $(LIBARES_OBJS)
CARES_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libares.a CARES_MERGE_LIBS = $(LIBDIR)/$(CONFIG)/libares.a
CPPFLAGS := -Ithird_party/cares -Ithird_party/cares/cares $(CPPFLAGS) CPPFLAGS := -Ithird_party/cares -Ithird_party/cares/cares $(CPPFLAGS)
LDFLAGS := -L$(LIBDIR)/$(CONFIG)/c-ares $(LDFLAGS)
else else
ifeq ($(HAS_PKG_CONFIG),true) ifeq ($(HAS_PKG_CONFIG),true)
PC_REQUIRES_GRPC += libcares PC_REQUIRES_GRPC += libcares
@ -896,10 +894,10 @@
$(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure
$(E) "[MAKE] Building protobuf" $(E) "[MAKE] Building protobuf"
$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf
$(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static $(PROTOBUF_CONFIG_OPTS)) $(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static $(PROTOBUF_CONFIG_OPTS))
$(Q)$(MAKE) -C third_party/protobuf clean $(Q)$(MAKE) -C third_party/protobuf clean
$(Q)$(MAKE) -C third_party/protobuf $(Q)$(MAKE) -C third_party/protobuf
$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf
$(Q)mkdir -p $(BINDIR)/$(CONFIG)/protobuf $(Q)mkdir -p $(BINDIR)/$(CONFIG)/protobuf
$(Q)cp third_party/protobuf/src/.libs/libprotoc.a $(LIBDIR)/$(CONFIG)/protobuf $(Q)cp third_party/protobuf/src/.libs/libprotoc.a $(LIBDIR)/$(CONFIG)/protobuf
$(Q)cp third_party/protobuf/src/.libs/libprotobuf.a $(LIBDIR)/$(CONFIG)/protobuf $(Q)cp third_party/protobuf/src/.libs/libprotobuf.a $(LIBDIR)/$(CONFIG)/protobuf

@ -202,6 +202,6 @@
# TODO (mxyan): Instead of this hack, add include path "third_party" to C core's include path? # TODO (mxyan): Instead of this hack, add include path "third_party" to C core's include path?
s.prepare_command = <<-END_OF_COMMAND s.prepare_command = <<-END_OF_COMMAND
find src/core/ -type f -exec sed -E -i '.back' 's;#include "third_party/nanopb/(.*)";#include <nanopb/\\1>;g' {} \\\; find src/core/ -type f -exec sed -E -i'.back' 's;#include "third_party/nanopb/(.*)";#include <nanopb/\\1>;g' {} \\\;
END_OF_COMMAND END_OF_COMMAND
end end

@ -3,3 +3,5 @@ RUN wget http://releases.llvm.org/5.0.0/clang+llvm-5.0.0-linux-x86_64-ubuntu14.0
RUN tar xf clang+llvm-5.0.0-linux-x86_64-ubuntu14.04.tar.xz RUN tar xf clang+llvm-5.0.0-linux-x86_64-ubuntu14.04.tar.xz
RUN ln -s /clang+llvm-5.0.0-linux-x86_64-ubuntu14.04/bin/clang-format /usr/local/bin/clang-format RUN ln -s /clang+llvm-5.0.0-linux-x86_64-ubuntu14.04/bin/clang-format /usr/local/bin/clang-format
ENV CLANG_FORMAT=clang-format ENV CLANG_FORMAT=clang-format
RUN ln -s /clang+llvm-5.0.0-linux-x86_64-ubuntu14.04/bin/clang-tidy /usr/local/bin/clang-tidy
ENV CLANG_TIDY=clang-tidy

@ -16,8 +16,8 @@
FROM debian:jessie FROM debian:jessie
<%include file="../clang_format.include"/> <%include file="../clang5.include"/>
ADD clang_format_all_the_things.sh / ADD clang_format_all_the_things.sh /
CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"] CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"]

@ -0,0 +1,24 @@
%YAML 1.2
--- |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM debian:jessie
<%include file="../clang5.include"/>
<%include file="../python_deps.include"/>
ADD clang_tidy_all_the_things.sh /
CMD ["echo 'Run with tools/distrib/clang_tidy_code.sh'"]

@ -53,7 +53,7 @@
RUN chmod +x ./bazel-0.4.4-installer-linux-x86_64.sh RUN chmod +x ./bazel-0.4.4-installer-linux-x86_64.sh
RUN ./bazel-0.4.4-installer-linux-x86_64.sh RUN ./bazel-0.4.4-installer-linux-x86_64.sh
<%include file="../../clang_format.include"/> <%include file="../../clang5.include"/>
<%include file="../../run_tests_addons.include"/> <%include file="../../run_tests_addons.include"/>
# Define the default command. # Define the default command.

@ -43,14 +43,11 @@
static void* tag(intptr_t i) { return (void*)i; } static void* tag(intptr_t i) { return (void*)i; }
static gpr_timespec ms_from_now(int ms) {
return grpc_timeout_milliseconds_to_deadline(ms);
}
static void drain_cq(grpc_completion_queue* cq) { static void drain_cq(grpc_completion_queue* cq) {
grpc_event ev; grpc_event ev;
do { do {
ev = grpc_completion_queue_next(cq, ms_from_now(5000), nullptr); ev = grpc_completion_queue_next(
cq, grpc_timeout_milliseconds_to_deadline(5000), nullptr);
} while (ev.type != GRPC_QUEUE_SHUTDOWN); } while (ev.type != GRPC_QUEUE_SHUTDOWN);
} }
@ -165,11 +162,11 @@ void test_connect(const char* server_host, const char* client_host, int port,
if (expect_ok) { if (expect_ok) {
/* Normal deadline, shouldn't be reached. */ /* Normal deadline, shouldn't be reached. */
deadline = ms_from_now(60000); deadline = grpc_timeout_milliseconds_to_deadline(60000);
} else { } else {
/* Give up faster when failure is expected. /* Give up faster when failure is expected.
BUG: Setting this to 1000 reveals a memory leak (b/18608927). */ BUG: Setting this to 1000 reveals a memory leak (b/18608927). */
deadline = ms_from_now(1500); deadline = grpc_timeout_milliseconds_to_deadline(3000);
} }
/* Send a trivial request. */ /* Send a trivial request. */

@ -68,6 +68,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void); extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config); extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void); extern void filter_latency_pre_init(void);
extern void filter_status_code(grpc_end2end_test_config config);
extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config); extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void); extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config); extern void high_initial_seqno(grpc_end2end_test_config config);
@ -170,6 +172,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init(); filter_call_init_fails_pre_init();
filter_causes_close_pre_init(); filter_causes_close_pre_init();
filter_latency_pre_init(); filter_latency_pre_init();
filter_status_code_pre_init();
graceful_server_shutdown_pre_init(); graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init(); high_initial_seqno_pre_init();
hpack_size_pre_init(); hpack_size_pre_init();
@ -237,6 +240,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config); filter_call_init_fails(config);
filter_causes_close(config); filter_causes_close(config);
filter_latency(config); filter_latency(config);
filter_status_code(config);
graceful_server_shutdown(config); graceful_server_shutdown(config);
high_initial_seqno(config); high_initial_seqno(config);
hpack_size(config); hpack_size(config);
@ -356,6 +360,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config); filter_latency(config);
continue; continue;
} }
if (0 == strcmp("filter_status_code", argv[i])) {
filter_status_code(config);
continue;
}
if (0 == strcmp("graceful_server_shutdown", argv[i])) { if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config); graceful_server_shutdown(config);
continue; continue;

@ -70,6 +70,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void); extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config); extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void); extern void filter_latency_pre_init(void);
extern void filter_status_code(grpc_end2end_test_config config);
extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config); extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void); extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config); extern void high_initial_seqno(grpc_end2end_test_config config);
@ -173,6 +175,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init(); filter_call_init_fails_pre_init();
filter_causes_close_pre_init(); filter_causes_close_pre_init();
filter_latency_pre_init(); filter_latency_pre_init();
filter_status_code_pre_init();
graceful_server_shutdown_pre_init(); graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init(); high_initial_seqno_pre_init();
hpack_size_pre_init(); hpack_size_pre_init();
@ -241,6 +244,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config); filter_call_init_fails(config);
filter_causes_close(config); filter_causes_close(config);
filter_latency(config); filter_latency(config);
filter_status_code(config);
graceful_server_shutdown(config); graceful_server_shutdown(config);
high_initial_seqno(config); high_initial_seqno(config);
hpack_size(config); hpack_size(config);
@ -364,6 +368,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config); filter_latency(config);
continue; continue;
} }
if (0 == strcmp("filter_status_code", argv[i])) {
filter_status_code(config);
continue;
}
if (0 == strcmp("graceful_server_shutdown", argv[i])) { if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config); graceful_server_shutdown(config);
continue; continue;

@ -280,7 +280,12 @@ static grpc_channel_credentials* read_ssl_channel_creds(input_stream* inp) {
return creds; return creds;
} }
static grpc_call_credentials* read_call_creds(input_stream* inp) { static grpc_call_credentials* read_call_creds(input_stream* inp, int depth) {
if (depth > 64) {
// prevent creating infinitely deep call creds
end(inp);
return nullptr;
}
switch (next_byte(inp)) { switch (next_byte(inp)) {
default: default:
end(inp); end(inp);
@ -288,8 +293,8 @@ static grpc_call_credentials* read_call_creds(input_stream* inp) {
case 0: case 0:
return nullptr; return nullptr;
case 1: { case 1: {
grpc_call_credentials* c1 = read_call_creds(inp); grpc_call_credentials* c1 = read_call_creds(inp, depth + 1);
grpc_call_credentials* c2 = read_call_creds(inp); grpc_call_credentials* c2 = read_call_creds(inp, depth + 1);
if (c1 != nullptr && c2 != nullptr) { if (c1 != nullptr && c2 != nullptr) {
grpc_call_credentials* out = grpc_call_credentials* out =
grpc_composite_call_credentials_create(c1, c2, nullptr); grpc_composite_call_credentials_create(c1, c2, nullptr);
@ -338,7 +343,7 @@ static grpc_channel_credentials* read_channel_creds(input_stream* inp) {
break; break;
case 1: { case 1: {
grpc_channel_credentials* c1 = read_channel_creds(inp); grpc_channel_credentials* c1 = read_channel_creds(inp);
grpc_call_credentials* c2 = read_call_creds(inp); grpc_call_credentials* c2 = read_call_creds(inp, 0);
if (c1 != nullptr && c2 != nullptr) { if (c1 != nullptr && c2 != nullptr) {
grpc_channel_credentials* out = grpc_channel_credentials* out =
grpc_composite_channel_credentials_create(c1, c2, nullptr); grpc_composite_channel_credentials_create(c1, c2, nullptr);

@ -101,6 +101,7 @@ END2END_TESTS = {
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU), 'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options, 'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU), 'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True), 'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
'hpack_size': default_test_options._replace(proxyable=False, 'hpack_size': default_test_options._replace(proxyable=False,
traceable=False, traceable=False,

@ -146,6 +146,7 @@ END2END_TESTS = {
'trailing_metadata': test_options(), 'trailing_metadata': test_options(),
'authority_not_supported': test_options(), 'authority_not_supported': test_options(),
'filter_latency': test_options(), 'filter_latency': test_options(),
'filter_status_code': test_options(),
'workaround_cronet_compression': test_options(), 'workaround_cronet_compression': test_options(),
'write_buffering': test_options(needs_write_buffering=True), 'write_buffering': test_options(needs_write_buffering=True),
'write_buffering_at_end': test_options(needs_write_buffering=True), 'write_buffering_at_end': test_options(needs_write_buffering=True),

@ -0,0 +1,378 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
#include "test/core/end2end/cq_verifier.h"
// Enables installation of the status-code test filters; consumed outside this
// chunk -- presumably checked during channel init, TODO confirm.
static bool g_enable_filter = false;
// Guards all of the globals below.
static gpr_mu g_mu;
// Call stacks of the client/server calls created by the test body; the filter
// destroy callbacks compare against these to ignore unrelated calls (e.g.
// intermediate calls spawned by proxy fixtures).
static grpc_call_stack* g_client_call_stack;
static grpc_call_stack* g_server_call_stack;
// Set once the corresponding destroy_call_elem callback has recorded a status.
static bool g_client_code_recv;
static bool g_server_code_recv;
// Signaled when the corresponding status code has been recorded.
static gpr_cv g_client_code_cv;
static gpr_cv g_server_code_cv;
// Final status observed by the client-side / server-side filter.
static grpc_status_code g_client_status_code;
static grpc_status_code g_server_status_code;
static void* tag(intptr_t t) { return (void*)t; }
// Creates a fixture for |test_name| and brings up the server first, then the
// client, so the client has something to connect to.
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
                                            const char* test_name,
                                            grpc_channel_args* client_args,
                                            grpc_channel_args* server_args) {
  gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
  grpc_end2end_test_fixture fixture =
      config.create_fixture(client_args, server_args);
  config.init_server(&fixture, server_args);
  config.init_client(&fixture, client_args);
  return fixture;
}
// Returns a deadline |n| seconds in the future.
static gpr_timespec n_seconds_from_now(int n) {
  return grpc_timeout_seconds_to_deadline(n);
}
// Convenience wrapper: a deadline five seconds in the future.
static gpr_timespec five_seconds_from_now(void) {
  return n_seconds_from_now(5);
}
// Pulls events off |cq| until the queue reports shutdown.
static void drain_cq(grpc_completion_queue* cq) {
  for (;;) {
    grpc_event event =
        grpc_completion_queue_next(cq, five_seconds_from_now(), nullptr);
    if (event.type == GRPC_QUEUE_SHUTDOWN) break;
  }
}
// Gracefully shuts down the fixture's server (no-op if already gone) and
// waits up to five seconds for the shutdown notification.
static void shutdown_server(grpc_end2end_test_fixture* f) {
  if (f->server == nullptr) return;
  grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
  grpc_event ev = grpc_completion_queue_pluck(
      f->shutdown_cq, tag(1000), grpc_timeout_seconds_to_deadline(5), nullptr);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
  grpc_server_destroy(f->server);
  f->server = nullptr;
}
// Destroys the fixture's client channel; safe to call when already destroyed.
static void shutdown_client(grpc_end2end_test_fixture* f) {
  if (f->client == nullptr) return;
  grpc_channel_destroy(f->client);
  f->client = nullptr;
}
// Tears down a fixture: stops the server and client, then drains and destroys
// both completion queues.
static void end_test(grpc_end2end_test_fixture* f) {
  shutdown_server(f);
  shutdown_client(f);
  grpc_completion_queue_shutdown(f->cq);
  drain_cq(f->cq);
  grpc_completion_queue_destroy(f->cq);
  grpc_completion_queue_destroy(f->shutdown_cq);
}
// Simple request via a server filter that saves the reported status code.
//
// Runs one client/server exchange in which the server replies with
// UNIMPLEMENTED/"xyz", then (after full tear-down) verifies that the
// client- and server-side filters each observed GRPC_STATUS_UNIMPLEMENTED
// as the call's final status.
static void test_request(grpc_end2end_test_config config) {
  grpc_call* c;
  grpc_call* s;
  grpc_end2end_test_fixture f =
      begin_test(config, "filter_status_code", nullptr, nullptr);
  cq_verifier* cqv = cq_verifier_create(f.cq);
  grpc_op ops[6];
  grpc_op* op;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_metadata_array request_metadata_recv;
  grpc_call_details call_details;
  grpc_status_code status;
  grpc_call_error error;
  grpc_slice details;
  int was_cancelled = 2;

  // Reset the state shared with the filters before starting the call.
  gpr_mu_lock(&g_mu);
  g_client_call_stack = nullptr;
  g_server_call_stack = nullptr;
  g_client_status_code = GRPC_STATUS_OK;
  g_server_status_code = GRPC_STATUS_OK;
  gpr_mu_unlock(&g_mu);

  gpr_timespec deadline = five_seconds_from_now();
  c = grpc_channel_create_call(
      f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
      grpc_slice_from_static_string("/foo"),
      get_host_override_slice("foo.test.google.fr", config), deadline, nullptr);
  GPR_ASSERT(c);
  // Register the client call stack so the filter can recognize this call.
  gpr_mu_lock(&g_mu);
  g_client_call_stack = grpc_call_get_call_stack(c);
  gpr_mu_unlock(&g_mu);

  grpc_metadata_array_init(&initial_metadata_recv);
  grpc_metadata_array_init(&trailing_metadata_recv);
  grpc_metadata_array_init(&request_metadata_recv);
  grpc_call_details_init(&call_details);

  // Client batch: send initial metadata + half-close, receive the server's
  // initial metadata and final status.
  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->data.send_initial_metadata.metadata = nullptr;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), nullptr);
  GPR_ASSERT(GRPC_CALL_OK == error);

  error =
      grpc_server_request_call(f.server, &s, &call_details,
                               &request_metadata_recv, f.cq, f.cq, tag(101));
  GPR_ASSERT(GRPC_CALL_OK == error);
  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
  cq_verify(cqv);

  // Register the server call stack so the filter can recognize this call.
  gpr_mu_lock(&g_mu);
  g_server_call_stack = grpc_call_get_call_stack(s);
  gpr_mu_unlock(&g_mu);

  // Server batch: send initial metadata, reply UNIMPLEMENTED/"xyz", and
  // receive the client's close.
  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
  op->data.send_status_from_server.trailing_metadata_count = 0;
  op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
  grpc_slice status_string = grpc_slice_from_static_string("xyz");
  op->data.send_status_from_server.status_details = &status_string;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
  op->data.recv_close_on_server.cancelled = &was_cancelled;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), nullptr);
  GPR_ASSERT(GRPC_CALL_OK == error);

  CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  cq_verify(cqv);

  GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
  GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));

  grpc_slice_unref(details);
  grpc_metadata_array_destroy(&initial_metadata_recv);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_metadata_array_destroy(&request_metadata_recv);
  grpc_call_details_destroy(&call_details);

  grpc_call_unref(s);
  grpc_call_unref(c);

  cq_verifier_destroy(cqv);

  end_test(&f);
  config.tear_down_data(&f);

  // Perform checks after test tear-down
  // Guards against the case that there's outstanding channel-related work on a
  // call prior to verification
  gpr_mu_lock(&g_mu);
  // Wait in a predicate loop: a condition-variable wait may wake spuriously
  // (return 0 without the flag being set), so re-check the flag each time and
  // only fail on an actual timeout.
  while (!g_client_code_recv) {
    GPR_ASSERT(gpr_cv_wait(&g_client_code_cv, &g_mu,
                           grpc_timeout_seconds_to_deadline(3)) == 0);
  }
  while (!g_server_code_recv) {
    GPR_ASSERT(gpr_cv_wait(&g_server_code_cv, &g_mu,
                           grpc_timeout_seconds_to_deadline(3)) == 0);
  }
  GPR_ASSERT(g_client_status_code == GRPC_STATUS_UNIMPLEMENTED);
  GPR_ASSERT(g_server_status_code == GRPC_STATUS_UNIMPLEMENTED);
  gpr_mu_unlock(&g_mu);
}
/*******************************************************************************
* Test status_code filter
*/
// Per-call filter state: the call stack this filter element belongs to, used
// by the destroy callbacks to match elements to the calls the test created.
typedef struct final_status_data {
  grpc_call_stack* call;
} final_status_data;
// Captures the owning call stack at call-creation time; never fails.
static grpc_error* init_call_elem(grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  final_status_data* data = static_cast<final_status_data*>(elem->call_data);
  data->call = args->call_stack;
  return GRPC_ERROR_NONE;
}
// Records the client-side final status under g_mu, but only for the call the
// test issued explicitly; fixtures such as proxies create intermediate calls
// whose statuses must be ignored.
static void client_destroy_call_elem(grpc_call_element* elem,
                                     const grpc_call_final_info* final_info,
                                     grpc_closure* ignored) {
  final_status_data* data = static_cast<final_status_data*>(elem->call_data);
  gpr_mu_lock(&g_mu);
  const bool is_test_call = (data->call == g_client_call_stack);
  if (is_test_call) {
    g_client_status_code = final_info->final_status;
    g_client_code_recv = true;
    gpr_cv_signal(&g_client_code_cv);  // wake the post-teardown waiter
  }
  gpr_mu_unlock(&g_mu);
}
// Records the server-side final status under g_mu, but only for the call the
// test issued explicitly; fixtures such as proxies create intermediate calls
// whose statuses must be ignored.
static void server_destroy_call_elem(grpc_call_element* elem,
                                     const grpc_call_final_info* final_info,
                                     grpc_closure* ignored) {
  final_status_data* data = static_cast<final_status_data*>(elem->call_data);
  gpr_mu_lock(&g_mu);
  const bool is_test_call = (data->call == g_server_call_stack);
  if (is_test_call) {
    g_server_status_code = final_info->final_status;
    g_server_code_recv = true;
    gpr_cv_signal(&g_server_code_cv);  // wake the post-teardown waiter
  }
  gpr_mu_unlock(&g_mu);
}
// The test filters keep no per-channel state, so channel-element
// initialization is a no-op that always succeeds.
static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  return GRPC_ERROR_NONE;
}
// Nothing to tear down: this filter holds no channel-level state.
static void destroy_channel_elem(grpc_channel_element* elem) {}
// Pass-through client filter: forwards every op unchanged and exists only so
// client_destroy_call_elem can observe the call's final status.
// Initializer order follows the grpc_channel_filter field layout
// (NOTE(review): field-name comments below inferred from that struct —
// confirm against channel_stack.h if the layout changes).
static const grpc_channel_filter test_client_filter = {
    grpc_call_next_op,          // start_transport_stream_op_batch: pass through
    grpc_channel_next_op,       // start_transport_op: pass through
    sizeof(final_status_data),  // sizeof_call_data
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    client_destroy_call_elem,   // records g_client_status_code
    0,                          // sizeof_channel_data: no channel state
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "client_filter_status_code"};
// Pass-through server filter: forwards every op unchanged and exists only so
// server_destroy_call_elem can observe the call's final status.
// Initializer order follows the grpc_channel_filter field layout
// (NOTE(review): field-name comments below inferred from that struct —
// confirm against channel_stack.h if the layout changes).
static const grpc_channel_filter test_server_filter = {
    grpc_call_next_op,          // start_transport_stream_op_batch: pass through
    grpc_channel_next_op,       // start_transport_op: pass through
    sizeof(final_status_data),  // sizeof_call_data
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    server_destroy_call_elem,   // records g_server_status_code
    0,                          // sizeof_channel_data: no channel state
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "server_filter_status_code"};
/*******************************************************************************
* Registration
*/
// Channel-init stage callback: when the test has enabled the filter, inserts
// it into the stack under construction; otherwise leaves the stack untouched.
static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
  if (!g_enable_filter) return true;
  grpc_channel_filter* filter = static_cast<grpc_channel_filter*>(arg);
  // Insert as close to the end of the stack as possible so the filter plays
  // well with everything ahead of it, but not at the very end: the
  // connected_channel/client_channel filter must remain last. Hence we step
  // back one slot from the final position and insert before it.
  grpc_channel_stack_builder_iterator* it =
      grpc_channel_stack_builder_create_iterator_at_last(builder);
  GPR_ASSERT(grpc_channel_stack_builder_move_prev(it));
  const bool added = grpc_channel_stack_builder_add_filter_before(
      it, filter, nullptr, nullptr);
  grpc_channel_stack_builder_iterator_destroy(it);
  return added;
}
static void init_plugin(void) {
gpr_mu_init(&g_mu);
gpr_cv_init(&g_client_code_cv);
gpr_cv_init(&g_server_code_cv);
g_client_code_recv = false;
g_server_code_recv = false;
grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
maybe_add_filter,
(void*)&test_client_filter);
grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
maybe_add_filter,
(void*)&test_client_filter);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
maybe_add_filter,
(void*)&test_server_filter);
}
// Plugin hook run from grpc_shutdown(): releases the sync primitives created
// in init_plugin.
static void destroy_plugin(void) {
  gpr_cv_destroy(&g_server_code_cv);
  gpr_cv_destroy(&g_client_code_cv);
  gpr_mu_destroy(&g_mu);
}
// Test entry point: runs the end2end request with the status-code filters
// enabled, then disables them again for subsequent tests.
void filter_status_code(grpc_end2end_test_config config) {
  g_enable_filter = true;
  test_request(config);
  g_enable_filter = false;
}
// Hooks plugin setup/teardown into grpc_init()/grpc_shutdown(); must be
// called before grpc_init().
void filter_status_code_pre_init(void) {
  grpc_register_plugin(init_plugin, destroy_plugin);
}

@ -186,8 +186,10 @@ int main(int argc, char** argv) {
} }
if (!sc.name) { if (!sc.name) {
fprintf(stderr, "unsupported scenario '%s'. Valid are:", scenario_name); fprintf(stderr, "unsupported scenario '%s'. Valid are:", scenario_name);
fflush(stderr);
for (i = 0; i < GPR_ARRAY_SIZE(scenarios); i++) { for (i = 0; i < GPR_ARRAY_SIZE(scenarios); i++) {
fprintf(stderr, " %s", scenarios[i].name); fprintf(stderr, " %s", scenarios[i].name);
fflush(stderr);
} }
return 1; return 1;
} }

@ -18,7 +18,7 @@
#include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/port.h"
/* This test only relevant on linux systems where epoll() is available */ /* This test only relevant on linux systems where epoll() is available */
#ifdef GRPC_LINUX_EPOLL #ifdef GRPC_LINUX_EPOLL_CREATE1
#include "src/core/lib/iomgr/ev_epollsig_linux.h" #include "src/core/lib/iomgr/ev_epollsig_linux.h"
#include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/ev_posix.h"
@ -319,6 +319,6 @@ int main(int argc, char** argv) {
grpc_shutdown(); grpc_shutdown();
return 0; return 0;
} }
#else /* defined(GRPC_LINUX_EPOLL) */ #else /* defined(GRPC_LINUX_EPOLL_CREATE1) */
int main(int argc, char** argv) { return 0; } int main(int argc, char** argv) { return 0; }
#endif /* !defined(GRPC_LINUX_EPOLL) */ #endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */

@ -18,7 +18,7 @@
#include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/port.h"
/* This test only relevant on linux systems where epoll is available */ /* This test only relevant on linux systems where epoll is available */
#ifdef GRPC_LINUX_EPOLL #ifdef GRPC_LINUX_EPOLL_CREATE1
#include <errno.h> #include <errno.h>
#include <string.h> #include <string.h>
@ -443,6 +443,6 @@ int main(int argc, char** argv) {
grpc_shutdown(); grpc_shutdown();
return 0; return 0;
} }
#else /* defined(GRPC_LINUX_EPOLL) */ #else /* defined(GRPC_LINUX_EPOLL_CREATE1) */
int main(int argc, char** argv) { return 0; } int main(int argc, char** argv) { return 0; }
#endif /* !defined(GRPC_LINUX_EPOLL) */ #endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */

@ -51,6 +51,9 @@ static int g_number_of_bytes_read = 0;
static int g_number_of_orphan_calls = 0; static int g_number_of_orphan_calls = 0;
static int g_number_of_starts = 0; static int g_number_of_starts = 0;
int rcv_buf_size = 1024;
int snd_buf_size = 1024;
static void on_start(grpc_fd* emfd, void* user_data) { g_number_of_starts++; } static void on_start(grpc_fd* emfd, void* user_data) { g_number_of_starts++; }
static bool on_read(grpc_fd* emfd) { static bool on_read(grpc_fd* emfd) {
@ -177,8 +180,9 @@ static void test_no_op_with_port(void) {
memset(&resolved_addr, 0, sizeof(resolved_addr)); memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = sizeof(struct sockaddr_in); resolved_addr.len = sizeof(struct sockaddr_in);
addr->sin_family = AF_INET; addr->sin_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read, GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
on_write, on_fd_orphaned)); snd_buf_size, on_start, on_read, on_write,
on_fd_orphaned));
grpc_udp_server_destroy(s, nullptr); grpc_udp_server_destroy(s, nullptr);
@ -207,8 +211,9 @@ static void test_no_op_with_port_and_socket_factory(void) {
memset(&resolved_addr, 0, sizeof(resolved_addr)); memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = sizeof(struct sockaddr_in); resolved_addr.len = sizeof(struct sockaddr_in);
addr->sin_family = AF_INET; addr->sin_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read, GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
on_write, on_fd_orphaned)); snd_buf_size, on_start, on_read, on_write,
on_fd_orphaned));
GPR_ASSERT(socket_factory->number_of_socket_calls == 1); GPR_ASSERT(socket_factory->number_of_socket_calls == 1);
GPR_ASSERT(socket_factory->number_of_bind_calls == 1); GPR_ASSERT(socket_factory->number_of_bind_calls == 1);
@ -233,8 +238,9 @@ static void test_no_op_with_port_and_start(void) {
memset(&resolved_addr, 0, sizeof(resolved_addr)); memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = sizeof(struct sockaddr_in); resolved_addr.len = sizeof(struct sockaddr_in);
addr->sin_family = AF_INET; addr->sin_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read, GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
on_write, on_fd_orphaned)); snd_buf_size, on_start, on_read, on_write,
on_fd_orphaned));
grpc_udp_server_start(s, nullptr, 0, nullptr); grpc_udp_server_start(s, nullptr, 0, nullptr);
GPR_ASSERT(g_number_of_starts == 1); GPR_ASSERT(g_number_of_starts == 1);
@ -265,8 +271,9 @@ static void test_receive(int number_of_clients) {
memset(&resolved_addr, 0, sizeof(resolved_addr)); memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = sizeof(struct sockaddr_storage); resolved_addr.len = sizeof(struct sockaddr_storage);
addr->ss_family = AF_INET; addr->ss_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read, GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
on_write, on_fd_orphaned)); snd_buf_size, on_start, on_read, on_write,
on_fd_orphaned));
svrfd = grpc_udp_server_get_fd(s, 0); svrfd = grpc_udp_server_get_fd(s, 0);
GPR_ASSERT(svrfd >= 0); GPR_ASSERT(svrfd >= 0);

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save