Merge branch 'master' into testc++ize

pull/13147/head
Yash Tibrewal 7 years ago
commit 268685bcbd
  1. 19
      BUILD
  2. 8
      CMakeLists.txt
  3. 9
      Makefile
  4. 2
      binding.gyp
  5. 12
      build.yaml
  6. 3
      config.m4
  7. 2
      config.w32
  8. 14
      doc/environment_variables.md
  9. 6
      gRPC-Core.podspec
  10. 6
      grpc.def
  11. 4
      grpc.gemspec
  12. 7
      grpc.gyp
  13. 2
      include/grpc++/alarm.h
  14. 14
      include/grpc++/channel.h
  15. 231
      include/grpc++/impl/codegen/async_stream.h
  16. 62
      include/grpc++/impl/codegen/async_unary_call.h
  17. 21
      include/grpc++/impl/codegen/byte_buffer.h
  18. 17
      include/grpc++/impl/codegen/call.h
  19. 2
      include/grpc++/impl/codegen/call_hook.h
  20. 41
      include/grpc++/impl/codegen/channel_interface.h
  21. 23
      include/grpc++/impl/codegen/client_context.h
  22. 35
      include/grpc++/impl/codegen/client_unary_call.h
  23. 51
      include/grpc++/impl/codegen/completion_queue.h
  24. 2
      include/grpc++/impl/codegen/completion_queue_tag.h
  25. 2
      include/grpc++/impl/codegen/metadata_map.h
  26. 2
      include/grpc++/impl/codegen/method_handler_impl.h
  27. 3
      include/grpc++/impl/codegen/rpc_method.h
  28. 3
      include/grpc++/impl/codegen/rpc_service_method.h
  29. 27
      include/grpc++/impl/codegen/server_context.h
  30. 42
      include/grpc++/impl/codegen/server_interface.h
  31. 46
      include/grpc++/impl/codegen/service_type.h
  32. 327
      include/grpc++/impl/codegen/sync_stream.h
  33. 6
      include/grpc++/impl/codegen/time.h
  34. 3
      include/grpc++/server.h
  35. 1
      include/grpc++/server_builder.h
  36. 74
      include/grpc/grpc_security.h
  37. 7
      include/grpc/grpc_security_constants.h
  38. 2
      include/grpc/impl/codegen/connectivity_state.h
  39. 4
      package.xml
  40. 91
      src/compiler/cpp_generator.cc
  41. 158
      src/core/ext/filters/client_channel/backup_poller.cc
  42. 34
      src/core/ext/filters/client_channel/backup_poller.h
  43. 7
      src/core/ext/filters/client_channel/client_channel.cc
  44. 2
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
  45. 686
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
  46. 452
      src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
  47. 265
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
  48. 153
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
  49. 4
      src/core/ext/filters/client_channel/subchannel.h
  50. 8
      src/core/ext/transport/chttp2/transport/flow_control.h
  51. 156
      src/core/lib/security/credentials/ssl/ssl_credentials.cc
  52. 12
      src/core/lib/security/credentials/ssl/ssl_credentials.h
  53. 243
      src/core/lib/security/transport/security_connector.cc
  54. 4
      src/core/lib/security/transport/security_connector.h
  55. 3
      src/core/lib/transport/connectivity_state.cc
  56. 202
      src/cpp/client/channel_cc.cc
  57. 10
      src/cpp/client/generic_stub.cc
  58. 4
      src/cpp/common/completion_queue_cc.cc
  59. 9
      src/cpp/server/health/default_health_check_service.cc
  60. 2
      src/cpp/server/health/default_health_check_service.h
  61. 69
      src/cpp/server/server_cc.cc
  62. 8
      src/cpp/server/server_context.cc
  63. 2
      src/python/grpcio/grpc_core_dependencies.py
  64. 12
      src/ruby/ext/grpc/rb_grpc_imports.generated.c
  65. 18
      src/ruby/ext/grpc/rb_grpc_imports.generated.h
  66. 4
      test/core/client_channel/lb_policies_test.cc
  67. 8
      test/core/transport/status_conversion_test.cc
  68. 17
      test/cpp/codegen/compiler_test_golden
  69. 22
      test/cpp/end2end/async_end2end_test.cc
  70. 4
      test/cpp/end2end/client_lb_end2end_test.cc
  71. 22
      test/cpp/end2end/end2end_test.cc
  72. 2
      test/cpp/microbenchmarks/bm_cq.cc
  73. 21
      test/cpp/microbenchmarks/bm_fullstack_trickle.cc
  74. 14
      test/cpp/microbenchmarks/helpers.cc
  75. 26
      test/cpp/qps/client_async.cc
  76. 37
      test/cpp/qps/server_async.cc
  77. 2
      third_party/benchmark
  78. 4
      tools/doxygen/Doxyfile.core.internal
  79. 3
      tools/internal_ci/helper_scripts/prepare_build_linux_rc
  80. 2
      tools/internal_ci/linux/grpc_build_submodule_at_head.sh
  81. 2
      tools/internal_ci/linux/grpc_portability_build_only.cfg
  82. 2
      tools/profiling/microbenchmarks/bm_diff/bm_constants.py
  83. 4
      tools/run_tests/dockerize/build_and_run_docker.sh
  84. 2
      tools/run_tests/dockerize/build_docker_and_run_tests.sh
  85. 2
      tools/run_tests/dockerize/build_interop_image.sh
  86. 30
      tools/run_tests/generated/sources_and_headers.json
  87. 2
      tools/run_tests/sanity/check_submodules.sh

19
BUILD

@ -872,6 +872,7 @@ grpc_cc_library(
grpc_cc_library( grpc_cc_library(
name = "grpc_client_channel", name = "grpc_client_channel",
srcs = [ srcs = [
"src/core/ext/filters/client_channel/backup_poller.cc",
"src/core/ext/filters/client_channel/channel_connectivity.cc", "src/core/ext/filters/client_channel/channel_connectivity.cc",
"src/core/ext/filters/client_channel/client_channel.cc", "src/core/ext/filters/client_channel/client_channel.cc",
"src/core/ext/filters/client_channel/client_channel_factory.cc", "src/core/ext/filters/client_channel/client_channel_factory.cc",
@ -894,6 +895,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/uri_parser.cc", "src/core/ext/filters/client_channel/uri_parser.cc",
], ],
hdrs = [ hdrs = [
"src/core/ext/filters/client_channel/backup_poller.h",
"src/core/ext/filters/client_channel/client_channel.h", "src/core/ext/filters/client_channel/client_channel.h",
"src/core/ext/filters/client_channel/client_channel_factory.h", "src/core/ext/filters/client_channel/client_channel_factory.h",
"src/core/ext/filters/client_channel/connector.h", "src/core/ext/filters/client_channel/connector.h",
@ -1074,6 +1076,21 @@ grpc_cc_library(
], ],
) )
grpc_cc_library(
name = "grpc_lb_subchannel_list",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
],
)
grpc_cc_library( grpc_cc_library(
name = "grpc_lb_policy_pick_first", name = "grpc_lb_policy_pick_first",
srcs = [ srcs = [
@ -1083,6 +1100,7 @@ grpc_cc_library(
deps = [ deps = [
"grpc_base", "grpc_base",
"grpc_client_channel", "grpc_client_channel",
"grpc_lb_subchannel_list",
], ],
) )
@ -1095,6 +1113,7 @@ grpc_cc_library(
deps = [ deps = [
"grpc_base", "grpc_base",
"grpc_client_channel", "grpc_client_channel",
"grpc_lb_subchannel_list",
], ],
) )

@ -1153,6 +1153,7 @@ add_library(grpc
src/core/tsi/transport_security_adapter.cc src/core/tsi/transport_security_adapter.cc
src/core/ext/transport/chttp2/server/chttp2_server.cc src/core/ext/transport/chttp2/server/chttp2_server.cc
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc src/core/ext/filters/client_channel/client_channel_factory.cc
@ -1192,6 +1193,7 @@ add_library(grpc
third_party/nanopb/pb_encode.c third_party/nanopb/pb_encode.c
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@ -1477,6 +1479,7 @@ add_library(grpc_cronet
src/core/ext/filters/http/http_filters_plugin.cc src/core/ext/filters/http/http_filters_plugin.cc
src/core/ext/filters/http/message_compress/message_compress_filter.cc src/core/ext/filters/http/message_compress/message_compress_filter.cc
src/core/ext/filters/http/server/http_server_filter.cc src/core/ext/filters/http/server/http_server_filter.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc src/core/ext/filters/client_channel/client_channel_factory.cc
@ -1767,6 +1770,7 @@ add_library(grpc_test_util
src/core/lib/transport/transport.cc src/core/lib/transport/transport.cc
src/core/lib/transport/transport_op_string.cc src/core/lib/transport/transport_op_string.cc
src/core/lib/debug/trace.cc src/core/lib/debug/trace.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc src/core/ext/filters/client_channel/client_channel_factory.cc
@ -2032,6 +2036,7 @@ add_library(grpc_test_util_unsecure
src/core/lib/transport/transport.cc src/core/lib/transport/transport.cc
src/core/lib/transport/transport_op_string.cc src/core/lib/transport/transport_op_string.cc
src/core/lib/debug/trace.cc src/core/lib/debug/trace.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc src/core/ext/filters/client_channel/client_channel_factory.cc
@ -2316,6 +2321,7 @@ add_library(grpc_unsecure
src/core/ext/transport/chttp2/client/insecure/channel_create.cc src/core/ext/transport/chttp2/client/insecure/channel_create.cc
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
src/core/ext/transport/chttp2/client/chttp2_connector.cc src/core/ext/transport/chttp2/client/chttp2_connector.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc src/core/ext/filters/client_channel/client_channel_factory.cc
@ -2358,6 +2364,7 @@ add_library(grpc_unsecure
third_party/nanopb/pb_decode.c third_party/nanopb/pb_decode.c
third_party/nanopb/pb_encode.c third_party/nanopb/pb_encode.c
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/census/base_resources.cc src/core/ext/census/base_resources.cc
src/core/ext/census/context.cc src/core/ext/census/context.cc
@ -3051,6 +3058,7 @@ add_library(grpc++_cronet
src/core/ext/filters/http/http_filters_plugin.cc src/core/ext/filters/http/http_filters_plugin.cc
src/core/ext/filters/http/message_compress/message_compress_filter.cc src/core/ext/filters/http/message_compress/message_compress_filter.cc
src/core/ext/filters/http/server/http_server_filter.cc src/core/ext/filters/http/server/http_server_filter.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc src/core/ext/filters/client_channel/client_channel_factory.cc

@ -3153,6 +3153,7 @@ LIBGRPC_SRC = \
src/core/tsi/transport_security_adapter.cc \ src/core/tsi/transport_security_adapter.cc \
src/core/ext/transport/chttp2/server/chttp2_server.cc \ src/core/ext/transport/chttp2/server/chttp2_server.cc \
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc \ src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \ src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -3192,6 +3193,7 @@ LIBGRPC_SRC = \
third_party/nanopb/pb_encode.c \ third_party/nanopb/pb_encode.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \
@ -3476,6 +3478,7 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/filters/http/http_filters_plugin.cc \ src/core/ext/filters/http/http_filters_plugin.cc \
src/core/ext/filters/http/message_compress/message_compress_filter.cc \ src/core/ext/filters/http/message_compress/message_compress_filter.cc \
src/core/ext/filters/http/server/http_server_filter.cc \ src/core/ext/filters/http/server/http_server_filter.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \ src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -3764,6 +3767,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/transport/transport.cc \ src/core/lib/transport/transport.cc \
src/core/lib/transport/transport_op_string.cc \ src/core/lib/transport/transport_op_string.cc \
src/core/lib/debug/trace.cc \ src/core/lib/debug/trace.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \ src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -4019,6 +4023,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/transport/transport.cc \ src/core/lib/transport/transport.cc \
src/core/lib/transport/transport_op_string.cc \ src/core/lib/transport/transport_op_string.cc \
src/core/lib/debug/trace.cc \ src/core/lib/debug/trace.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \ src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -4280,6 +4285,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/transport/chttp2/client/insecure/channel_create.cc \ src/core/ext/transport/chttp2/client/insecure/channel_create.cc \
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc \ src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc \
src/core/ext/transport/chttp2/client/chttp2_connector.cc \ src/core/ext/transport/chttp2/client/chttp2_connector.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \ src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -4322,6 +4328,7 @@ LIBGRPC_UNSECURE_SRC = \
third_party/nanopb/pb_decode.c \ third_party/nanopb/pb_decode.c \
third_party/nanopb/pb_encode.c \ third_party/nanopb/pb_encode.c \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/census/base_resources.cc \ src/core/ext/census/base_resources.cc \
src/core/ext/census/context.cc \ src/core/ext/census/context.cc \
@ -4993,6 +5000,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/filters/http/http_filters_plugin.cc \ src/core/ext/filters/http/http_filters_plugin.cc \
src/core/ext/filters/http/message_compress/message_compress_filter.cc \ src/core/ext/filters/http/message_compress/message_compress_filter.cc \
src/core/ext/filters/http/server/http_server_filter.cc \ src/core/ext/filters/http/server/http_server_filter.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \ src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -8298,6 +8306,7 @@ LIBBENCHMARK_SRC = \
third_party/benchmark/src/commandlineflags.cc \ third_party/benchmark/src/commandlineflags.cc \
third_party/benchmark/src/complexity.cc \ third_party/benchmark/src/complexity.cc \
third_party/benchmark/src/console_reporter.cc \ third_party/benchmark/src/console_reporter.cc \
third_party/benchmark/src/counter.cc \
third_party/benchmark/src/csv_reporter.cc \ third_party/benchmark/src/csv_reporter.cc \
third_party/benchmark/src/json_reporter.cc \ third_party/benchmark/src/json_reporter.cc \
third_party/benchmark/src/reporter.cc \ third_party/benchmark/src/reporter.cc \

@ -853,6 +853,7 @@
'src/core/tsi/transport_security_adapter.cc', 'src/core/tsi/transport_security_adapter.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc', 'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc', 'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc', 'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc', 'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc', 'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -892,6 +893,7 @@
'third_party/nanopb/pb_encode.c', 'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',

@ -463,6 +463,7 @@ filegroups:
- grpc_trace_headers - grpc_trace_headers
- name: grpc_client_channel - name: grpc_client_channel
headers: headers:
- src/core/ext/filters/client_channel/backup_poller.h
- src/core/ext/filters/client_channel/client_channel.h - src/core/ext/filters/client_channel/client_channel.h
- src/core/ext/filters/client_channel/client_channel_factory.h - src/core/ext/filters/client_channel/client_channel_factory.h
- src/core/ext/filters/client_channel/connector.h - src/core/ext/filters/client_channel/connector.h
@ -482,6 +483,7 @@ filegroups:
- src/core/ext/filters/client_channel/subchannel_index.h - src/core/ext/filters/client_channel/subchannel_index.h
- src/core/ext/filters/client_channel/uri_parser.h - src/core/ext/filters/client_channel/uri_parser.h
src: src:
- src/core/ext/filters/client_channel/backup_poller.cc
- src/core/ext/filters/client_channel/channel_connectivity.cc - src/core/ext/filters/client_channel/channel_connectivity.cc
- src/core/ext/filters/client_channel/client_channel.cc - src/core/ext/filters/client_channel/client_channel.cc
- src/core/ext/filters/client_channel/client_channel_factory.cc - src/core/ext/filters/client_channel/client_channel_factory.cc
@ -590,6 +592,7 @@ filegroups:
uses: uses:
- grpc_base - grpc_base
- grpc_client_channel - grpc_client_channel
- grpc_lb_subchannel_list
- name: grpc_lb_policy_round_robin - name: grpc_lb_policy_round_robin
src: src:
- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc - src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@ -597,6 +600,15 @@ filegroups:
uses: uses:
- grpc_base - grpc_base
- grpc_client_channel - grpc_client_channel
- grpc_lb_subchannel_list
- name: grpc_lb_subchannel_list
headers:
- src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
src:
- src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
uses:
- grpc_base
- grpc_client_channel
- name: grpc_max_age_filter - name: grpc_max_age_filter
headers: headers:
- src/core/ext/filters/max_age/max_age_filter.h - src/core/ext/filters/max_age/max_age_filter.h

@ -278,6 +278,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/tsi/transport_security_adapter.cc \ src/core/tsi/transport_security_adapter.cc \
src/core/ext/transport/chttp2/server/chttp2_server.cc \ src/core/ext/transport/chttp2/server/chttp2_server.cc \
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc \ src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \ src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -317,6 +318,7 @@ if test "$PHP_GRPC" != "no"; then
third_party/nanopb/pb_encode.c \ third_party/nanopb/pb_encode.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \
@ -660,6 +662,7 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/census) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/census)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/census/gen) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/census/gen)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/pick_first) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/pick_first)

@ -255,6 +255,7 @@ if (PHP_GRPC != "no") {
"src\\core\\tsi\\transport_security_adapter.cc " + "src\\core\\tsi\\transport_security_adapter.cc " +
"src\\core\\ext\\transport\\chttp2\\server\\chttp2_server.cc " + "src\\core\\ext\\transport\\chttp2\\server\\chttp2_server.cc " +
"src\\core\\ext\\transport\\chttp2\\client\\secure\\secure_channel_create.cc " + "src\\core\\ext\\transport\\chttp2\\client\\secure\\secure_channel_create.cc " +
"src\\core\\ext\\filters\\client_channel\\backup_poller.cc " +
"src\\core\\ext\\filters\\client_channel\\channel_connectivity.cc " + "src\\core\\ext\\filters\\client_channel\\channel_connectivity.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel.cc " + "src\\core\\ext\\filters\\client_channel\\client_channel.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel_factory.cc " + "src\\core\\ext\\filters\\client_channel\\client_channel_factory.cc " +
@ -294,6 +295,7 @@ if (PHP_GRPC != "no") {
"third_party\\nanopb\\pb_encode.c " + "third_party\\nanopb\\pb_encode.c " +
"src\\core\\ext\\filters\\client_channel\\resolver\\fake\\fake_resolver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\fake\\fake_resolver.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first\\pick_first.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first\\pick_first.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\subchannel_list.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\dns_resolver_ares.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\dns_resolver_ares.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_posix.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_posix.cc " +

@@ -120,10 +120,10 @@ some configuration as environment variables that can be set.
   perform name resolution
   - ares - a DNS resolver based around the c-ares library
-* GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER
-  The channel connectivity watcher uses one extra thread to check the channel
-  state every 500 ms on the client side. It can help reconnect disconnected
-  client channels (mostly due to idleness), so that the next RPC on this channel
-  won't fail. Set to 1 to turn off this watcher and save a thread. Please note
-  this is a temporary work-around, it will be removed in the future once we have
-  support for automatically reestablishing failed connections.
+* GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS
+  Default: 5000
+  Declares the interval between two backup polls on client channels. These polls
+  are run in the timer thread so that gRPC can process connection failures while
+  there is no active polling thread. They help reconnect disconnected client
+  channels (mostly due to idleness), so that the next RPC on this channel won't
+  fail. Set to 0 to turn off the backup polls.

@ -299,6 +299,7 @@ Pod::Spec.new do |s|
'src/core/tsi/transport_security_adapter.h', 'src/core/tsi/transport_security_adapter.h',
'src/core/tsi/transport_security_interface.h', 'src/core/tsi/transport_security_interface.h',
'src/core/ext/transport/chttp2/server/chttp2_server.h', 'src/core/ext/transport/chttp2/server/chttp2_server.h',
'src/core/ext/filters/client_channel/backup_poller.h',
'src/core/ext/filters/client_channel/client_channel.h', 'src/core/ext/filters/client_channel/client_channel.h',
'src/core/ext/filters/client_channel/client_channel_factory.h', 'src/core/ext/filters/client_channel/client_channel_factory.h',
'src/core/ext/filters/client_channel/connector.h', 'src/core/ext/filters/client_channel/connector.h',
@ -448,6 +449,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.h', 'src/core/ext/filters/load_reporting/server_load_reporting_filter.h',
@ -668,6 +670,7 @@ Pod::Spec.new do |s|
'src/core/tsi/transport_security_adapter.cc', 'src/core/tsi/transport_security_adapter.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc', 'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc', 'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc', 'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc', 'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc', 'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -704,6 +707,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c', 'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
@ -802,6 +806,7 @@ Pod::Spec.new do |s|
'src/core/tsi/transport_security_adapter.h', 'src/core/tsi/transport_security_adapter.h',
'src/core/tsi/transport_security_interface.h', 'src/core/tsi/transport_security_interface.h',
'src/core/ext/transport/chttp2/server/chttp2_server.h', 'src/core/ext/transport/chttp2/server/chttp2_server.h',
'src/core/ext/filters/client_channel/backup_poller.h',
'src/core/ext/filters/client_channel/client_channel.h', 'src/core/ext/filters/client_channel/client_channel.h',
'src/core/ext/filters/client_channel/client_channel_factory.h', 'src/core/ext/filters/client_channel/client_channel_factory.h',
'src/core/ext/filters/client_channel/connector.h', 'src/core/ext/filters/client_channel/connector.h',
@ -951,6 +956,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.h', 'src/core/ext/filters/load_reporting/server_load_reporting_filter.h',

@ -132,8 +132,14 @@ EXPORTS
grpc_metadata_credentials_create_from_plugin grpc_metadata_credentials_create_from_plugin
grpc_secure_channel_create grpc_secure_channel_create
grpc_server_credentials_release grpc_server_credentials_release
grpc_ssl_server_certificate_config_create
grpc_ssl_server_certificate_config_destroy
grpc_ssl_server_credentials_create grpc_ssl_server_credentials_create
grpc_ssl_server_credentials_create_ex grpc_ssl_server_credentials_create_ex
grpc_ssl_server_credentials_create_options_using_config
grpc_ssl_server_credentials_create_options_using_config_fetcher
grpc_ssl_server_credentials_options_destroy
grpc_ssl_server_credentials_create_with_options
grpc_server_add_secure_http2_port grpc_server_add_secure_http2_port
grpc_call_set_credentials grpc_call_set_credentials
grpc_server_credentials_set_auth_metadata_processor grpc_server_credentials_set_auth_metadata_processor

@ -230,6 +230,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/tsi/transport_security_adapter.h ) s.files += %w( src/core/tsi/transport_security_adapter.h )
s.files += %w( src/core/tsi/transport_security_interface.h ) s.files += %w( src/core/tsi/transport_security_interface.h )
s.files += %w( src/core/ext/transport/chttp2/server/chttp2_server.h ) s.files += %w( src/core/ext/transport/chttp2/server/chttp2_server.h )
s.files += %w( src/core/ext/filters/client_channel/backup_poller.h )
s.files += %w( src/core/ext/filters/client_channel/client_channel.h ) s.files += %w( src/core/ext/filters/client_channel/client_channel.h )
s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.h ) s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.h )
s.files += %w( src/core/ext/filters/client_channel/connector.h ) s.files += %w( src/core/ext/filters/client_channel/connector.h )
@ -383,6 +384,7 @@ Gem::Specification.new do |s|
s.files += %w( third_party/nanopb/pb_decode.h ) s.files += %w( third_party/nanopb/pb_decode.h )
s.files += %w( third_party/nanopb/pb_encode.h ) s.files += %w( third_party/nanopb/pb_encode.h )
s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h ) s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.h )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h )
s.files += %w( src/core/ext/filters/load_reporting/server_load_reporting_filter.h ) s.files += %w( src/core/ext/filters/load_reporting/server_load_reporting_filter.h )
@ -603,6 +605,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/tsi/transport_security_adapter.cc ) s.files += %w( src/core/tsi/transport_security_adapter.cc )
s.files += %w( src/core/ext/transport/chttp2/server/chttp2_server.cc ) s.files += %w( src/core/ext/transport/chttp2/server/chttp2_server.cc )
s.files += %w( src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc ) s.files += %w( src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc )
s.files += %w( src/core/ext/filters/client_channel/backup_poller.cc )
s.files += %w( src/core/ext/filters/client_channel/channel_connectivity.cc ) s.files += %w( src/core/ext/filters/client_channel/channel_connectivity.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel.cc ) s.files += %w( src/core/ext/filters/client_channel/client_channel.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.cc ) s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.cc )
@ -642,6 +645,7 @@ Gem::Specification.new do |s|
s.files += %w( third_party/nanopb/pb_encode.c ) s.files += %w( third_party/nanopb/pb_encode.c )
s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc )

@ -419,6 +419,7 @@
'src/core/tsi/transport_security_adapter.cc', 'src/core/tsi/transport_security_adapter.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc', 'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc', 'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc', 'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc', 'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc', 'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -458,6 +459,7 @@
'third_party/nanopb/pb_encode.c', 'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
@ -661,6 +663,7 @@
'src/core/lib/transport/transport.cc', 'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc', 'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/debug/trace.cc', 'src/core/lib/debug/trace.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc', 'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc', 'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc', 'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -868,6 +871,7 @@
'src/core/lib/transport/transport.cc', 'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc', 'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/debug/trace.cc', 'src/core/lib/debug/trace.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc', 'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc', 'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc', 'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -1093,6 +1097,7 @@
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc', 'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc', 'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc', 'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc', 'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc', 'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc', 'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -1135,6 +1140,7 @@
'third_party/nanopb/pb_decode.c', 'third_party/nanopb/pb_decode.c',
'third_party/nanopb/pb_encode.c', 'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/census/base_resources.cc', 'src/core/ext/census/base_resources.cc',
'src/core/ext/census/context.cc', 'src/core/ext/census/context.cc',
@ -2316,6 +2322,7 @@
'third_party/benchmark/src/commandlineflags.cc', 'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc', 'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc', 'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc', 'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc', 'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/reporter.cc', 'third_party/benchmark/src/reporter.cc',

@ -92,7 +92,7 @@ class Alarm : private GrpcLibraryCodegen {
} }
private: private:
class AlarmEntry : public CompletionQueueTag { class AlarmEntry : public internal::CompletionQueueTag {
public: public:
AlarmEntry(void* tag) : tag_(tag) {} AlarmEntry(void* tag) : tag_(tag) {}
void Set(void* tag) { tag_ = tag; } void Set(void* tag) { tag_ = tag; }

@ -32,7 +32,7 @@ struct grpc_channel;
namespace grpc { namespace grpc {
/// Channels represent a connection to an endpoint. Created by \a CreateChannel. /// Channels represent a connection to an endpoint. Created by \a CreateChannel.
class Channel final : public ChannelInterface, class Channel final : public ChannelInterface,
public CallHook, public internal::CallHook,
public std::enable_shared_from_this<Channel>, public std::enable_shared_from_this<Channel>,
private GrpcLibraryCodegen { private GrpcLibraryCodegen {
public: public:
@ -51,18 +51,16 @@ class Channel final : public ChannelInterface,
private: private:
template <class InputMessage, class OutputMessage> template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel, friend class internal::BlockingUnaryCallImpl;
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend std::shared_ptr<Channel> CreateChannelInternal( friend std::shared_ptr<Channel> CreateChannelInternal(
const grpc::string& host, grpc_channel* c_channel); const grpc::string& host, grpc_channel* c_channel);
Channel(const grpc::string& host, grpc_channel* c_channel); Channel(const grpc::string& host, grpc_channel* c_channel);
Call CreateCall(const RpcMethod& method, ClientContext* context, internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context,
CompletionQueue* cq) override; CompletionQueue* cq) override;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override; void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) override;
void* RegisterMethod(const char* method) override; void* RegisterMethod(const char* method) override;
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed, void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,

@ -30,6 +30,7 @@ namespace grpc {
class CompletionQueue; class CompletionQueue;
namespace internal {
/// Common interface for all client side asynchronous streaming. /// Common interface for all client side asynchronous streaming.
class ClientAsyncStreamingInterface { class ClientAsyncStreamingInterface {
public: public:
@ -151,15 +152,16 @@ class AsyncWriterInterface {
} }
}; };
} // namespace internal
template <class R> template <class R>
class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface, class ClientAsyncReaderInterface
public AsyncReaderInterface<R> {}; : public internal::ClientAsyncStreamingInterface,
public internal::AsyncReaderInterface<R> {};
/// Async client-side API for doing server-streaming RPCs, namespace internal {
/// where the incoming message stream coming from the server has
/// messages of type \a R.
template <class R> template <class R>
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> { class ClientAsyncReaderFactory {
public: public:
/// Create a stream object. /// Create a stream object.
/// Write the first request out if \a start is set. /// Write the first request out if \a start is set.
@ -169,16 +171,25 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
/// Note that \a context will be used to fill in custom initial metadata /// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call. /// used to send to the server when starting the call.
template <class W> template <class W>
static ClientAsyncReader* Create(ChannelInterface* channel, static ClientAsyncReader<R>* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method, CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request, ClientContext* context, const W& request,
bool start, void* tag) { bool start, void* tag) {
Call call = channel->CreateCall(method, context, cq); ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc( return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader))) call.call(), sizeof(ClientAsyncReader<R>)))
ClientAsyncReader(call, context, request, start, tag); ClientAsyncReader<R>(call, context, request, start, tag);
} }
};
} // namespace internal
/// Async client-side API for doing server-streaming RPCs,
/// where the incoming message stream coming from the server has
/// messages of type \a R.
template <class R>
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
// always allocated against a call arena, no memory free required // always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) { static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReader)); assert(size == sizeof(ClientAsyncReader));
@ -233,9 +244,10 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
} }
private: private:
friend class internal::ClientAsyncReaderFactory<R>;
template <class W> template <class W>
ClientAsyncReader(Call call, ClientContext* context, const W& request, ClientAsyncReader(::grpc::internal::Call call, ClientContext* context,
bool start, void* tag) const W& request, bool start, void* tag)
: context_(context), call_(call), started_(start) { : context_(context), call_(call), started_(start) {
// TODO(ctiller): don't assert // TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok()); GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
@ -255,19 +267,27 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
} }
ClientContext* context_; ClientContext* context_;
Call call_; ::grpc::internal::Call call_;
bool started_; bool started_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose> ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
init_ops_; init_ops_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_; meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
}; };
/// Common interface for client side asynchronous writing. /// Common interface for client side asynchronous writing.
template <class W> template <class W>
class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface, class ClientAsyncWriterInterface
public AsyncWriterInterface<W> { : public internal::ClientAsyncStreamingInterface,
public internal::AsyncWriterInterface<W> {
public: public:
/// Signal the client is done with the writes (half-close the client stream). /// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read /// Thread-safe with respect to \a AsyncReaderInterface::Read
@ -276,11 +296,9 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
virtual void WritesDone(void* tag) = 0; virtual void WritesDone(void* tag) = 0;
}; };
/// Async API on the client side for doing client-streaming RPCs, namespace internal {
/// where the outgoing message stream going to the server contains
/// messages of type \a W.
template <class W> template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> { class ClientAsyncWriterFactory {
public: public:
/// Create a stream object. /// Create a stream object.
/// Start the RPC if \a start is set /// Start the RPC if \a start is set
@ -294,16 +312,25 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// message from the server upon a successful call to the \a Finish /// message from the server upon a successful call to the \a Finish
/// method of this instance. /// method of this instance.
template <class R> template <class R>
static ClientAsyncWriter* Create(ChannelInterface* channel, static ClientAsyncWriter<W>* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method, CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response, ClientContext* context, R* response,
bool start, void* tag) { bool start, void* tag) {
Call call = channel->CreateCall(method, context, cq); ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc( return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncWriter))) call.call(), sizeof(ClientAsyncWriter<W>)))
ClientAsyncWriter(call, context, response, start, tag); ClientAsyncWriter<W>(call, context, response, start, tag);
} }
};
} // namespace internal
/// Async API on the client side for doing client-streaming RPCs,
/// where the outgoing message stream going to the server contains
/// messages of type \a W.
template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
// always allocated against a call arena, no memory free required // always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) { static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncWriter)); assert(size == sizeof(ClientAsyncWriter));
@ -376,9 +403,10 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
} }
private: private:
friend class internal::ClientAsyncWriterFactory<W>;
template <class R> template <class R>
ClientAsyncWriter(Call call, ClientContext* context, R* response, bool start, ClientAsyncWriter(::grpc::internal::Call call, ClientContext* context,
void* tag) R* response, bool start, void* tag)
: context_(context), call_(call), started_(start) { : context_(context), call_(call), started_(start) {
finish_ops_.RecvMessage(response); finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage(); finish_ops_.AllowNoMessage();
@ -401,13 +429,17 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
} }
ClientContext* context_; ClientContext* context_;
Call call_; ::grpc::internal::Call call_;
bool started_; bool started_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose> meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
write_ops_; write_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage, ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
CallOpClientRecvStatus> ::grpc::internal::CallOpGenericRecvMessage,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_; finish_ops_;
}; };
@ -415,9 +447,10 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// where the client-to-server message stream has messages of type \a W, /// where the client-to-server message stream has messages of type \a W,
/// and the server-to-client message stream has messages of type \a R. /// and the server-to-client message stream has messages of type \a R.
template <class W, class R> template <class W, class R>
class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface, class ClientAsyncReaderWriterInterface
public AsyncWriterInterface<W>, : public internal::ClientAsyncStreamingInterface,
public AsyncReaderInterface<R> { public internal::AsyncWriterInterface<W>,
public internal::AsyncReaderInterface<R> {
public: public:
/// Signal the client is done with the writes (half-close the client stream). /// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read /// Thread-safe with respect to \a AsyncReaderInterface::Read
@ -426,13 +459,9 @@ class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
virtual void WritesDone(void* tag) = 0; virtual void WritesDone(void* tag) = 0;
}; };
/// Async client-side interface for bi-directional streaming, namespace internal {
/// where the outgoing message stream going to the server
/// has messages of type \a W, and the incoming message stream coming
/// from the server has messages of type \a R.
template <class W, class R> template <class W, class R>
class ClientAsyncReaderWriter final class ClientAsyncReaderWriterFactory {
: public ClientAsyncReaderWriterInterface<W, R> {
public: public:
/// Create a stream object. /// Create a stream object.
/// Start the RPC request if \a start is set. /// Start the RPC request if \a start is set.
@ -441,18 +470,27 @@ class ClientAsyncReaderWriter final
/// nullptr and the actual call must be initiated by StartCall /// nullptr and the actual call must be initiated by StartCall
/// Note that \a context will be used to fill in custom initial metadata /// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call. /// used to send to the server when starting the call.
static ClientAsyncReaderWriter* Create(ChannelInterface* channel, static ClientAsyncReaderWriter<W, R>* Create(
CompletionQueue* cq, ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, const ::grpc::internal::RpcMethod& method, ClientContext* context,
ClientContext* context, bool start, bool start, void* tag) {
void* tag) { ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc( return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReaderWriter))) call.call(), sizeof(ClientAsyncReaderWriter<W, R>)))
ClientAsyncReaderWriter(call, context, start, tag); ClientAsyncReaderWriter<W, R>(call, context, start, tag);
} }
};
} // namespace internal
/// Async client-side interface for bi-directional streaming,
/// where the outgoing message stream going to the server
/// has messages of type \a W, and the incoming message stream coming
/// from the server has messages of type \a R.
template <class W, class R>
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
public:
// always allocated against a call arena, no memory free required // always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) { static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReaderWriter)); assert(size == sizeof(ClientAsyncReaderWriter));
@ -532,8 +570,9 @@ class ClientAsyncReaderWriter final
} }
private: private:
ClientAsyncReaderWriter(Call call, ClientContext* context, bool start, friend class internal::ClientAsyncReaderWriterFactory<W, R>;
void* tag) ClientAsyncReaderWriter(::grpc::internal::Call call, ClientContext* context,
bool start, void* tag)
: context_(context), call_(call), started_(start) { : context_(context), call_(call), started_(start) {
if (start) { if (start) {
StartCallInternal(tag); StartCallInternal(tag);
@ -554,18 +593,26 @@ class ClientAsyncReaderWriter final
} }
ClientContext* context_; ClientContext* context_;
Call call_; ::grpc::internal::Call call_;
bool started_; bool started_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_; meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose> ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
write_ops_; write_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
}; };
template <class W, class R> template <class W, class R>
class ServerAsyncReaderInterface : public ServerAsyncStreamingInterface, class ServerAsyncReaderInterface
public AsyncReaderInterface<R> { : public internal::ServerAsyncStreamingInterface,
public internal::AsyncReaderInterface<R> {
public: public:
/// Indicate that the stream is to be finished with a certain status code /// Indicate that the stream is to be finished with a certain status code
/// and also send out \a msg response to the client. /// and also send out \a msg response to the client.
@ -692,20 +739,23 @@ class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
} }
private: private:
void BindCall(Call* call) override { call_ = *call; } void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
Call call_; ::grpc::internal::Call call_;
ServerContext* ctx_; ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
CallOpSet<CallOpRecvMessage<R>> read_ops_; meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
CallOpServerSendStatus> ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
finish_ops_; finish_ops_;
}; };
template <class W> template <class W>
class ServerAsyncWriterInterface : public ServerAsyncStreamingInterface, class ServerAsyncWriterInterface
public AsyncWriterInterface<W> { : public internal::ServerAsyncStreamingInterface,
public internal::AsyncWriterInterface<W> {
public: public:
/// Indicate that the stream is to be finished with a certain status code. /// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate /// Request notification for when the server has sent the appropriate
@ -823,7 +873,7 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
} }
private: private:
void BindCall(Call* call) override { call_ = *call; } void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
template <class T> template <class T>
void EnsureInitialMetadataSent(T* ops) { void EnsureInitialMetadataSent(T* ops) {
@ -837,20 +887,25 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
} }
} }
Call call_; ::grpc::internal::Call call_;
ServerContext* ctx_; ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, meta_ops_;
CallOpServerSendStatus> ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
write_ops_; write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
}; };
/// Server-side interface for asynchronous bi-directional streaming. /// Server-side interface for asynchronous bi-directional streaming.
template <class W, class R> template <class W, class R>
class ServerAsyncReaderWriterInterface : public ServerAsyncStreamingInterface, class ServerAsyncReaderWriterInterface
public AsyncWriterInterface<W>, : public internal::ServerAsyncStreamingInterface,
public AsyncReaderInterface<R> { public internal::AsyncWriterInterface<W>,
public internal::AsyncReaderInterface<R> {
public: public:
/// Indicate that the stream is to be finished with a certain status code. /// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate /// Request notification for when the server has sent the appropriate
@ -980,7 +1035,7 @@ class ServerAsyncReaderWriter final
private: private:
friend class ::grpc::Server; friend class ::grpc::Server;
void BindCall(Call* call) override { call_ = *call; } void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
template <class T> template <class T>
void EnsureInitialMetadataSent(T* ops) { void EnsureInitialMetadataSent(T* ops) {
@ -994,14 +1049,18 @@ class ServerAsyncReaderWriter final
} }
} }
Call call_; ::grpc::internal::Call call_;
ServerContext* ctx_; ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
CallOpSet<CallOpRecvMessage<R>> read_ops_; meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
CallOpServerSendStatus> ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
write_ops_; write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
}; };
} // namespace grpc } // namespace grpc

@ -69,11 +69,9 @@ class ClientAsyncResponseReaderInterface {
virtual void Finish(R* msg, Status* status, void* tag) = 0; virtual void Finish(R* msg, Status* status, void* tag) = 0;
}; };
/// Async API for client-side unary RPCs, where the message response namespace internal {
/// received from the server is of type \a R.
template <class R> template <class R>
class ClientAsyncResponseReader final class ClientAsyncResponseReaderFactory {
: public ClientAsyncResponseReaderInterface<R> {
public: public:
/// Start a call and write the request out if \a start is set. /// Start a call and write the request out if \a start is set.
/// \a tag will be notified on \a cq when the call has been started (i.e. /// \a tag will be notified on \a cq when the call has been started (i.e.
@ -82,17 +80,24 @@ class ClientAsyncResponseReader final
/// Note that \a context will be used to fill in custom initial metadata /// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call. /// used to send to the server when starting the call.
template <class W> template <class W>
static ClientAsyncResponseReader* Create(ChannelInterface* channel, static ClientAsyncResponseReader<R>* Create(
CompletionQueue* cq, ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, const ::grpc::internal::RpcMethod& method, ClientContext* context,
ClientContext* context,
const W& request, bool start) { const W& request, bool start) {
Call call = channel->CreateCall(method, context, cq); ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc( return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader))) call.call(), sizeof(ClientAsyncResponseReader<R>)))
ClientAsyncResponseReader(call, context, request, start); ClientAsyncResponseReader<R>(call, context, request, start);
} }
};
} // namespace internal
/// Async API for client-side unary RPCs, where the message response
/// received from the server is of type \a R.
template <class R>
class ClientAsyncResponseReader final
: public ClientAsyncResponseReaderInterface<R> {
public:
// always allocated against a call arena, no memory free required // always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) { static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncResponseReader)); assert(size == sizeof(ClientAsyncResponseReader));
@ -137,13 +142,14 @@ class ClientAsyncResponseReader final
} }
private: private:
friend class internal::ClientAsyncResponseReaderFactory<R>;
ClientContext* const context_; ClientContext* const context_;
Call call_; ::grpc::internal::Call call_;
bool started_; bool started_;
template <class W> template <class W>
ClientAsyncResponseReader(Call call, ClientContext* context, const W& request, ClientAsyncResponseReader(::grpc::internal::Call call, ClientContext* context,
bool start) const W& request, bool start)
: context_(context), call_(call), started_(start) { : context_(context), call_(call), started_(start) {
// Bind the metadata at time of StartCallInternal but set up the rest here // Bind the metadata at time of StartCallInternal but set up the rest here
// TODO(ctiller): don't assert // TODO(ctiller): don't assert
@ -162,19 +168,23 @@ class ClientAsyncResponseReader final
static void* operator new(std::size_t size); static void* operator new(std::size_t size);
static void* operator new(std::size_t size, void* p) { return p; } static void* operator new(std::size_t size, void* p) { return p; }
SneakyCallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, ::grpc::internal::SneakyCallOpSet<::grpc::internal::CallOpSendInitialMetadata,
CallOpClientSendClose> ::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
init_buf; init_buf;
CallOpSet<CallOpRecvInitialMetadata> meta_buf; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>, meta_buf;
CallOpClientRecvStatus> ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>,
::grpc::internal::CallOpClientRecvStatus>
finish_buf; finish_buf;
}; };
/// Async server-side API for handling unary calls, where the single /// Async server-side API for handling unary calls, where the single
/// response message sent to the client is of type \a W. /// response message sent to the client is of type \a W.
template <class W> template <class W>
class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface { class ServerAsyncResponseWriter final
: public internal::ServerAsyncStreamingInterface {
public: public:
explicit ServerAsyncResponseWriter(ServerContext* ctx) explicit ServerAsyncResponseWriter(ServerContext* ctx)
: call_(nullptr, nullptr, nullptr), ctx_(ctx) {} : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
@ -262,13 +272,15 @@ class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
} }
private: private:
void BindCall(Call* call) override { call_ = *call; } void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
Call call_; ::grpc::internal::Call call_;
ServerContext* ctx_; ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_buf_; ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, meta_buf_;
CallOpServerSendStatus> ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
finish_buf_; finish_buf_;
}; };

@ -31,18 +31,19 @@
namespace grpc { namespace grpc {
namespace internal {
class CallOpSendMessage;
template <class R> template <class R>
class CallOpRecvMessage; class CallOpRecvMessage;
class CallOpGenericRecvMessage;
class MethodHandler; class MethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler; class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler; class ServerStreamingHandler;
namespace CallOpGenericRecvMessageHelper {
template <class R> template <class R>
class DeserializeFuncType; class DeserializeFuncType;
} // namespace CallOpGenericRecvMessageHelper } // namespace internal
/// A sequence of bytes. /// A sequence of bytes.
class ByteBuffer final { class ByteBuffer final {
public: public:
@ -97,17 +98,17 @@ class ByteBuffer final {
private: private:
friend class SerializationTraits<ByteBuffer, void>; friend class SerializationTraits<ByteBuffer, void>;
friend class CallOpSendMessage; friend class internal::CallOpSendMessage;
template <class R> template <class R>
friend class CallOpRecvMessage; friend class internal::CallOpRecvMessage;
friend class CallOpGenericRecvMessage; friend class internal::CallOpGenericRecvMessage;
friend class MethodHandler; friend class internal::MethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler; friend class internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler; friend class internal::ServerStreamingHandler;
template <class R> template <class R>
friend class CallOpGenericRecvMessageHelper::DeserializeFuncType; friend class internal::DeserializeFuncType;
grpc_byte_buffer* buffer_; grpc_byte_buffer* buffer_;

@ -43,11 +43,13 @@
namespace grpc { namespace grpc {
class ByteBuffer; class ByteBuffer;
class Call;
class CallHook;
class CompletionQueue; class CompletionQueue;
extern CoreCodegenInterface* g_core_codegen_interface; extern CoreCodegenInterface* g_core_codegen_interface;
namespace internal {
class Call;
class CallHook;
const char kBinaryErrorDetailsKey[] = "grpc-status-details-bin"; const char kBinaryErrorDetailsKey[] = "grpc-status-details-bin";
// TODO(yangg) if the map is changed before we send, the pointers will be a // TODO(yangg) if the map is changed before we send, the pointers will be a
@ -75,6 +77,7 @@ inline grpc_metadata* FillMetadataArray(
} }
return metadata_array; return metadata_array;
} }
} // namespace internal
/// Per-message write options. /// Per-message write options.
class WriteOptions { class WriteOptions {
@ -199,6 +202,7 @@ class WriteOptions {
bool last_message_; bool last_message_;
}; };
namespace internal {
/// Default argument for CallOpSet. I is unused by the class, but can be /// Default argument for CallOpSet. I is unused by the class, but can be
/// used for generating multiple names for the same thing. /// used for generating multiple names for the same thing.
template <int I> template <int I>
@ -387,7 +391,6 @@ class CallOpRecvMessage {
bool allow_not_getting_message_; bool allow_not_getting_message_;
}; };
namespace CallOpGenericRecvMessageHelper {
class DeserializeFunc { class DeserializeFunc {
public: public:
virtual Status Deserialize(ByteBuffer* buf) = 0; virtual Status Deserialize(ByteBuffer* buf) = 0;
@ -407,7 +410,6 @@ class DeserializeFuncType final : public DeserializeFunc {
private: private:
R* message_; // Not a managed pointer because management is external to this R* message_; // Not a managed pointer because management is external to this
}; };
} // namespace CallOpGenericRecvMessageHelper
class CallOpGenericRecvMessage { class CallOpGenericRecvMessage {
public: public:
@ -418,8 +420,7 @@ class CallOpGenericRecvMessage {
void RecvMessage(R* message) { void RecvMessage(R* message) {
// Use an explicit base class pointer to avoid resolution error in the // Use an explicit base class pointer to avoid resolution error in the
// following unique_ptr::reset for some old implementations. // following unique_ptr::reset for some old implementations.
CallOpGenericRecvMessageHelper::DeserializeFunc* func = DeserializeFunc* func = new DeserializeFuncType<R>(message);
new CallOpGenericRecvMessageHelper::DeserializeFuncType<R>(message);
deserialize_.reset(func); deserialize_.reset(func);
} }
@ -459,7 +460,7 @@ class CallOpGenericRecvMessage {
} }
private: private:
std::unique_ptr<CallOpGenericRecvMessageHelper::DeserializeFunc> deserialize_; std::unique_ptr<DeserializeFunc> deserialize_;
ByteBuffer recv_buf_; ByteBuffer recv_buf_;
bool allow_not_getting_message_; bool allow_not_getting_message_;
}; };
@ -714,7 +715,7 @@ class Call final {
grpc_call* call_; grpc_call* call_;
int max_receive_message_size_; int max_receive_message_size_;
}; };
} // namespace internal
} // namespace grpc } // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CALL_H #endif // GRPCXX_IMPL_CODEGEN_CALL_H

@ -21,6 +21,7 @@
namespace grpc { namespace grpc {
namespace internal {
class CallOpSetInterface; class CallOpSetInterface;
class Call; class Call;
@ -31,6 +32,7 @@ class CallHook {
virtual ~CallHook() {} virtual ~CallHook() {}
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0; virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
}; };
} // namespace internal
} // namespace grpc } // namespace grpc

@ -24,10 +24,8 @@
#include <grpc/impl/codegen/connectivity_state.h> #include <grpc/impl/codegen/connectivity_state.h>
namespace grpc { namespace grpc {
class Call; class ChannelInterface;
class ClientContext; class ClientContext;
class RpcMethod;
class CallOpSetInterface;
class CompletionQueue; class CompletionQueue;
template <class R> template <class R>
@ -36,14 +34,22 @@ template <class W>
class ClientWriter; class ClientWriter;
template <class W, class R> template <class W, class R>
class ClientReaderWriter; class ClientReaderWriter;
namespace internal {
class Call;
class CallOpSetInterface;
class RpcMethod;
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl;
template <class R> template <class R>
class ClientAsyncReader; class ClientAsyncReaderFactory;
template <class W> template <class W>
class ClientAsyncWriter; class ClientAsyncWriterFactory;
template <class W, class R> template <class W, class R>
class ClientAsyncReaderWriter; class ClientAsyncReaderWriterFactory;
template <class R> template <class R>
class ClientAsyncResponseReader; class ClientAsyncResponseReaderFactory;
} // namespace internal
/// Codegen interface for \a grpc::Channel. /// Codegen interface for \a grpc::Channel.
class ChannelInterface { class ChannelInterface {
@ -88,23 +94,21 @@ class ChannelInterface {
template <class W, class R> template <class W, class R>
friend class ::grpc::ClientReaderWriter; friend class ::grpc::ClientReaderWriter;
template <class R> template <class R>
friend class ::grpc::ClientAsyncReader; friend class ::grpc::internal::ClientAsyncReaderFactory;
template <class W> template <class W>
friend class ::grpc::ClientAsyncWriter; friend class ::grpc::internal::ClientAsyncWriterFactory;
template <class W, class R> template <class W, class R>
friend class ::grpc::ClientAsyncReaderWriter; friend class ::grpc::internal::ClientAsyncReaderWriterFactory;
template <class R> template <class R>
friend class ::grpc::ClientAsyncResponseReader; friend class ::grpc::internal::ClientAsyncResponseReaderFactory;
template <class InputMessage, class OutputMessage> template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel, friend class ::grpc::internal::BlockingUnaryCallImpl;
const RpcMethod& method, friend class ::grpc::internal::RpcMethod;
virtual internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context, ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend class ::grpc::RpcMethod;
virtual Call CreateCall(const RpcMethod& method, ClientContext* context,
CompletionQueue* cq) = 0; CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0; virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
virtual void* RegisterMethod(const char* method) = 0; virtual void* RegisterMethod(const char* method) = 0;
virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed, virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline, gpr_timespec deadline,
@ -112,7 +116,6 @@ class ChannelInterface {
virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed, virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) = 0; gpr_timespec deadline) = 0;
}; };
} // namespace grpc } // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CHANNEL_INTERFACE_H #endif // GRPCXX_IMPL_CODEGEN_CHANNEL_INTERFACE_H

@ -60,7 +60,16 @@ class Channel;
class ChannelInterface; class ChannelInterface;
class CompletionQueue; class CompletionQueue;
class CallCredentials; class CallCredentials;
class ClientContext;
namespace internal {
class RpcMethod; class RpcMethod;
class CallOpClientRecvStatus;
class CallOpRecvInitialMetadata;
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl;
} // namespace internal
template <class R> template <class R>
class ClientReader; class ClientReader;
template <class W> template <class W>
@ -345,8 +354,8 @@ class ClientContext {
ClientContext& operator=(const ClientContext&); ClientContext& operator=(const ClientContext&);
friend class ::grpc::testing::InteropClientContextInspector; friend class ::grpc::testing::InteropClientContextInspector;
friend class CallOpClientRecvStatus; friend class ::grpc::internal::CallOpClientRecvStatus;
friend class CallOpRecvInitialMetadata; friend class ::grpc::internal::CallOpRecvInitialMetadata;
friend class Channel; friend class Channel;
template <class R> template <class R>
friend class ::grpc::ClientReader; friend class ::grpc::ClientReader;
@ -363,11 +372,7 @@ class ClientContext {
template <class R> template <class R>
friend class ::grpc::ClientAsyncResponseReader; friend class ::grpc::ClientAsyncResponseReader;
template <class InputMessage, class OutputMessage> template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel, friend class ::grpc::internal::BlockingUnaryCallImpl;
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
grpc_call* call() const { return call_; } grpc_call* call() const { return call_; }
void set_call(grpc_call* call, const std::shared_ptr<Channel>& channel); void set_call(grpc_call* call, const std::shared_ptr<Channel>& channel);
@ -399,8 +404,8 @@ class ClientContext {
mutable std::shared_ptr<const AuthContext> auth_context_; mutable std::shared_ptr<const AuthContext> auth_context_;
struct census_context* census_context_; struct census_context* census_context_;
std::multimap<grpc::string, grpc::string> send_initial_metadata_; std::multimap<grpc::string, grpc::string> send_initial_metadata_;
MetadataMap recv_initial_metadata_; internal::MetadataMap recv_initial_metadata_;
MetadataMap trailing_metadata_; internal::MetadataMap trailing_metadata_;
grpc_call* propagate_from_call_; grpc_call* propagate_from_call_;
PropagationOptions propagation_options_; PropagationOptions propagation_options_;

@ -30,13 +30,25 @@ namespace grpc {
class Channel; class Channel;
class ClientContext; class ClientContext;
class CompletionQueue; class CompletionQueue;
class RpcMethod;
namespace internal {
class RpcMethod;
/// Wrapper that performs a blocking unary call /// Wrapper that performs a blocking unary call
template <class InputMessage, class OutputMessage> template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method, Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const InputMessage& request, ClientContext* context, const InputMessage& request,
OutputMessage* result) { OutputMessage* result) {
return BlockingUnaryCallImpl<InputMessage, OutputMessage>(
channel, method, context, request, result)
.status();
};
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl {
public:
BlockingUnaryCallImpl(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result) {
CompletionQueue cq(grpc_completion_queue_attributes{ CompletionQueue cq(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}); // Pluckable completion queue GRPC_CQ_DEFAULT_POLLING}); // Pluckable completion queue
@ -45,28 +57,33 @@ Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
CallOpRecvInitialMetadata, CallOpRecvMessage<OutputMessage>, CallOpRecvInitialMetadata, CallOpRecvMessage<OutputMessage>,
CallOpClientSendClose, CallOpClientRecvStatus> CallOpClientSendClose, CallOpClientRecvStatus>
ops; ops;
Status status = ops.SendMessage(request); status_ = ops.SendMessage(request);
if (!status.ok()) { if (!status_.ok()) {
return status; return;
} }
ops.SendInitialMetadata(context->send_initial_metadata_, ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags()); context->initial_metadata_flags());
ops.RecvInitialMetadata(context); ops.RecvInitialMetadata(context);
ops.RecvMessage(result); ops.RecvMessage(result);
ops.ClientSendClose(); ops.ClientSendClose();
ops.ClientRecvStatus(context, &status); ops.ClientRecvStatus(context, &status_);
call.PerformOps(&ops); call.PerformOps(&ops);
if (cq.Pluck(&ops)) { if (cq.Pluck(&ops)) {
if (!ops.got_message && status.ok()) { if (!ops.got_message && status_.ok()) {
return Status(StatusCode::UNIMPLEMENTED, status_ = Status(StatusCode::UNIMPLEMENTED,
"No message returned for unary request"); "No message returned for unary request");
} }
} else { } else {
GPR_CODEGEN_ASSERT(!status.ok()); GPR_CODEGEN_ASSERT(!status_.ok());
} }
return status;
} }
Status status() { return status_; }
private:
Status status_;
};
} // namespace internal
} // namespace grpc } // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CLIENT_UNARY_CALL_H #endif // GRPCXX_IMPL_CODEGEN_CLIENT_UNARY_CALL_H

@ -56,7 +56,19 @@ class ServerWriter;
namespace internal { namespace internal {
template <class W, class R> template <class W, class R>
class ServerReaderWriterBody; class ServerReaderWriterBody;
} } // namespace internal
class Channel;
class ChannelInterface;
class ClientContext;
class CompletionQueue;
class Server;
class ServerBuilder;
class ServerContext;
namespace internal {
class CompletionQueueTag;
class RpcMethod;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler; class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
@ -66,16 +78,11 @@ class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler; class BidiStreamingHandler;
class UnknownMethodHandler; class UnknownMethodHandler;
template <class Streamer, bool WriteNeeded>
class Channel; class TemplatedBidiStreamingHandler;
class ChannelInterface; template <class InputMessage, class OutputMessage>
class ClientContext; class BlockingUnaryCallImpl;
class CompletionQueueTag; } // namespace internal
class CompletionQueue;
class RpcMethod;
class Server;
class ServerBuilder;
class ServerContext;
extern CoreCodegenInterface* g_core_codegen_interface; extern CoreCodegenInterface* g_core_codegen_interface;
@ -220,22 +227,18 @@ class CompletionQueue : private GrpcLibraryCodegen {
template <class W, class R> template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody; friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler; friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler; friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler; friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded> template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler; friend class ::grpc::internal::TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler; friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::Server; friend class ::grpc::Server;
friend class ::grpc::ServerContext; friend class ::grpc::ServerContext;
template <class InputMessage, class OutputMessage> template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel, friend class ::grpc::internal::BlockingUnaryCallImpl;
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
/// EXPERIMENTAL /// EXPERIMENTAL
/// Creates a Thread Local cache to store the first event /// Creates a Thread Local cache to store the first event
@ -256,7 +259,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// Wraps \a grpc_completion_queue_pluck. /// Wraps \a grpc_completion_queue_pluck.
/// \warning Must not be mixed with calls to \a Next. /// \warning Must not be mixed with calls to \a Next.
bool Pluck(CompletionQueueTag* tag) { bool Pluck(internal::CompletionQueueTag* tag) {
auto deadline = auto deadline =
g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME); g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck( auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
@ -277,7 +280,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// implementation to simple call the other TryPluck function with a zero /// implementation to simple call the other TryPluck function with a zero
/// timeout. i.e: /// timeout. i.e:
/// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME)) /// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
void TryPluck(CompletionQueueTag* tag) { void TryPluck(internal::CompletionQueueTag* tag) {
auto deadline = g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME); auto deadline = g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck( auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr); cq_, tag, deadline, nullptr);
@ -293,7 +296,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// ///
/// This exects tag->FinalizeResult (if called) to return 'false' i.e expects /// This exects tag->FinalizeResult (if called) to return 'false' i.e expects
/// that the tag is internal not something that is returned to the user. /// that the tag is internal not something that is returned to the user.
void TryPluck(CompletionQueueTag* tag, gpr_timespec deadline) { void TryPluck(internal::CompletionQueueTag* tag, gpr_timespec deadline) {
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck( auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr); cq_, tag, deadline, nullptr);
if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) { if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {

@ -21,6 +21,7 @@
namespace grpc { namespace grpc {
namespace internal {
/// An interface allowing implementors to process and filter event tags. /// An interface allowing implementors to process and filter event tags.
class CompletionQueueTag { class CompletionQueueTag {
public: public:
@ -31,6 +32,7 @@ class CompletionQueueTag {
/// queue /// queue
virtual bool FinalizeResult(void** tag, bool* status) = 0; virtual bool FinalizeResult(void** tag, bool* status) = 0;
}; };
} // namespace internal
} // namespace grpc } // namespace grpc

@ -23,6 +23,7 @@
namespace grpc { namespace grpc {
namespace internal {
class MetadataMap { class MetadataMap {
public: public:
MetadataMap() { memset(&arr_, 0, sizeof(arr_)); } MetadataMap() { memset(&arr_, 0, sizeof(arr_)); }
@ -50,6 +51,7 @@ class MetadataMap {
grpc_metadata_array arr_; grpc_metadata_array arr_;
std::multimap<grpc::string_ref, grpc::string_ref> map_; std::multimap<grpc::string_ref, grpc::string_ref> map_;
}; };
} // namespace internal
} // namespace grpc } // namespace grpc

@ -26,6 +26,7 @@
namespace grpc { namespace grpc {
namespace internal {
/// A wrapper class of an application provided rpc method handler. /// A wrapper class of an application provided rpc method handler.
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler : public MethodHandler { class RpcMethodHandler : public MethodHandler {
@ -266,6 +267,7 @@ class UnknownMethodHandler : public MethodHandler {
} }
}; };
} // namespace internal
} // namespace grpc } // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H #endif // GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H

@ -24,7 +24,7 @@
#include <grpc++/impl/codegen/channel_interface.h> #include <grpc++/impl/codegen/channel_interface.h>
namespace grpc { namespace grpc {
namespace internal {
/// Descriptor of an RPC method /// Descriptor of an RPC method
class RpcMethod { class RpcMethod {
public: public:
@ -55,6 +55,7 @@ class RpcMethod {
void* const channel_tag_; void* const channel_tag_;
}; };
} // namespace internal
} // namespace grpc } // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_RPC_METHOD_H #endif // GRPCXX_IMPL_CODEGEN_RPC_METHOD_H

@ -32,8 +32,8 @@
namespace grpc { namespace grpc {
class ServerContext; class ServerContext;
class StreamContextInterface;
namespace internal {
/// Base class for running an RPC handler. /// Base class for running an RPC handler.
class MethodHandler { class MethodHandler {
public: public:
@ -71,6 +71,7 @@ class RpcServiceMethod : public RpcMethod {
void* server_tag_; void* server_tag_;
std::unique_ptr<MethodHandler> handler_; std::unique_ptr<MethodHandler> handler_;
}; };
} // namespace internal
} // namespace grpc } // namespace grpc

@ -55,7 +55,6 @@ class ServerWriter;
namespace internal { namespace internal {
template <class W, class R> template <class W, class R>
class ServerReaderWriterBody; class ServerReaderWriterBody;
}
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler; class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
@ -65,9 +64,11 @@ class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler; class BidiStreamingHandler;
class UnknownMethodHandler; class UnknownMethodHandler;
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler;
class Call; class Call;
class CallOpBuffer; } // namespace internal
class CompletionQueue; class CompletionQueue;
class Server; class Server;
class ServerInterface; class ServerInterface;
@ -247,14 +248,14 @@ class ServerContext {
template <class W, class R> template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody; friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler; friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler; friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler; friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded> template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler; friend class ::grpc::internal::TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler; friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::ClientContext; friend class ::grpc::ClientContext;
/// Prevent copying. /// Prevent copying.
@ -263,9 +264,9 @@ class ServerContext {
class CompletionOp; class CompletionOp;
void BeginCompletionOp(Call* call); void BeginCompletionOp(internal::Call* call);
/// Return the tag queued by BeginCompletionOp() /// Return the tag queued by BeginCompletionOp()
CompletionQueueTag* GetCompletionOpTag(); internal::CompletionQueueTag* GetCompletionOpTag();
ServerContext(gpr_timespec deadline, grpc_metadata_array* arr); ServerContext(gpr_timespec deadline, grpc_metadata_array* arr);
@ -282,7 +283,7 @@ class ServerContext {
CompletionQueue* cq_; CompletionQueue* cq_;
bool sent_initial_metadata_; bool sent_initial_metadata_;
mutable std::shared_ptr<const AuthContext> auth_context_; mutable std::shared_ptr<const AuthContext> auth_context_;
MetadataMap client_metadata_; internal::MetadataMap client_metadata_;
std::multimap<grpc::string, grpc::string> initial_metadata_; std::multimap<grpc::string, grpc::string> initial_metadata_;
std::multimap<grpc::string, grpc::string> trailing_metadata_; std::multimap<grpc::string, grpc::string> trailing_metadata_;
@ -290,7 +291,9 @@ class ServerContext {
grpc_compression_level compression_level_; grpc_compression_level compression_level_;
grpc_compression_algorithm compression_algorithm_; grpc_compression_algorithm compression_algorithm_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> pending_ops_; internal::CallOpSet<internal::CallOpSendInitialMetadata,
internal::CallOpSendMessage>
pending_ops_;
bool has_pending_ops_; bool has_pending_ops_;
}; };

@ -30,20 +30,21 @@ namespace grpc {
class AsyncGenericService; class AsyncGenericService;
class Channel; class Channel;
class GenericServerContext; class GenericServerContext;
class RpcService;
class ServerAsyncStreamingInterface;
class ServerCompletionQueue; class ServerCompletionQueue;
class ServerContext; class ServerContext;
class ServerCredentials; class ServerCredentials;
class Service; class Service;
class ThreadPoolInterface;
extern CoreCodegenInterface* g_core_codegen_interface; extern CoreCodegenInterface* g_core_codegen_interface;
/// Models a gRPC server. /// Models a gRPC server.
/// ///
/// Servers are configured and started via \a grpc::ServerBuilder. /// Servers are configured and started via \a grpc::ServerBuilder.
class ServerInterface : public CallHook { namespace internal {
class ServerAsyncStreamingInterface;
} // namespace internal
class ServerInterface : public internal::CallHook {
public: public:
virtual ~ServerInterface() {} virtual ~ServerInterface() {}
@ -78,7 +79,7 @@ class ServerInterface : public CallHook {
virtual void Wait() = 0; virtual void Wait() = 0;
protected: protected:
friend class Service; friend class ::grpc::Service;
/// Register a service. This call does not take ownership of the service. /// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance. /// The service must exist for the lifetime of the Server instance.
@ -116,12 +117,13 @@ class ServerInterface : public CallHook {
virtual grpc_server* server() = 0; virtual grpc_server* server() = 0;
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0; virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
class BaseAsyncRequest : public CompletionQueueTag { class BaseAsyncRequest : public internal::CompletionQueueTag {
public: public:
BaseAsyncRequest(ServerInterface* server, ServerContext* context, BaseAsyncRequest(ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag, CompletionQueue* call_cq, void* tag,
bool delete_on_finalize); bool delete_on_finalize);
virtual ~BaseAsyncRequest(); virtual ~BaseAsyncRequest();
@ -131,7 +133,7 @@ class ServerInterface : public CallHook {
protected: protected:
ServerInterface* const server_; ServerInterface* const server_;
ServerContext* const context_; ServerContext* const context_;
ServerAsyncStreamingInterface* const stream_; internal::ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_; CompletionQueue* const call_cq_;
void* const tag_; void* const tag_;
const bool delete_on_finalize_; const bool delete_on_finalize_;
@ -141,7 +143,7 @@ class ServerInterface : public CallHook {
class RegisteredAsyncRequest : public BaseAsyncRequest { class RegisteredAsyncRequest : public BaseAsyncRequest {
public: public:
RegisteredAsyncRequest(ServerInterface* server, ServerContext* context, RegisteredAsyncRequest(ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag); CompletionQueue* call_cq, void* tag);
// uses BaseAsyncRequest::FinalizeResult // uses BaseAsyncRequest::FinalizeResult
@ -155,7 +157,7 @@ class ServerInterface : public CallHook {
public: public:
NoPayloadAsyncRequest(void* registered_method, ServerInterface* server, NoPayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context, ServerContext* context,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) ServerCompletionQueue* notification_cq, void* tag)
: RegisteredAsyncRequest(server, context, stream, call_cq, tag) { : RegisteredAsyncRequest(server, context, stream, call_cq, tag) {
@ -170,7 +172,7 @@ class ServerInterface : public CallHook {
public: public:
PayloadAsyncRequest(void* registered_method, ServerInterface* server, PayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context, ServerContext* context,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag, ServerCompletionQueue* notification_cq, void* tag,
Message* request) Message* request)
@ -212,7 +214,7 @@ class ServerInterface : public CallHook {
void* const registered_method_; void* const registered_method_;
ServerInterface* const server_; ServerInterface* const server_;
ServerContext* const context_; ServerContext* const context_;
ServerAsyncStreamingInterface* const stream_; internal::ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_; CompletionQueue* const call_cq_;
ServerCompletionQueue* const notification_cq_; ServerCompletionQueue* const notification_cq_;
void* const tag_; void* const tag_;
@ -223,7 +225,7 @@ class ServerInterface : public CallHook {
class GenericAsyncRequest : public BaseAsyncRequest { class GenericAsyncRequest : public BaseAsyncRequest {
public: public:
GenericAsyncRequest(ServerInterface* server, GenericServerContext* context, GenericAsyncRequest(ServerInterface* server, GenericServerContext* context,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag, ServerCompletionQueue* notification_cq, void* tag,
bool delete_on_finalize); bool delete_on_finalize);
@ -235,8 +237,9 @@ class ServerInterface : public CallHook {
}; };
template <class Message> template <class Message>
void RequestAsyncCall(RpcServiceMethod* method, ServerContext* context, void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerAsyncStreamingInterface* stream, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag, ServerCompletionQueue* notification_cq, void* tag,
Message* message) { Message* message) {
@ -246,8 +249,9 @@ class ServerInterface : public CallHook {
message); message);
} }
void RequestAsyncCall(RpcServiceMethod* method, ServerContext* context, void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerAsyncStreamingInterface* stream, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) { ServerCompletionQueue* notification_cq, void* tag) {
GPR_CODEGEN_ASSERT(method); GPR_CODEGEN_ASSERT(method);
@ -256,7 +260,7 @@ class ServerInterface : public CallHook {
} }
void RequestAsyncGenericCall(GenericServerContext* context, void RequestAsyncGenericCall(GenericServerContext* context,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, ServerCompletionQueue* notification_cq,
void* tag) { void* tag) {

@ -28,13 +28,14 @@
namespace grpc { namespace grpc {
class Call;
class CompletionQueue; class CompletionQueue;
class Server; class Server;
class ServerInterface; class ServerInterface;
class ServerCompletionQueue; class ServerCompletionQueue;
class ServerContext; class ServerContext;
namespace internal {
class Call;
class ServerAsyncStreamingInterface { class ServerAsyncStreamingInterface {
public: public:
virtual ~ServerAsyncStreamingInterface() {} virtual ~ServerAsyncStreamingInterface() {}
@ -48,9 +49,10 @@ class ServerAsyncStreamingInterface {
virtual void SendInitialMetadata(void* tag) = 0; virtual void SendInitialMetadata(void* tag) = 0;
private: private:
friend class ServerInterface; friend class ::grpc::ServerInterface;
virtual void BindCall(Call* call) = 0; virtual void BindCall(Call* call) = 0;
}; };
} // namespace internal
/// Desriptor of an RPC service and its various RPC methods /// Desriptor of an RPC service and its various RPC methods
class Service { class Service {
@ -88,40 +90,38 @@ class Service {
protected: protected:
template <class Message> template <class Message>
void RequestAsyncUnary(int index, ServerContext* context, Message* request, void RequestAsyncUnary(int index, ServerContext* context, Message* request,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) { ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq, server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag, request); notification_cq, tag, request);
} }
void RequestAsyncClientStreaming(int index, ServerContext* context, void RequestAsyncClientStreaming(
ServerAsyncStreamingInterface* stream, int index, ServerContext* context,
CompletionQueue* call_cq, internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, ServerCompletionQueue* notification_cq, void* tag) {
void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq, server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag); notification_cq, tag);
} }
template <class Message> template <class Message>
void RequestAsyncServerStreaming(int index, ServerContext* context, void RequestAsyncServerStreaming(
Message* request, int index, ServerContext* context, Message* request,
ServerAsyncStreamingInterface* stream, internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
CompletionQueue* call_cq, ServerCompletionQueue* notification_cq, void* tag) {
ServerCompletionQueue* notification_cq,
void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq, server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag, request); notification_cq, tag, request);
} }
void RequestAsyncBidiStreaming(int index, ServerContext* context, void RequestAsyncBidiStreaming(
ServerAsyncStreamingInterface* stream, int index, ServerContext* context,
CompletionQueue* call_cq, internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, ServerCompletionQueue* notification_cq, void* tag) {
void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq, server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag); notification_cq, tag);
} }
void AddMethod(RpcServiceMethod* method) { methods_.emplace_back(method); } void AddMethod(internal::RpcServiceMethod* method) {
methods_.emplace_back(method);
}
void MarkMethodAsync(int index) { void MarkMethodAsync(int index) {
GPR_CODEGEN_ASSERT( GPR_CODEGEN_ASSERT(
@ -139,7 +139,7 @@ class Service {
methods_[index].reset(); methods_[index].reset();
} }
void MarkMethodStreamed(int index, MethodHandler* streamed_method) { void MarkMethodStreamed(int index, internal::MethodHandler* streamed_method) {
GPR_CODEGEN_ASSERT(methods_[index] && methods_[index]->handler() && GPR_CODEGEN_ASSERT(methods_[index] && methods_[index]->handler() &&
"Cannot mark an async or generic method Streamed"); "Cannot mark an async or generic method Streamed");
methods_[index]->SetHandler(streamed_method); methods_[index]->SetHandler(streamed_method);
@ -148,14 +148,14 @@ class Service {
// case of BIDI_STREAMING that has 1 read and 1 write, in that order, // case of BIDI_STREAMING that has 1 read and 1 write, in that order,
// and split server-side streaming is BIDI_STREAMING with 1 read and // and split server-side streaming is BIDI_STREAMING with 1 read and
// any number of writes, in that order. // any number of writes, in that order.
methods_[index]->SetMethodType(::grpc::RpcMethod::BIDI_STREAMING); methods_[index]->SetMethodType(internal::RpcMethod::BIDI_STREAMING);
} }
private: private:
friend class Server; friend class Server;
friend class ServerInterface; friend class ServerInterface;
ServerInterface* server_; ServerInterface* server_;
std::vector<std::unique_ptr<RpcServiceMethod>> methods_; std::vector<std::unique_ptr<internal::RpcServiceMethod>> methods_;
}; };
} // namespace grpc } // namespace grpc

@ -30,6 +30,7 @@
namespace grpc { namespace grpc {
namespace internal {
/// Common interface for all synchronous client side streaming. /// Common interface for all synchronous client side streaming.
class ClientStreamingInterface { class ClientStreamingInterface {
public: public:
@ -141,10 +142,12 @@ class WriterInterface {
} }
}; };
} // namespace internal
/// Client-side interface for streaming reads of message of type \a R. /// Client-side interface for streaming reads of message of type \a R.
template <class R> template <class R>
class ClientReaderInterface : public ClientStreamingInterface, class ClientReaderInterface : public internal::ClientStreamingInterface,
public ReaderInterface<R> { public internal::ReaderInterface<R> {
public: public:
/// Block to wait for initial metadata from server. The received metadata /// Block to wait for initial metadata from server. The received metadata
/// can only be accessed after this call returns. Should only be called before /// can only be accessed after this call returns. Should only be called before
@ -153,35 +156,25 @@ class ClientReaderInterface : public ClientStreamingInterface,
virtual void WaitForInitialMetadata() = 0; virtual void WaitForInitialMetadata() = 0;
}; };
namespace internal {
template <class R>
class ClientReaderFactory {
public:
template <class W>
static ClientReader<R>* Create(ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request) {
return new ClientReader<R>(channel, method, context, request);
}
};
} // namespace internal
/// Synchronous (blocking) client-side API for doing server-streaming RPCs, /// Synchronous (blocking) client-side API for doing server-streaming RPCs,
/// where the stream of messages coming from the server has messages /// where the stream of messages coming from the server has messages
/// of type \a R. /// of type \a R.
template <class R> template <class R>
class ClientReader final : public ClientReaderInterface<R> { class ClientReader final : public ClientReaderInterface<R> {
public: public:
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial
/// metadata used to send to the server when starting the call.
template <class W>
ClientReader(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const W& request)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(ops.SendMessage(request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
/// See the \a ClientStreamingInterface.WaitForInitialMetadata method for /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
/// semantics. /// semantics.
/// ///
@ -192,7 +185,8 @@ class ClientReader final : public ClientReaderInterface<R> {
void WaitForInitialMetadata() override { void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_); ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops); call_.PerformOps(&ops);
cq_.Pluck(&ops); /// status ignored cq_.Pluck(&ops); /// status ignored
@ -209,7 +203,9 @@ class ClientReader final : public ClientReaderInterface<R> {
/// already received (if initial metadata is received, it can be then /// already received (if initial metadata is received, it can be then
/// accessed through the \a ClientContext associated with this call). /// accessed through the \a ClientContext associated with this call).
bool Read(R* msg) override { bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
ops;
if (!context_->initial_metadata_received_) { if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_); ops.RecvInitialMetadata(context_);
} }
@ -224,7 +220,7 @@ class ClientReader final : public ClientReaderInterface<R> {
/// The \a ClientContext associated with this call is updated with /// The \a ClientContext associated with this call is updated with
/// possible metadata received from the server. /// possible metadata received from the server.
Status Finish() override { Status Finish() override {
CallOpSet<CallOpClientRecvStatus> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops;
Status status; Status status;
ops.ClientRecvStatus(context_, &status); ops.ClientRecvStatus(context_, &status);
call_.PerformOps(&ops); call_.PerformOps(&ops);
@ -233,15 +229,41 @@ class ClientReader final : public ClientReaderInterface<R> {
} }
private: private:
friend class internal::ClientReaderFactory<R>;
ClientContext* context_; ClientContext* context_;
CompletionQueue cq_; CompletionQueue cq_;
Call call_; ::grpc::internal::Call call_;
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial
/// metadata used to send to the server when starting the call.
template <class W>
ClientReader(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(ops.SendMessage(request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}; };
/// Client-side interface for streaming writes of message type \a W. /// Client-side interface for streaming writes of message type \a W.
template <class W> template <class W>
class ClientWriterInterface : public ClientStreamingInterface, class ClientWriterInterface : public internal::ClientStreamingInterface,
public WriterInterface<W> { public internal::WriterInterface<W> {
public: public:
/// Half close writing from the client. (signal that the stream of messages /// Half close writing from the client. (signal that the stream of messages
/// coming from the client is complete). /// coming from the client is complete).
@ -252,37 +274,25 @@ class ClientWriterInterface : public ClientStreamingInterface,
virtual bool WritesDone() = 0; virtual bool WritesDone() = 0;
}; };
namespace internal {
template <class W>
class ClientWriterFactory {
public:
template <class R>
static ClientWriter<W>* Create(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response) {
return new ClientWriter<W>(channel, method, context, response);
}
};
} // namespace internal
/// Synchronous (blocking) client-side API for doing client-streaming RPCs, /// Synchronous (blocking) client-side API for doing client-streaming RPCs,
/// where the outgoing message stream coming from the client has messages of /// where the outgoing message stream coming from the client has messages of
/// type \a W. /// type \a W.
template <class W> template <class W>
class ClientWriter : public ClientWriterInterface<W> { class ClientWriter : public ClientWriterInterface<W> {
public: public:
/// Block to create a stream (i.e. send request headers and other initial
/// metadata to the server). Note that \a context will be used to fill
/// in custom initial metadata. \a response will be filled in with the
/// single expected response message from the server upon a successful
/// call to the \a Finish method of this instance.
template <class R>
ClientWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, R* response)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
if (!context_->initial_metadata_corked_) {
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
/// See the \a ClientStreamingInterface.WaitForInitialMetadata method for /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
/// semantics. /// semantics.
/// ///
@ -292,7 +302,8 @@ class ClientWriter : public ClientWriterInterface<W> {
void WaitForInitialMetadata() { void WaitForInitialMetadata() {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_); ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops); call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored cq_.Pluck(&ops); // status ignored
@ -304,10 +315,11 @@ class ClientWriter : public ClientWriterInterface<W> {
/// Side effect: /// Side effect:
/// Also sends initial metadata if not already sent (using the /// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call). /// \a ClientContext associated with this call).
using WriterInterface<W>::Write; using ::grpc::internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override { bool Write(const W& msg, WriteOptions options) override {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
CallOpClientSendClose> ::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops; ops;
if (options.is_last_message()) { if (options.is_last_message()) {
@ -328,7 +340,7 @@ class ClientWriter : public ClientWriterInterface<W> {
} }
bool WritesDone() override { bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
ops.ClientSendClose(); ops.ClientSendClose();
call_.PerformOps(&ops); call_.PerformOps(&ops);
return cq_.Pluck(&ops); return cq_.Pluck(&ops);
@ -352,21 +364,51 @@ class ClientWriter : public ClientWriterInterface<W> {
} }
private: private:
friend class internal::ClientWriterFactory<W>;
/// Block to create a stream (i.e. send request headers and other initial
/// metadata to the server). Note that \a context will be used to fill
/// in custom initial metadata. \a response will be filled in with the
/// single expected response message from the server upon a successful
/// call to the \a Finish method of this instance.
template <class R>
ClientWriter(ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
if (!context_->initial_metadata_corked_) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
ClientContext* context_; ClientContext* context_;
CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage, ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
CallOpClientRecvStatus> ::grpc::internal::CallOpGenericRecvMessage,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_; finish_ops_;
CompletionQueue cq_; CompletionQueue cq_;
Call call_; ::grpc::internal::Call call_;
}; };
/// Client-side interface for bi-directional streaming with /// Client-side interface for bi-directional streaming with
/// client-to-server stream messages of type \a W and /// client-to-server stream messages of type \a W and
/// server-to-client stream messages of type \a R. /// server-to-client stream messages of type \a R.
template <class W, class R> template <class W, class R>
class ClientReaderWriterInterface : public ClientStreamingInterface, class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
public WriterInterface<W>, public internal::WriterInterface<W>,
public ReaderInterface<R> { public internal::ReaderInterface<R> {
public: public:
/// Block to wait for initial metadata from server. The received metadata /// Block to wait for initial metadata from server. The received metadata
/// can only be accessed after this call returns. Should only be called before /// can only be accessed after this call returns. Should only be called before
@ -375,7 +417,7 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
virtual void WaitForInitialMetadata() = 0; virtual void WaitForInitialMetadata() = 0;
/// Half close writing from the client. (signal that the stream of messages /// Half close writing from the client. (signal that the stream of messages
/// coming from the client is complete). /// coming from the clinet is complete).
/// Blocks until currently-pending writes are completed. /// Blocks until currently-pending writes are completed.
/// Thread-safe with respect to \a ReaderInterface::Read /// Thread-safe with respect to \a ReaderInterface::Read
/// ///
@ -383,6 +425,18 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
virtual bool WritesDone() = 0; virtual bool WritesDone() = 0;
}; };
namespace internal {
template <class W, class R>
class ClientReaderWriterFactory {
public:
static ClientReaderWriter<W, R>* Create(
::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method, ClientContext* context) {
return new ClientReaderWriter<W, R>(channel, method, context);
}
};
} // namespace internal
/// Synchronous (blocking) client-side API for bi-directional streaming RPCs, /// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
/// where the outgoing message stream coming from the client has messages of /// where the outgoing message stream coming from the client has messages of
/// type \a W, and the incoming messages stream coming from the server has /// type \a W, and the incoming messages stream coming from the server has
@ -390,25 +444,6 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
template <class W, class R> template <class W, class R>
class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> { class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
public: public:
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
ClientReaderWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
if (!context_->initial_metadata_corked_) {
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
/// Block waiting to read initial metadata from the server. /// Block waiting to read initial metadata from the server.
/// This call is optional, but if it is used, it cannot be used concurrently /// This call is optional, but if it is used, it cannot be used concurrently
/// with or after the \a Finish method. /// with or after the \a Finish method.
@ -418,7 +453,8 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
void WaitForInitialMetadata() override { void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_); ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops); call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored cq_.Pluck(&ops); // status ignored
@ -434,7 +470,9 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// Also receives initial metadata if not already received (updates the \a /// Also receives initial metadata if not already received (updates the \a
/// ClientContext associated with this call in that case). /// ClientContext associated with this call in that case).
bool Read(R* msg) override { bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
ops;
if (!context_->initial_metadata_received_) { if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_); ops.RecvInitialMetadata(context_);
} }
@ -448,10 +486,11 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// Side effect: /// Side effect:
/// Also sends initial metadata if not already sent (using the /// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call to fill in values). /// \a ClientContext associated with this call to fill in values).
using WriterInterface<W>::Write; using ::grpc::internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override { bool Write(const W& msg, WriteOptions options) override {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
CallOpClientSendClose> ::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops; ops;
if (options.is_last_message()) { if (options.is_last_message()) {
@ -472,7 +511,7 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
} }
bool WritesDone() override { bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
ops.ClientSendClose(); ops.ClientSendClose();
call_.PerformOps(&ops); call_.PerformOps(&ops);
return cq_.Pluck(&ops); return cq_.Pluck(&ops);
@ -484,7 +523,9 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// - the \a ClientContext associated with this call is updated with /// - the \a ClientContext associated with this call is updated with
/// possible trailing metadata sent from the server. /// possible trailing metadata sent from the server.
Status Finish() override { Status Finish() override {
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> ops; ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
ops;
if (!context_->initial_metadata_received_) { if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_); ops.RecvInitialMetadata(context_);
} }
@ -496,15 +537,38 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
} }
private: private:
friend class internal::ClientReaderWriterFactory<W, R>;
ClientContext* context_; ClientContext* context_;
CompletionQueue cq_; CompletionQueue cq_;
Call call_; ::grpc::internal::Call call_;
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
ClientReaderWriter(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
if (!context_->initial_metadata_corked_) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
}; };
/// Server-side interface for streaming reads of message of type \a R. /// Server-side interface for streaming reads of message of type \a R.
template <class R> template <class R>
class ServerReaderInterface : public ServerStreamingInterface, class ServerReaderInterface : public internal::ServerStreamingInterface,
public ReaderInterface<R> {}; public internal::ReaderInterface<R> {};
/// Synchronous (blocking) server-side API for doing client-streaming RPCs, /// Synchronous (blocking) server-side API for doing client-streaming RPCs,
/// where the incoming message stream coming from the client has messages of /// where the incoming message stream coming from the client has messages of
@ -512,15 +576,13 @@ class ServerReaderInterface : public ServerStreamingInterface,
template <class R> template <class R>
class ServerReader final : public ServerReaderInterface<R> { class ServerReader final : public ServerReaderInterface<R> {
public: public:
ServerReader(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method /// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics. Note that initial metadata will be affected by the /// for semantics. Note that initial metadata will be affected by the
/// \a ServerContext associated with this call. /// \a ServerContext associated with this call.
void SendInitialMetadata() override { void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops; internal::CallOpSet<internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_, ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags()); ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) { if (ctx_->compression_level_set()) {
@ -537,21 +599,27 @@ class ServerReader final : public ServerReaderInterface<R> {
} }
bool Read(R* msg) override { bool Read(R* msg) override {
CallOpSet<CallOpRecvMessage<R>> ops; internal::CallOpSet<internal::CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg); ops.RecvMessage(msg);
call_->PerformOps(&ops); call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops) && ops.got_message; return call_->cq()->Pluck(&ops) && ops.got_message;
} }
private: private:
Call* const call_; internal::Call* const call_;
ServerContext* const ctx_; ServerContext* const ctx_;
template <class ServiceType, class RequestType, class ResponseType>
friend class internal::ClientStreamingHandler;
ServerReader(internal::Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
}; };
/// Server-side interface for streaming writes of message of type \a W. /// Server-side interface for streaming writes of message of type \a W.
template <class W> template <class W>
class ServerWriterInterface : public ServerStreamingInterface, class ServerWriterInterface : public internal::ServerStreamingInterface,
public WriterInterface<W> {}; public internal::WriterInterface<W> {};
/// Synchronous (blocking) server-side API for doing for doing a /// Synchronous (blocking) server-side API for doing for doing a
/// server-streaming RPCs, where the outgoing message stream coming from the /// server-streaming RPCs, where the outgoing message stream coming from the
@ -559,8 +627,6 @@ class ServerWriterInterface : public ServerStreamingInterface,
template <class W> template <class W>
class ServerWriter final : public ServerWriterInterface<W> { class ServerWriter final : public ServerWriterInterface<W> {
public: public:
ServerWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method /// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics. /// for semantics.
/// Note that initial metadata will be affected by the /// Note that initial metadata will be affected by the
@ -568,7 +634,7 @@ class ServerWriter final : public ServerWriterInterface<W> {
void SendInitialMetadata() override { void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops; internal::CallOpSet<internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_, ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags()); ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) { if (ctx_->compression_level_set()) {
@ -584,11 +650,12 @@ class ServerWriter final : public ServerWriterInterface<W> {
/// Side effect: /// Side effect:
/// Also sends initial metadata if not already sent (using the /// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call to fill in values). /// \a ClientContext associated with this call to fill in values).
using WriterInterface<W>::Write; using internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override { bool Write(const W& msg, WriteOptions options) override {
if (options.is_last_message()) { if (options.is_last_message()) {
options.set_buffer_hint(); options.set_buffer_hint();
} }
if (!ctx_->pending_ops_.SendMessage(msg, options).ok()) { if (!ctx_->pending_ops_.SendMessage(msg, options).ok()) {
return false; return false;
} }
@ -613,15 +680,21 @@ class ServerWriter final : public ServerWriterInterface<W> {
} }
private: private:
Call* const call_; internal::Call* const call_;
ServerContext* const ctx_; ServerContext* const ctx_;
template <class ServiceType, class RequestType, class ResponseType>
friend class internal::ServerStreamingHandler;
ServerWriter(internal::Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
}; };
/// Server-side interface for bi-directional streaming. /// Server-side interface for bi-directional streaming.
template <class W, class R> template <class W, class R>
class ServerReaderWriterInterface : public ServerStreamingInterface, class ServerReaderWriterInterface : public internal::ServerStreamingInterface,
public WriterInterface<W>, public internal::WriterInterface<W>,
public ReaderInterface<R> {}; public internal::ReaderInterface<R> {};
/// Actual implementation of bi-directional streaming /// Actual implementation of bi-directional streaming
namespace internal { namespace internal {
@ -688,6 +761,7 @@ class ServerReaderWriterBody final {
Call* const call_; Call* const call_;
ServerContext* const ctx_; ServerContext* const ctx_;
}; };
} // namespace internal } // namespace internal
/// Synchronous (blocking) server-side API for a bidirectional /// Synchronous (blocking) server-side API for a bidirectional
@ -697,8 +771,6 @@ class ServerReaderWriterBody final {
template <class W, class R> template <class W, class R>
class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> { class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
public: public:
ServerReaderWriter(Call* call, ServerContext* ctx) : body_(call, ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method /// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics. Note that initial metadata will be affected by the /// for semantics. Note that initial metadata will be affected by the
/// \a ServerContext associated with this call. /// \a ServerContext associated with this call.
@ -715,13 +787,18 @@ class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
/// Side effect: /// Side effect:
/// Also sends initial metadata if not already sent (using the \a /// Also sends initial metadata if not already sent (using the \a
/// ServerContext associated with this call). /// ServerContext associated with this call).
using WriterInterface<W>::Write; using internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override { bool Write(const W& msg, WriteOptions options) override {
return body_.Write(msg, options); return body_.Write(msg, options);
} }
private: private:
internal::ServerReaderWriterBody<W, R> body_; internal::ServerReaderWriterBody<W, R> body_;
friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>,
false>;
ServerReaderWriter(internal::Call* call, ServerContext* ctx)
: body_(call, ctx) {}
}; };
/// A class to represent a flow-controlled unary call. This is something /// A class to represent a flow-controlled unary call. This is something
@ -736,9 +813,6 @@ template <class RequestType, class ResponseType>
class ServerUnaryStreamer final class ServerUnaryStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> { : public ServerReaderWriterInterface<ResponseType, RequestType> {
public: public:
ServerUnaryStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
/// Block to send initial metadata to client. /// Block to send initial metadata to client.
/// Implicit input parameter: /// Implicit input parameter:
/// - the \a ServerContext associated with this call will be used for /// - the \a ServerContext associated with this call will be used for
@ -775,7 +849,7 @@ class ServerUnaryStreamer final
/// \param options The WriteOptions affecting the write operation. /// \param options The WriteOptions affecting the write operation.
/// ///
/// \return \a true on success, \a false when the stream has been closed. /// \return \a true on success, \a false when the stream has been closed.
using WriterInterface<ResponseType>::Write; using internal::WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response, WriteOptions options) override { bool Write(const ResponseType& response, WriteOptions options) override {
if (write_done_ || !read_done_) { if (write_done_ || !read_done_) {
return false; return false;
@ -788,6 +862,11 @@ class ServerUnaryStreamer final
internal::ServerReaderWriterBody<ResponseType, RequestType> body_; internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
bool read_done_; bool read_done_;
bool write_done_; bool write_done_;
friend class internal::TemplatedBidiStreamingHandler<
ServerUnaryStreamer<RequestType, ResponseType>, true>;
ServerUnaryStreamer(internal::Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
}; };
/// A class to represent a flow-controlled server-side streaming call. /// A class to represent a flow-controlled server-side streaming call.
@ -799,9 +878,6 @@ template <class RequestType, class ResponseType>
class ServerSplitStreamer final class ServerSplitStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> { : public ServerReaderWriterInterface<ResponseType, RequestType> {
public: public:
ServerSplitStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false) {}
/// Block to send initial metadata to client. /// Block to send initial metadata to client.
/// Implicit input parameter: /// Implicit input parameter:
/// - the \a ServerContext associated with this call will be used for /// - the \a ServerContext associated with this call will be used for
@ -838,7 +914,7 @@ class ServerSplitStreamer final
/// \param options The WriteOptions affecting the write operation. /// \param options The WriteOptions affecting the write operation.
/// ///
/// \return \a true on success, \a false when the stream has been closed. /// \return \a true on success, \a false when the stream has been closed.
using WriterInterface<ResponseType>::Write; using internal::WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response, WriteOptions options) override { bool Write(const ResponseType& response, WriteOptions options) override {
return read_done_ && body_.Write(response, options); return read_done_ && body_.Write(response, options);
} }
@ -846,6 +922,11 @@ class ServerSplitStreamer final
private: private:
internal::ServerReaderWriterBody<ResponseType, RequestType> body_; internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
bool read_done_; bool read_done_;
friend class internal::TemplatedBidiStreamingHandler<
ServerSplitStreamer<RequestType, ResponseType>, false>;
ServerSplitStreamer(internal::Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false) {}
}; };
} // namespace grpc } // namespace grpc

@ -19,6 +19,8 @@
#ifndef GRPCXX_IMPL_CODEGEN_TIME_H #ifndef GRPCXX_IMPL_CODEGEN_TIME_H
#define GRPCXX_IMPL_CODEGEN_TIME_H #define GRPCXX_IMPL_CODEGEN_TIME_H
#include <chrono>
#include <grpc++/impl/codegen/config.h> #include <grpc++/impl/codegen/config.h>
#include <grpc/impl/codegen/grpc_types.h> #include <grpc/impl/codegen/grpc_types.h>
@ -59,10 +61,6 @@ class TimePoint<gpr_timespec> {
} // namespace grpc } // namespace grpc
#include <chrono>
#include <grpc/impl/codegen/grpc_types.h>
namespace grpc { namespace grpc {
// from and to should be absolute time. // from and to should be absolute time.

@ -175,7 +175,8 @@ class Server final : public ServerInterface, private GrpcLibraryCodegen {
/// \param num_cqs How many completion queues does \a cqs hold. /// \param num_cqs How many completion queues does \a cqs hold.
void Start(ServerCompletionQueue** cqs, size_t num_cqs) override; void Start(ServerCompletionQueue** cqs, size_t num_cqs) override;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override; void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) override;
void ShutdownInternal(gpr_timespec deadline) override; void ShutdownInternal(gpr_timespec deadline) override;

@ -40,7 +40,6 @@ namespace grpc {
class AsyncGenericService; class AsyncGenericService;
class ResourceQuota; class ResourceQuota;
class CompletionQueue; class CompletionQueue;
class RpcService;
class Server; class Server;
class ServerCompletionQueue; class ServerCompletionQueue;
class ServerCredentials; class ServerCredentials;

@ -316,6 +316,43 @@ typedef struct grpc_server_credentials grpc_server_credentials;
*/ */
GRPCAPI void grpc_server_credentials_release(grpc_server_credentials *creds); GRPCAPI void grpc_server_credentials_release(grpc_server_credentials *creds);
/** Server certificate config object holds the server's public certificates and
associated private keys, as well as any CA certificates needed for client
certificate validation (if applicable). Create using
grpc_ssl_server_certificate_config_create(). */
typedef struct grpc_ssl_server_certificate_config
grpc_ssl_server_certificate_config;
/** Creates a grpc_ssl_server_certificate_config object.
- pem_roots_cert is the NULL-terminated string containing the PEM encoding of
the client root certificates. This parameter may be NULL if the server does
not want the client to be authenticated with SSL.
- pem_key_cert_pairs is an array private key / certificate chains of the
server. This parameter cannot be NULL.
- num_key_cert_pairs indicates the number of items in the private_key_files
and cert_chain_files parameters. It must be at least 1.
- It is the caller's responsibility to free this object via
grpc_ssl_server_certificate_config_destroy(). */
GRPCAPI grpc_ssl_server_certificate_config *
grpc_ssl_server_certificate_config_create(
const char *pem_root_certs,
const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs);
/** Destroys a grpc_ssl_server_certificate_config object. */
GRPCAPI void grpc_ssl_server_certificate_config_destroy(
grpc_ssl_server_certificate_config *config);
/** Callback to retrieve updated SSL server certificates, private keys, and
trusted CAs (for client authentication).
- user_data parameter, if not NULL, contains opaque data to be used by the
callback.
- Use grpc_ssl_server_certificate_config_create to create the config.
- The caller assumes ownership of the config. */
typedef grpc_ssl_certificate_config_reload_status (
*grpc_ssl_server_certificate_config_callback)(
void *user_data, grpc_ssl_server_certificate_config **config);
/** Deprecated in favor of grpc_ssl_server_credentials_create_ex. /** Deprecated in favor of grpc_ssl_server_credentials_create_ex.
Creates an SSL server_credentials object. Creates an SSL server_credentials object.
- pem_roots_cert is the NULL-terminated string containing the PEM encoding of - pem_roots_cert is the NULL-terminated string containing the PEM encoding of
@ -332,7 +369,8 @@ GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, int force_client_auth, void *reserved); size_t num_key_cert_pairs, int force_client_auth, void *reserved);
/** Same as grpc_ssl_server_credentials_create method except uses /** Deprecated in favor of grpc_ssl_server_credentials_create_with_options.
Same as grpc_ssl_server_credentials_create method except uses
grpc_ssl_client_certificate_request_type enum to support more ways to grpc_ssl_client_certificate_request_type enum to support more ways to
authenticate client cerificates.*/ authenticate client cerificates.*/
GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create_ex( GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
@ -341,6 +379,40 @@ GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
grpc_ssl_client_certificate_request_type client_certificate_request, grpc_ssl_client_certificate_request_type client_certificate_request,
void *reserved); void *reserved);
typedef struct grpc_ssl_server_credentials_options
grpc_ssl_server_credentials_options;
/** Creates an options object using a certificate config. Use this method when
the certificates and keys of the SSL server will not change during the
server's lifetime.
- Takes ownership of the certificate_config parameter. */
GRPCAPI grpc_ssl_server_credentials_options *
grpc_ssl_server_credentials_create_options_using_config(
grpc_ssl_client_certificate_request_type client_certificate_request,
grpc_ssl_server_certificate_config *certificate_config);
/** Creates an options object using a certificate config fetcher. Use this
method to reload the certificates and keys of the SSL server without
interrupting the operation of the server. Initial certificate config will be
fetched during server initialization.
- user_data parameter, if not NULL, contains opaque data which will be passed
to the fetcher (see definition of
grpc_ssl_server_certificate_config_callback). */
GRPCAPI grpc_ssl_server_credentials_options *
grpc_ssl_server_credentials_create_options_using_config_fetcher(
grpc_ssl_client_certificate_request_type client_certificate_request,
grpc_ssl_server_certificate_config_callback cb, void *user_data);
/** Destroys a grpc_ssl_server_credentials_options object. */
GRPCAPI void grpc_ssl_server_credentials_options_destroy(
grpc_ssl_server_credentials_options *options);
/** Creates an SSL server_credentials object using the provided options struct.
- Takes ownership of the options parameter. */
GRPCAPI grpc_server_credentials *
grpc_ssl_server_credentials_create_with_options(
grpc_ssl_server_credentials_options *options);
/** --- Server-side secure ports. --- */ /** --- Server-side secure ports. --- */
/** Add a HTTP2 over an encrypted link over tcp listener. /** Add a HTTP2 over an encrypted link over tcp listener.

@ -48,6 +48,13 @@ typedef enum {
GRPC_SSL_ROOTS_OVERRIDE_FAIL GRPC_SSL_ROOTS_OVERRIDE_FAIL
} grpc_ssl_roots_override_result; } grpc_ssl_roots_override_result;
/** Callback results for dynamically loading a SSL certificate config. */
typedef enum {
GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED,
GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW,
GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
} grpc_ssl_certificate_config_reload_status;
typedef enum { typedef enum {
/** Server does not request client certificate. A client can present a self /** Server does not request client certificate. A client can present a self
signed or signed certificates if it wishes to do so and they would be signed or signed certificates if it wishes to do so and they would be

@ -25,8 +25,6 @@ extern "C" {
/** Connectivity state of a channel. */ /** Connectivity state of a channel. */
typedef enum { typedef enum {
/** channel has just been initialized */
GRPC_CHANNEL_INIT = -1,
/** channel is idle */ /** channel is idle */
GRPC_CHANNEL_IDLE, GRPC_CHANNEL_IDLE,
/** channel is connecting */ /** channel is connecting */

@ -242,6 +242,7 @@
<file baseinstalldir="/" name="src/core/tsi/transport_security_adapter.h" role="src" /> <file baseinstalldir="/" name="src/core/tsi/transport_security_adapter.h" role="src" />
<file baseinstalldir="/" name="src/core/tsi/transport_security_interface.h" role="src" /> <file baseinstalldir="/" name="src/core/tsi/transport_security_interface.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/chttp2_server.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/chttp2_server.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/backup_poller.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/connector.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/connector.h" role="src" />
@ -395,6 +396,7 @@
<file baseinstalldir="/" name="third_party/nanopb/pb_decode.h" role="src" /> <file baseinstalldir="/" name="third_party/nanopb/pb_decode.h" role="src" />
<file baseinstalldir="/" name="third_party/nanopb/pb_encode.h" role="src" /> <file baseinstalldir="/" name="third_party/nanopb/pb_encode.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/subchannel_list.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/load_reporting/server_load_reporting_filter.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/load_reporting/server_load_reporting_filter.h" role="src" />
@ -615,6 +617,7 @@
<file baseinstalldir="/" name="src/core/tsi/transport_security_adapter.cc" role="src" /> <file baseinstalldir="/" name="src/core/tsi/transport_security_adapter.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/chttp2_server.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/chttp2_server.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/backup_poller.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/channel_connectivity.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/channel_connectivity.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.cc" role="src" />
@ -654,6 +657,7 @@
<file baseinstalldir="/" name="third_party/nanopb/pb_encode.c" role="src" /> <file baseinstalldir="/" name="third_party/nanopb/pb_encode.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc" role="src" />

@ -140,7 +140,6 @@ grpc::string GetHeaderIncludes(grpc_generator::File *file,
printer->Print(vars, "namespace grpc {\n"); printer->Print(vars, "namespace grpc {\n");
printer->Print(vars, "class CompletionQueue;\n"); printer->Print(vars, "class CompletionQueue;\n");
printer->Print(vars, "class Channel;\n"); printer->Print(vars, "class Channel;\n");
printer->Print(vars, "class RpcService;\n");
printer->Print(vars, "class ServerCompletionQueue;\n"); printer->Print(vars, "class ServerCompletionQueue;\n");
printer->Print(vars, "class ServerContext;\n"); printer->Print(vars, "class ServerContext;\n");
printer->Print(vars, "} // namespace grpc\n\n"); printer->Print(vars, "} // namespace grpc\n\n");
@ -324,7 +323,8 @@ void PrintHeaderClientMethodInterfaces(
} else if (ServerOnlyStreaming(method)) { } else if (ServerOnlyStreaming(method)) {
printer->Print( printer->Print(
*vars, *vars,
"virtual ::grpc::ClientReaderInterface< $Response$>* $Method$Raw(" "virtual ::grpc::ClientReaderInterface< $Response$>* "
"$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) = 0;\n"); "::grpc::ClientContext* context, const $Request$& request) = 0;\n");
for (auto async_prefix : async_prefixes) { for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix; (*vars)["AsyncPrefix"] = async_prefix.prefix;
@ -546,7 +546,8 @@ void PrintHeaderClientMethodData(grpc_generator::Printer *printer,
const grpc_generator::Method *method, const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) { std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name(); (*vars)["Method"] = method->name();
printer->Print(*vars, "const ::grpc::RpcMethod rpcmethod_$Method$_;\n"); printer->Print(*vars,
"const ::grpc::internal::RpcMethod rpcmethod_$Method$_;\n");
} }
void PrintHeaderServerMethodSync(grpc_generator::Printer *printer, void PrintHeaderServerMethodSync(grpc_generator::Printer *printer,
@ -718,7 +719,7 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Print(*vars, printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n" "WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n" " ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::StreamedUnaryHandler< $Request$, " " new ::grpc::internal::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind" "$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::" "(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, " "Streamed$Method$, this, std::placeholders::_1, "
@ -766,10 +767,11 @@ void PrintHeaderServerMethodSplitStreaming(
"{}\n"); "{}\n");
printer->Print(" public:\n"); printer->Print(" public:\n");
printer->Indent(); printer->Indent();
printer->Print(*vars, printer->Print(
*vars,
"WithSplitStreamingMethod_$Method$() {\n" "WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n" " ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::SplitServerStreamingHandler< $Request$, " " new ::grpc::internal::SplitServerStreamingHandler< $Request$, "
"$Response$>(std::bind" "$Response$>(std::bind"
"(&WithSplitStreamingMethod_$Method$<BaseClass>::" "(&WithSplitStreamingMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, " "Streamed$Method$, this, std::placeholders::_1, "
@ -914,7 +916,8 @@ void PrintHeaderService(grpc_generator::Printer *printer,
" {\n public:\n"); " {\n public:\n");
printer->Indent(); printer->Indent();
printer->Print( printer->Print(
"Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n"); "Stub(const std::shared_ptr< ::grpc::ChannelInterface>& "
"channel);\n");
for (int i = 0; i < service->method_count(); ++i) { for (int i = 0; i < service->method_count(); ++i) {
PrintHeaderClientMethod(printer, service->method(i).get(), vars, true); PrintHeaderClientMethod(printer, service->method(i).get(), vars, true);
} }
@ -1185,10 +1188,9 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientContext* context, " "::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) {\n"); "const $Request$& request, $Response$* response) {\n");
printer->Print(*vars, printer->Print(*vars,
" return ::grpc::BlockingUnaryCall(channel_.get(), " " return ::grpc::internal::BlockingUnaryCall"
"rpcmethod_$Method$_, " "(channel_.get(), rpcmethod_$Method$_, "
"context, request, response);\n" "context, request, response);\n}\n\n");
"}\n\n");
for (auto async_prefix : async_prefixes) { for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix; (*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start; (*vars)["AsyncStart"] = async_prefix.start;
@ -1198,10 +1200,11 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"ClientContext* context, " "ClientContext* context, "
"const $Request$& request, " "const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n"); "::grpc::CompletionQueue* cq) {\n");
printer->Print(*vars, printer->Print(
*vars,
" return " " return "
"::grpc::ClientAsyncResponseReader< $Response$>::Create(" "::grpc::internal::ClientAsyncResponseReaderFactory< $Response$>"
"channel_.get(), cq, " "::Create(channel_.get(), cq, "
"rpcmethod_$Method$_, " "rpcmethod_$Method$_, "
"context, request, $AsyncStart$);\n" "context, request, $AsyncStart$);\n"
"}\n\n"); "}\n\n");
@ -1211,8 +1214,9 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientWriter< $Request$>* " "::grpc::ClientWriter< $Request$>* "
"$ns$$Service$::Stub::$Method$Raw(" "$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, $Response$* response) {\n"); "::grpc::ClientContext* context, $Response$* response) {\n");
printer->Print(*vars, printer->Print(
" return new ::grpc::ClientWriter< $Request$>(" *vars,
" return ::grpc::internal::ClientWriterFactory< $Request$>::Create("
"channel_.get(), " "channel_.get(), "
"rpcmethod_$Method$_, " "rpcmethod_$Method$_, "
"context, response);\n" "context, response);\n"
@ -1227,9 +1231,10 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(" "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, " "::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n"); "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(*vars, printer->Print(
" return ::grpc::ClientAsyncWriter< $Request$>::Create(" *vars,
"channel_.get(), cq, " " return ::grpc::internal::ClientAsyncWriterFactory< $Request$>"
"::Create(channel_.get(), cq, "
"rpcmethod_$Method$_, " "rpcmethod_$Method$_, "
"context, response, $AsyncStart$$AsyncCreateArgs$);\n" "context, response, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n"); "}\n\n");
@ -1240,8 +1245,9 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientReader< $Response$>* " "::grpc::ClientReader< $Response$>* "
"$ns$$Service$::Stub::$Method$Raw(" "$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) {\n"); "::grpc::ClientContext* context, const $Request$& request) {\n");
printer->Print(*vars, printer->Print(
" return new ::grpc::ClientReader< $Response$>(" *vars,
" return ::grpc::internal::ClientReaderFactory< $Response$>::Create("
"channel_.get(), " "channel_.get(), "
"rpcmethod_$Method$_, " "rpcmethod_$Method$_, "
"context, request);\n" "context, request);\n"
@ -1257,9 +1263,10 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(" "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, " "::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n"); "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(*vars, printer->Print(
" return ::grpc::ClientAsyncReader< $Response$>::Create(" *vars,
"channel_.get(), cq, " " return ::grpc::internal::ClientAsyncReaderFactory< $Response$>"
"::Create(channel_.get(), cq, "
"rpcmethod_$Method$_, " "rpcmethod_$Method$_, "
"context, request, $AsyncStart$$AsyncCreateArgs$);\n" "context, request, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n"); "}\n\n");
@ -1270,8 +1277,8 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientReaderWriter< $Request$, $Response$>* " "::grpc::ClientReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::$Method$Raw(::grpc::ClientContext* context) {\n"); "$ns$$Service$::Stub::$Method$Raw(::grpc::ClientContext* context) {\n");
printer->Print(*vars, printer->Print(*vars,
" return new ::grpc::ClientReaderWriter< " " return ::grpc::internal::ClientReaderWriterFactory< "
"$Request$, $Response$>(" "$Request$, $Response$>::Create("
"channel_.get(), " "channel_.get(), "
"rpcmethod_$Method$_, " "rpcmethod_$Method$_, "
"context);\n" "context);\n"
@ -1286,10 +1293,10 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::" "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
"ClientContext* context, " "ClientContext* context, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n"); "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print( printer->Print(*vars,
*vars,
" return " " return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create(" "::grpc::internal::ClientAsyncReaderWriterFactory< "
"$Request$, $Response$>::Create("
"channel_.get(), cq, " "channel_.get(), cq, "
"rpcmethod_$Method$_, " "rpcmethod_$Method$_, "
"context, $AsyncStart$$AsyncCreateArgs$);\n" "context, $AsyncStart$$AsyncCreateArgs$);\n"
@ -1404,7 +1411,7 @@ void PrintSourceService(grpc_generator::Printer *printer,
printer->Print(*vars, printer->Print(*vars,
", rpcmethod_$Method$_(" ", rpcmethod_$Method$_("
"$prefix$$Service$_method_names[$Idx$], " "$prefix$$Service$_method_names[$Idx$], "
"::grpc::RpcMethod::$StreamingType$, " "::grpc::internal::RpcMethod::$StreamingType$, "
"channel" "channel"
")\n"); ")\n");
} }
@ -1427,38 +1434,38 @@ void PrintSourceService(grpc_generator::Printer *printer,
if (method->NoStreaming()) { if (method->NoStreaming()) {
printer->Print( printer->Print(
*vars, *vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n" "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n" " $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::NORMAL_RPC,\n" " ::grpc::internal::RpcMethod::NORMAL_RPC,\n"
" new ::grpc::RpcMethodHandler< $ns$$Service$::Service, " " new ::grpc::internal::RpcMethodHandler< $ns$$Service$::Service, "
"$Request$, " "$Request$, "
"$Response$>(\n" "$Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ClientOnlyStreaming(method.get())) { } else if (ClientOnlyStreaming(method.get())) {
printer->Print( printer->Print(
*vars, *vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n" "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n" " $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::CLIENT_STREAMING,\n" " ::grpc::internal::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::ClientStreamingHandler< " " new ::grpc::internal::ClientStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n" "$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ServerOnlyStreaming(method.get())) { } else if (ServerOnlyStreaming(method.get())) {
printer->Print( printer->Print(
*vars, *vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n" "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n" " $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::SERVER_STREAMING,\n" " ::grpc::internal::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::ServerStreamingHandler< " " new ::grpc::internal::ServerStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n" "$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (method->BidiStreaming()) { } else if (method->BidiStreaming()) {
printer->Print( printer->Print(
*vars, *vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n" "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n" " $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::BIDI_STREAMING,\n" " ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::BidiStreamingHandler< " " new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n" "$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} }

@ -0,0 +1,158 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#define DEFAULT_POLL_INTERVAL_MS 5000
// State for the (singleton) backup poller, which polls a dedicated pollset
// from the timer thread on behalf of client channels.
typedef struct backup_poller {
  grpc_timer polling_timer;         // fires every g_poll_interval_ms; runs run_poller()
  grpc_closure run_poller_closure;  // wraps run_poller(); re-armed by the timer
  grpc_closure shutdown_closure;    // wraps done_poller(); run on pollset shutdown
  gpr_mu* pollset_mu;               // mutex owned by \a pollset
  grpc_pollset* pollset;  // guarded by pollset_mu
  bool shutting_down;     // guarded by pollset_mu
  gpr_refcount refs;           // user refs: one per channel between start/stop
  gpr_refcount shutdown_refs;  // teardown refs: timer cancellation + pollset shutdown
} backup_poller;
// Protects one-time initialization of the globals below.
static gpr_once g_once = GPR_ONCE_INIT;
// Guards creation/detachment of g_poller.
static gpr_mu g_poller_mu;
static backup_poller* g_poller = NULL;  // guarded by g_poller_mu
// g_poll_interval_ms is set only once at the first time
// grpc_client_channel_start_backup_polling() is called, after that it is
// treated as const.  A value of 0 disables backup polling.
static int g_poll_interval_ms = DEFAULT_POLL_INTERVAL_MS;
// One-time initializer (invoked via gpr_once_init): sets up g_poller_mu and
// lets the GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS environment variable
// override the default poll interval.  Invalid values are logged and the
// default is kept.
static void init_globals(void) {  // (void): proper prototype, not K&R-style ()
  gpr_mu_init(&g_poller_mu);
  char* env = gpr_getenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS");
  if (env != NULL) {
    int poll_interval_ms = gpr_parse_nonnegative_int(env);
    if (poll_interval_ms == -1) {
      gpr_log(GPR_ERROR,
              "Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %s, "
              "default value %d will be used.",
              env, g_poll_interval_ms);
    } else {
      g_poll_interval_ms = poll_interval_ms;
    }
  }
  gpr_free(env);  // safe when env == NULL (no-op)
}
// Drops one shutdown ref on \a p; the final ref destroys the pollset and
// frees the poller itself.
static void backup_poller_shutdown_unref(grpc_exec_ctx* exec_ctx,
                                         backup_poller* p) {
  if (!gpr_unref(&p->shutdown_refs)) return;  // refs remain; nothing to do
  grpc_pollset_destroy(exec_ctx, p->pollset);
  gpr_free(p->pollset);
  gpr_free(p);
}
// Pollset-shutdown callback: releases the shutdown ref that was held for
// the pollset.
static void done_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
  backup_poller* p = (backup_poller*)arg;
  backup_poller_shutdown_unref(exec_ctx, p);
}
// Drops one user ref on the global poller.  On the last ref: detaches the
// poller from g_poller (so a later start() creates a fresh one), marks it
// shutting down, shuts down its pollset (done_poller releases one shutdown
// ref), and cancels the polling timer (run_poller sees the cancellation and
// releases the other shutdown ref).
// NOTE(review): g_poller->refs is read before taking g_poller_mu; presumably
// the caller's own ref keeps g_poller non-NULL here — confirm.
static void g_poller_unref(grpc_exec_ctx* exec_ctx) {
  if (gpr_unref(&g_poller->refs)) {
    gpr_mu_lock(&g_poller_mu);
    backup_poller* p = g_poller;
    g_poller = NULL;
    gpr_mu_unlock(&g_poller_mu);
    // Shut down the pollset under its own mutex, then cancel the timer.
    gpr_mu_lock(p->pollset_mu);
    p->shutting_down = true;
    grpc_pollset_shutdown(exec_ctx, p->pollset,
                          GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller,
                                            p, grpc_schedule_on_exec_ctx));
    gpr_mu_unlock(p->pollset_mu);
    grpc_timer_cancel(exec_ctx, &p->polling_timer);
  }
}
// Timer callback: performs one non-blocking iteration of pollset work, then
// re-arms the timer g_poll_interval_ms in the future.  On timer cancellation
// or poller shutdown it instead releases the shutdown ref held for the timer
// and stops rescheduling.
static void run_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
  backup_poller* p = (backup_poller*)arg;
  if (error != GRPC_ERROR_NONE) {
    // GRPC_ERROR_CANCELLED is the expected result of grpc_timer_cancel()
    // during shutdown; anything else is logged.
    if (error != GRPC_ERROR_CANCELLED) {
      GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
    }
    backup_poller_shutdown_unref(exec_ctx, p);
    return;
  }
  gpr_mu_lock(p->pollset_mu);
  if (p->shutting_down) {
    gpr_mu_unlock(p->pollset_mu);
    backup_poller_shutdown_unref(exec_ctx, p);
    return;
  }
  // Deadline == now, so this polls without blocking the timer thread.
  grpc_error* err = grpc_pollset_work(exec_ctx, p->pollset, NULL,
                                      grpc_exec_ctx_now(exec_ctx));
  gpr_mu_unlock(p->pollset_mu);
  GRPC_LOG_IF_ERROR("Run client channel backup poller", err);
  // Re-arm for the next polling round.
  grpc_timer_init(exec_ctx, &p->polling_timer,
                  grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
                  &p->run_poller_closure);
}
// Takes a ref on the global backup poller (creating and starting it if this
// is the first user) and adds its pollset to \a interested_parties so the
// channel makes progress even when the application is not polling.  Must be
// balanced by a grpc_client_channel_stop_backup_polling() call.
void grpc_client_channel_start_backup_polling(
    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
  gpr_once_init(&g_once, init_globals);
  // An interval of 0 disables backup polling entirely.
  if (g_poll_interval_ms == 0) {
    return;
  }
  gpr_mu_lock(&g_poller_mu);
  if (g_poller == NULL) {
    // First user: allocate the poller and arm the polling timer.
    g_poller = (backup_poller*)gpr_zalloc(sizeof(backup_poller));
    g_poller->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
    g_poller->shutting_down = false;
    grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu);
    // Starts at 0: the gpr_ref() below takes the first user ref.
    gpr_ref_init(&g_poller->refs, 0);
    // one for timer cancellation, one for pollset shutdown
    gpr_ref_init(&g_poller->shutdown_refs, 2);
    GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller,
                      grpc_schedule_on_exec_ctx);
    grpc_timer_init(exec_ctx, &g_poller->polling_timer,
                    grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
                    &g_poller->run_poller_closure);
  }
  gpr_ref(&g_poller->refs);
  gpr_mu_unlock(&g_poller_mu);
  // NOTE(review): g_poller is dereferenced after releasing g_poller_mu;
  // presumably safe because the ref taken above keeps it alive — confirm
  // against concurrent g_poller_unref().
  grpc_pollset_set_add_pollset(exec_ctx, interested_parties, g_poller->pollset);
}
// Removes the backup poller's pollset from \a interested_parties and drops
// the user ref taken by the matching
// grpc_client_channel_start_backup_polling() call.
void grpc_client_channel_stop_backup_polling(
    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
  // Mirrors the early-out in start(); no poller was ever created.
  if (g_poll_interval_ms == 0) {
    return;
  }
  // NOTE(review): g_poller is read without holding g_poller_mu; presumably
  // the caller's outstanding start() ref keeps it non-NULL here — confirm.
  grpc_pollset_set_del_pollset(exec_ctx, interested_parties, g_poller->pollset);
  g_poller_unref(exec_ctx);
}

@ -0,0 +1,34 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H

#include <grpc/grpc.h>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/exec_ctx.h"

/* Start polling \a interested_parties periodically in the timer thread.
   Each call must be balanced by a matching call to
   grpc_client_channel_stop_backup_polling(). */
void grpc_client_channel_start_backup_polling(
    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);

/* Stop polling \a interested_parties (pairs with a prior start call). */
void grpc_client_channel_stop_backup_polling(
    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);

#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H */

@ -31,6 +31,7 @@
#include <grpc/support/sync.h> #include <grpc/support/sync.h>
#include <grpc/support/useful.h> #include <grpc/support/useful.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" #include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
@ -712,6 +713,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties = grpc_pollset_set_create(); chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel"); "client_channel");
grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
// Record client channel factory. // Record client channel factory.
const grpc_arg *arg = grpc_channel_args_find(args->channel_args, const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY); GRPC_ARG_CLIENT_CHANNEL_FACTORY);
@ -790,6 +792,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
if (chand->method_params_table != NULL) { if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table); grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
} }
grpc_client_channel_stop_backup_polling(exec_ctx, chand->interested_parties);
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker); grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(exec_ctx, chand->interested_parties); grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel"); GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
@ -898,7 +901,7 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s", "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, calld->waiting_for_pick_batches_count, elem->channel_data, calld, calld->waiting_for_pick_batches_count,
grpc_error_string(error)); grpc_error_string(error));
} }
@ -940,7 +943,7 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
channel_data *chand = (channel_data *)elem->channel_data; channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p", " pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count, chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call); calld->subchannel_call);

@ -611,7 +611,6 @@ static void update_lb_connectivity_status_locked(
case GRPC_CHANNEL_SHUTDOWN: case GRPC_CHANNEL_SHUTDOWN:
GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE); GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
break; break;
case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_IDLE: case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_READY: case GRPC_CHANNEL_READY:
@ -1790,7 +1789,6 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
// embedded RR policy. Note that the current RR policy, if any, will stay in // embedded RR policy. Note that the current RR policy, if any, will stay in
// effect until an update from the new lb_call is received. // effect until an update from the new lb_call is received.
switch (glb_policy->lb_channel_connectivity) { switch (glb_policy->lb_channel_connectivity) {
case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: { case GRPC_CHANNEL_TRANSIENT_FAILURE: {
/* resub. */ /* resub. */

@ -20,6 +20,7 @@
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h" #include "src/core/ext/filters/client_channel/subchannel_index.h"
@ -42,103 +43,73 @@ typedef struct {
/** base policy: must be first */ /** base policy: must be first */
grpc_lb_policy base; grpc_lb_policy base;
/** all our subchannels */ /** all our subchannels */
grpc_subchannel **subchannels; grpc_lb_subchannel_list *subchannel_list;
grpc_subchannel **new_subchannels; /** latest pending subchannel list */
size_t num_subchannels; grpc_lb_subchannel_list *latest_pending_subchannel_list;
size_t num_new_subchannels; /** selected subchannel in \a subchannel_list */
grpc_lb_subchannel_data *selected;
grpc_closure connectivity_changed;
/** remaining members are protected by the combiner */
/** the selected channel */
grpc_connected_subchannel *selected;
/** the subchannel key for \a selected, or NULL if \a selected not set */
const grpc_subchannel_key *selected_key;
/** have we started picking? */ /** have we started picking? */
bool started_picking; bool started_picking;
/** are we shut down? */ /** are we shut down? */
bool shutdown; bool shutdown;
/** are we updating the selected subchannel? */
bool updating_selected;
/** are we updating the subchannel candidates? */
bool updating_subchannels;
/** args from the latest update received while already updating, or NULL */
grpc_lb_policy_args *pending_update_args;
/** which subchannel are we watching? */
size_t checking_subchannel;
/** what is the connectivity of that channel? */
grpc_connectivity_state checking_connectivity;
/** list of picks that are waiting on connectivity */ /** list of picks that are waiting on connectivity */
pending_pick *pending_picks; pending_pick *pending_picks;
/** our connectivity state tracker */ /** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker; grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy; } pick_first_lb_policy;
static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol; pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
GPR_ASSERT(p->pending_picks == NULL); GPR_ASSERT(p->pending_picks == NULL);
for (size_t i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first_destroy");
}
if (p->selected != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
"picked_first_destroy");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker); grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
if (p->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
}
gpr_free(p->subchannels);
gpr_free(p->new_subchannels);
gpr_free(p); gpr_free(p);
grpc_subchannel_index_unref();
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p); gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
} }
} }
static void fail_pending_picks_for_shutdown(grpc_exec_ctx *exec_ctx, static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
pick_first_lb_policy *p) { grpc_error *error) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
pending_pick *pp; pending_pick *pp;
while ((pp = p->pending_picks) != NULL) { while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next; p->pending_picks = pp->next;
*pp->target = NULL; *pp->target = NULL;
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pp); gpr_free(pp);
} }
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"shutdown");
if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"pf_shutdown");
p->subchannel_list = NULL;
}
if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
p->latest_pending_subchannel_list = NULL;
}
GRPC_ERROR_UNREF(error);
} }
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol; shutdown_locked(exec_ctx, (pick_first_lb_policy *)pol,
p->shutdown = true; GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
fail_pending_picks_for_shutdown(exec_ctx, p);
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown");
/* cancel subscription */
if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
} else if (p->num_subchannels > 0 && p->started_picking) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
}
} }
static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target, grpc_connected_subchannel **target,
grpc_error *error) { grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol; pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp; pending_pick *pp = p->pending_picks;
pp = p->pending_picks;
p->pending_picks = NULL; p->pending_picks = NULL;
while (pp != NULL) { while (pp != NULL) {
pending_pick *next = pp->next; pending_pick *next = pp->next;
@ -162,8 +133,7 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_eq, uint32_t initial_metadata_flags_eq,
grpc_error *error) { grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol; pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp; pending_pick *pp = p->pending_picks;
pp = p->pending_picks;
p->pending_picks = NULL; p->pending_picks = NULL;
while (pp != NULL) { while (pp != NULL) {
pending_pick *next = pp->next; pending_pick *next = pp->next;
@ -185,15 +155,12 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void start_picking_locked(grpc_exec_ctx *exec_ctx, static void start_picking_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) { pick_first_lb_policy *p) {
p->started_picking = true; p->started_picking = true;
if (p->subchannels != NULL) { if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
GPR_ASSERT(p->num_subchannels > 0); p->subchannel_list->checking_subchannel = 0;
p->checking_subchannel = 0; grpc_lb_subchannel_list_ref_for_connectivity_watch(
p->checking_connectivity = GRPC_CHANNEL_IDLE; p->subchannel_list, "connectivity_watch+start_picking");
GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity"); grpc_lb_subchannel_data_start_connectivity_watch(
grpc_subchannel_notify_on_state_change( exec_ctx, &p->subchannel_list->subchannels[0]);
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
} }
} }
@ -210,19 +177,17 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_call_context_element *context, void **user_data, grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) { grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol; pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp; // If we have a selected subchannel already, return synchronously.
/* Check atomically for a selected channel */
if (p->selected != NULL) { if (p->selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked"); *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
"picked");
return 1; return 1;
} }
// No subchannel selected yet, so handle asynchronously.
/* No subchannel selected yet, so try again */
if (!p->started_picking) { if (!p->started_picking) {
start_picking_locked(exec_ctx, p); start_picking_locked(exec_ctx, p);
} }
pp = (pending_pick *)gpr_malloc(sizeof(*pp)); pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks; pp->next = p->pending_picks;
pp->target = target; pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags; pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@ -231,19 +196,15 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0; return 0;
} }
static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx, static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) { pick_first_lb_policy *p) {
size_t num_subchannels = p->num_subchannels; for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
grpc_subchannel **subchannels = p->subchannels; grpc_lb_subchannel_data *sd = &p->subchannel_list->subchannels[i];
if (p->selected != sd) {
p->num_subchannels = 0; grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
p->subchannels = NULL; "selected_different_subchannel");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels"); }
for (size_t i = 0; i < num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
} }
gpr_free(subchannels);
} }
static grpc_connectivity_state pf_check_connectivity_locked( static grpc_connectivity_state pf_check_connectivity_locked(
@ -265,46 +226,24 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) { grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol; pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
if (p->selected) { if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure); grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
closure);
} else { } else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected")); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
} }
} }
/* unsubscribe all subchannels */ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx, grpc_error *error);
pick_first_lb_policy *p) {
if (p->num_subchannels > 0) {
GPR_ASSERT(p->selected == NULL);
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p unsubscribing from subchannel %p",
(void *)p, (void *)p->subchannels[p->checking_subchannel]);
}
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
p->updating_subchannels = true;
} else if (p->selected != NULL) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG,
"Pick First %p unsubscribing from selected subchannel %p",
(void *)p, (void *)p->selected);
}
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
p->updating_selected = true;
}
}
/* true upon success */
static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) { const grpc_lb_policy_args *args) {
pick_first_lb_policy *p = (pick_first_lb_policy *)policy; pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
const grpc_arg *arg = const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) { if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
if (p->subchannels == NULL) { if (p->subchannel_list == NULL) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE. // If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -321,270 +260,222 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
} }
const grpc_lb_addresses *addresses = const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p; (const grpc_lb_addresses *)arg->value.pointer.p;
if (addresses->num_addresses == 0) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
// Empty update. Unsubscribe from all current subchannels and put the gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
// channel in TRANSIENT_FAILURE. (void *)p, (unsigned long)addresses->num_addresses);
}
grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
pf_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
// Empty update or no valid subchannels. Unsubscribe from all current
// subchannels and put the channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty"); "pf_update_empty");
stop_connectivity_watchers(exec_ctx, p); if (p->subchannel_list != NULL) {
return; grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
} }
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { p->subchannel_list = subchannel_list; // Empty list.
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses", p->selected = NULL;
(void *)p, (unsigned long)addresses->num_addresses); return;
}
grpc_subchannel_args *sc_args = (grpc_subchannel_args *)gpr_zalloc(
sizeof(*sc_args) * addresses->num_addresses);
/* We remove the following keys in order for subchannel keys belonging to
* subchannels point to the same address to match. */
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
size_t sc_args_count = 0;
/* Create list of subchannel args for new addresses in \a args. */
for (size_t i = 0; i < addresses->num_addresses; i++) {
// If there were any balancer, we would have chosen grpclb policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
} }
grpc_arg addr_arg = if (p->selected == NULL) {
grpc_create_subchannel_address_arg(&addresses->addresses[i].address); // We don't yet have a selected subchannel, so replace the current
grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove( // subchannel list immediately.
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg, if (p->subchannel_list != NULL) {
1); grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
gpr_free(addr_arg.value.string); "pf_update_before_selected");
sc_args[sc_args_count++].args = new_args;
} }
p->subchannel_list = subchannel_list;
/* Check if p->selected is amongst them. If so, we are done. */ } else {
if (p->selected != NULL) { // We do have a selected subchannel.
GPR_ASSERT(p->selected_key != NULL); // Check if it's present in the new list. If so, we're done.
for (size_t i = 0; i < sc_args_count; i++) { for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
grpc_subchannel_key *ith_sc_key = grpc_subchannel_key_create(&sc_args[i]); grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
const bool found_selected = if (sd->subchannel == p->selected->subchannel) {
grpc_subchannel_key_compare(p->selected_key, ith_sc_key) == 0;
grpc_subchannel_key_destroy(exec_ctx, ith_sc_key);
if (found_selected) {
// The currently selected subchannel is in the update: we are done. // The currently selected subchannel is in the update: we are done.
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
"Pick First %p found already selected subchannel %p amongst " "Pick First %p found already selected subchannel %p "
"updates. Update done.", "at update index %" PRIuPTR " of %" PRIuPTR "; update done",
(void *)p, (void *)p->selected); p, p->selected->subchannel, i,
} subchannel_list->num_subchannels);
for (size_t j = 0; j < sc_args_count; j++) { }
grpc_channel_args_destroy(exec_ctx, grpc_lb_subchannel_list_ref_for_connectivity_watch(
(grpc_channel_args *)sc_args[j].args); subchannel_list, "connectivity_watch+replace_selected");
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "pf_update_includes_selected");
}
p->subchannel_list = subchannel_list;
if (p->selected->connected_subchannel != NULL) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "pf_update_includes_selected");
}
p->selected = sd;
destroy_unselected_subchannels_locked(exec_ctx, p);
// If there was a previously pending update (which may or may
// not have contained the currently selected subchannel), drop
// it, so that it doesn't override what we've done here.
if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"pf_update_includes_selected+outdated");
p->latest_pending_subchannel_list = NULL;
} }
gpr_free(sc_args);
return; return;
} }
} }
} // Not keeping the previous selected subchannel, so set the latest
// We only check for already running updates here because if the previous // pending subchannel list to the new subchannel list. We will wait
// steps were successful, the update can be considered done without any // for it to report READY before swapping it into the current
// interference (ie, no callbacks were scheduled). // subchannel list.
if (p->updating_selected || p->updating_subchannels) { if (p->latest_pending_subchannel_list != NULL) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, gpr_log(GPR_DEBUG,
"Update already in progress for pick first %p. Deferring update.", "Pick First %p Shutting down latest pending subchannel list "
(void *)p); "%p, about to be replaced by newer latest %p",
} (void *)p, (void *)p->latest_pending_subchannel_list,
if (p->pending_update_args != NULL) { (void *)subchannel_list);
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
} }
p->pending_update_args = grpc_lb_subchannel_list_shutdown_and_unref(
(grpc_lb_policy_args *)gpr_zalloc(sizeof(*p->pending_update_args)); exec_ctx, p->latest_pending_subchannel_list,
p->pending_update_args->client_channel_factory = "sl_outdated_dont_smash");
args->client_channel_factory;
p->pending_update_args->args = grpc_channel_args_copy(args->args);
p->pending_update_args->combiner = args->combiner;
return;
} }
/* Create the subchannels for the new subchannel args/addresses. */ p->latest_pending_subchannel_list = subchannel_list;
grpc_subchannel **new_subchannels =
(grpc_subchannel **)gpr_zalloc(sizeof(*new_subchannels) * sc_args_count);
size_t num_new_subchannels = 0;
for (size_t i = 0; i < sc_args_count; i++) {
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args[i]);
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_INFO,
"Pick First %p created subchannel %p for address uri %s",
(void *)p, (void *)subchannel, address_uri);
gpr_free(address_uri);
}
grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)sc_args[i].args);
if (subchannel != NULL) new_subchannels[num_new_subchannels++] = subchannel;
}
gpr_free(sc_args);
if (num_new_subchannels == 0) {
gpr_free(new_subchannels);
// Empty update. Unsubscribe from all current subchannels and put the
// channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No valid addresses in update"),
"pf_update_no_valid_addresses");
stop_connectivity_watchers(exec_ctx, p);
return;
} }
// If we've started picking, start trying to connect to the first
/* Destroy the current subchannels. Repurpose pf_shutdown/destroy. */ // subchannel in the new list.
stop_connectivity_watchers(exec_ctx, p);
/* Save new subchannels. The switch over will happen in
* pf_connectivity_changed_locked */
if (p->updating_selected || p->updating_subchannels) {
p->num_new_subchannels = num_new_subchannels;
p->new_subchannels = new_subchannels;
} else { /* nothing is updating. Get things moving from here */
p->num_subchannels = num_new_subchannels;
p->subchannels = new_subchannels;
p->new_subchannels = NULL;
p->num_new_subchannels = 0;
if (p->started_picking) { if (p->started_picking) {
p->checking_subchannel = 0; grpc_lb_subchannel_list_ref_for_connectivity_watch(
p->checking_connectivity = GRPC_CHANNEL_IDLE; subchannel_list, "connectivity_watch+update");
grpc_subchannel_notify_on_state_change( grpc_lb_subchannel_data_start_connectivity_watch(
exec_ctx, p->subchannels[p->checking_subchannel], exec_ctx, &subchannel_list->subchannels[0]);
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
}
} }
} }
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg, static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)arg; grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
grpc_subchannel *selected_subchannel; pick_first_lb_policy *p = (pick_first_lb_policy *)sd->subchannel_list->policy;
pending_pick *pp;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(
GPR_DEBUG,
"Pick First %p connectivity changed. Updating selected: %d; Updating "
"subchannels: %d; Checking %lu index (%lu total); State: %d; ",
(void *)p, p->updating_selected, p->updating_subchannels,
(unsigned long)p->checking_subchannel,
(unsigned long)p->num_subchannels, p->checking_connectivity);
}
bool restart = false;
if (p->updating_selected && error != GRPC_ERROR_NONE) {
/* Captured the unsubscription for p->selected */
GPR_ASSERT(p->selected != NULL);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
"pf_update_connectivity");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p unreffing selected subchannel %p", gpr_log(GPR_DEBUG,
(void *)p, (void *)p->selected); "Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
} " of %" PRIuPTR
p->updating_selected = false; "), subchannel_list %p: state=%s p->shutdown=%d "
if (p->num_new_subchannels == 0) { "sd->subchannel_list->shutting_down=%d error=%s",
p->selected = NULL; (void *)p, (void *)sd->subchannel,
sd->subchannel_list->checking_subchannel,
sd->subchannel_list->num_subchannels, (void *)sd->subchannel_list,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
grpc_error_string(error));
}
// If the policy is shutting down, unref and return.
if (p->shutdown) {
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_shutdown");
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_shutdown");
return; return;
} }
restart = true; // If the subchannel list is shutting down, stop watching.
} if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
if (p->updating_subchannels && error != GRPC_ERROR_NONE) { grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
/* Captured the unsubscription for the checking subchannel */ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_sl_shutdown");
GPR_ASSERT(p->selected == NULL); grpc_lb_subchannel_list_unref_for_connectivity_watch(
for (size_t i = 0; i < p->num_subchannels; i++) { exec_ctx, sd->subchannel_list, "pf_sl_shutdown");
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i],
"pf_update_connectivity");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p unreffing subchannel %p", (void *)p,
(void *)p->subchannels[i]);
}
}
gpr_free(p->subchannels);
p->subchannels = NULL;
p->num_subchannels = 0;
p->updating_subchannels = false;
if (p->num_new_subchannels == 0) return;
restart = true;
}
if (restart) {
p->selected = NULL;
p->selected_key = NULL;
GPR_ASSERT(p->new_subchannels != NULL);
GPR_ASSERT(p->num_new_subchannels > 0);
p->num_subchannels = p->num_new_subchannels;
p->subchannels = p->new_subchannels;
p->num_new_subchannels = 0;
p->new_subchannels = NULL;
if (p->started_picking) {
/* If we were picking, continue to do so over the new subchannels,
* starting from the 0th index. */
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
/* reuses the weak ref from start_picking_locked */
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
}
if (p->pending_update_args != NULL) {
const grpc_lb_policy_args *args = p->pending_update_args;
p->pending_update_args = NULL;
pf_update_locked(exec_ctx, &p->base, args);
}
return; return;
} }
GRPC_ERROR_REF(error); // If we're still here, the notification must be for a subchannel in
if (p->shutdown) { // either the current or latest pending subchannel lists.
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity"); GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
GRPC_ERROR_UNREF(error); sd->subchannel_list == p->latest_pending_subchannel_list);
return; // Update state.
} else if (p->selected != NULL) { sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) { // Handle updates for the currently selected subchannel.
if (p->selected == sd) {
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
p->latest_pending_subchannel_list != NULL) {
p->selected = NULL;
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL;
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
} else {
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */ /* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN; sd->curr_connectivity_state = GRPC_CHANNEL_SHUTDOWN;
} }
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
p->checking_connectivity, GRPC_ERROR_REF(error), sd->curr_connectivity_state,
"selected_changed"); GRPC_ERROR_REF(error), "selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) { if (sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_connected_subchannel_notify_on_state_change( // Renew notification.
exec_ctx, p->selected, p->base.interested_parties, grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
&p->checking_connectivity, &p->connectivity_changed);
} else { } else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity"); grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_selected_shutdown");
shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
} }
} else { }
loop: return;
switch (p->checking_connectivity) { }
case GRPC_CHANNEL_INIT: // If we get here, there are two possible cases:
GPR_UNREACHABLE_CODE(return ); // 1. We do not currently have a selected subchannel, and the update is
case GRPC_CHANNEL_READY: // for a subchannel in p->subchannel_list that we're trying to
// connect to. The goal here is to find a subchannel that we can
// select.
// 2. We do currently have a selected subchannel, and the update is
// for a subchannel in p->latest_pending_subchannel_list. The
// goal here is to find a subchannel from the update that we can
// select in place of the current one.
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
}
while (true) {
switch (sd->curr_connectivity_state) {
case GRPC_CHANNEL_READY: {
// Case 2. Promote p->latest_pending_subchannel_list to
// p->subchannel_list.
if (sd->subchannel_list == p->latest_pending_subchannel_list) {
GPR_ASSERT(p->subchannel_list != NULL);
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "finish_update");
p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL;
}
// Cases 1 and 2.
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE, GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
"connecting_ready"); "connecting_ready");
selected_subchannel = p->subchannels[p->checking_subchannel]; sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected = GRPC_CONNECTED_SUBCHANNEL_REF( grpc_subchannel_get_connected_subchannel(sd->subchannel),
grpc_subchannel_get_connected_subchannel(selected_subchannel), "connected");
"picked_first"); p->selected = sd;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void *)p,
"Pick First %p selected subchannel %p (connected %p)", (void *)sd->subchannel);
(void *)p, (void *)selected_subchannel, (void *)p->selected); }
} // Drop all other subchannels, since we are now connected.
p->selected_key = grpc_subchannel_get_key(selected_subchannel); destroy_unselected_subchannels_locked(exec_ctx, p);
/* drop the pick list: we are connected now */ // Update any calls that were waiting for a pick.
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels"); pending_pick *pp;
destroy_subchannels_locked(exec_ctx, p);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) { while ((pp = p->pending_picks)) {
p->pending_picks = pp->next; p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked"); *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p", "Servicing pending pick with selected subchannel %p",
@ -593,71 +484,86 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp); gpr_free(pp);
} }
grpc_connected_subchannel_notify_on_state_change( // Renew notification.
exec_ctx, p->selected, p->base.interested_parties, grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
&p->checking_connectivity, &p->connectivity_changed); return;
break; }
case GRPC_CHANNEL_TRANSIENT_FAILURE: case GRPC_CHANNEL_TRANSIENT_FAILURE: {
p->checking_subchannel = do {
(p->checking_subchannel + 1) % p->num_subchannels; sd->subchannel_list->checking_subchannel =
if (p->checking_subchannel == 0) { (sd->subchannel_list->checking_subchannel + 1) %
/* only trigger transient failure when we've tried all alternatives sd->subchannel_list->num_subchannels;
*/ sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
} while (sd->subchannel == NULL);
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels.
if (sd->subchannel_list->checking_subchannel == 0 &&
sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure"); GRPC_ERROR_REF(error), "connecting_transient_failure");
} }
sd->curr_connectivity_state =
grpc_subchannel_check_connectivity(sd->subchannel, &error);
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
p->checking_connectivity = grpc_subchannel_check_connectivity( if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
p->subchannels[p->checking_subchannel], &error); // Reuses the connectivity refs from the previous watch.
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) { grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
grpc_subchannel_notify_on_state_change( return;
exec_ctx, p->subchannels[p->checking_subchannel], }
p->base.interested_parties, &p->checking_connectivity, break; // Go back to top of loop.
&p->connectivity_changed);
} else {
goto loop;
} }
break;
case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE: case GRPC_CHANNEL_IDLE: {
// Only update connectivity state in case 1.
if (sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING, exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_REF(error), "connecting_changed"); GRPC_ERROR_REF(error), "connecting_changed");
grpc_subchannel_notify_on_state_change( }
exec_ctx, p->subchannels[p->checking_subchannel], // Renew notification.
p->base.interested_parties, &p->checking_connectivity, grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
&p->connectivity_changed); return;
break; }
case GRPC_CHANNEL_SHUTDOWN: case GRPC_CHANNEL_SHUTDOWN: {
p->num_subchannels--; grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel], "pf_candidate_shutdown");
p->subchannels[p->num_subchannels]); // Advance to next subchannel and check its state.
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels], grpc_lb_subchannel_data *original_sd = sd;
"pick_first"); do {
if (p->num_subchannels == 0) { sd->subchannel_list->checking_subchannel =
grpc_connectivity_state_set( (sd->subchannel_list->checking_subchannel + 1) %
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN, sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
} while (sd->subchannel == NULL && sd != original_sd);
if (sd == original_sd) {
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
shutdown_locked(exec_ctx, p,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick first exhausted channels", &error, 1), "Pick first exhausted channels", &error, 1));
"no_more_channels"); return;
fail_pending_picks_for_shutdown(exec_ctx, p); }
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, if (sd->subchannel_list == p->subchannel_list) {
"pick_first_connectivity");
} else {
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "subchannel_failed"); GRPC_ERROR_REF(error), "subchannel_failed");
p->checking_subchannel %= p->num_subchannels; }
sd->curr_connectivity_state =
grpc_subchannel_check_connectivity(sd->subchannel, &error);
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
p->checking_connectivity = grpc_subchannel_check_connectivity( if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
p->subchannels[p->checking_subchannel], &error); // Reuses the connectivity refs from the previous watch.
goto loop; grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
return;
}
// For any other state, go back to top of loop.
// We will reuse the connectivity refs from the previous watch.
} }
} }
} }
GRPC_ERROR_UNREF(error);
} }
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = { static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
@ -687,8 +593,6 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
pf_update_locked(exec_ctx, &p->base, args); pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner); grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref(); grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
grpc_combiner_scheduler(args->combiner));
return &p->base; return &p->base;
} }

@ -28,6 +28,7 @@
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h" #include "src/core/ext/filters/client_channel/subchannel_index.h"
@ -64,12 +65,11 @@ typedef struct pending_pick {
grpc_closure *on_complete; grpc_closure *on_complete;
} pending_pick; } pending_pick;
typedef struct rr_subchannel_list rr_subchannel_list;
typedef struct round_robin_lb_policy { typedef struct round_robin_lb_policy {
/** base policy: must be first */ /** base policy: must be first */
grpc_lb_policy base; grpc_lb_policy base;
rr_subchannel_list *subchannel_list; grpc_lb_subchannel_list *subchannel_list;
/** have we started picking? */ /** have we started picking? */
bool started_picking; bool started_picking;
@ -89,157 +89,9 @@ typedef struct round_robin_lb_policy {
* lists if they equal \a latest_pending_subchannel_list. In other words, * lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any * racing callbacks that reference outdated subchannel lists won't perform any
* update. */ * update. */
rr_subchannel_list *latest_pending_subchannel_list; grpc_lb_subchannel_list *latest_pending_subchannel_list;
} round_robin_lb_policy; } round_robin_lb_policy;
typedef struct {
/** backpointer to owning subchannel list */
rr_subchannel_list *subchannel_list;
/** subchannel itself */
grpc_subchannel *subchannel;
/** notification that connectivity has changed on subchannel */
grpc_closure connectivity_changed_closure;
/** last observed connectivity. Not updated by
* \a grpc_subchannel_notify_on_state_change. Used to determine the previous
* state while processing the new state in \a rr_connectivity_changed */
grpc_connectivity_state prev_connectivity_state;
/** current connectivity state. Updated by \a
* grpc_subchannel_notify_on_state_change */
grpc_connectivity_state curr_connectivity_state;
/** connectivity state to be updated by the watcher, not guarded by
* the combiner. Will be moved to curr_connectivity_state inside of
* the combiner by rr_connectivity_changed_locked(). */
grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
void *user_data;
/** vtable to operate over \a user_data */
const grpc_lb_user_data_vtable *user_data_vtable;
} subchannel_data;
struct rr_subchannel_list {
/** backpointer to owning policy */
round_robin_lb_policy *policy;
/** all our subchannels */
size_t num_subchannels;
subchannel_data *subchannels;
/** how many subchannels are in state READY */
size_t num_ready;
/** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
/** how many subchannels are in state SHUTDOWN */
size_t num_shutdown;
/** how many subchannels are in state IDLE */
size_t num_idle;
/** There will be one ref for each entry in subchannels for which there is a
* pending connectivity state watcher callback. */
gpr_refcount refcount;
/** Is this list shutting down? This may be true due to the shutdown of the
* policy itself or because a newer update has arrived while this one hadn't
* finished processing. */
bool shutting_down;
};
static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
size_t num_subchannels) {
rr_subchannel_list *subchannel_list =
(rr_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
subchannel_list->policy = p;
subchannel_list->subchannels =
(subchannel_data *)gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
subchannel_list->num_subchannels = num_subchannels;
gpr_ref_init(&subchannel_list->refcount, 1);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Created subchannel list %p for %lu subchannels",
(void *)p, (void *)subchannel_list, (unsigned long)num_subchannels);
}
return subchannel_list;
}
static void rr_subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
rr_subchannel_list *subchannel_list) {
GPR_ASSERT(subchannel_list->shutting_down);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Destroying subchannel_list %p",
(void *)subchannel_list->policy, (void *)subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
subchannel_data *sd = &subchannel_list->subchannels[i];
if (sd->subchannel != NULL) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel,
"rr_subchannel_list_destroy");
}
sd->subchannel = NULL;
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
sd->user_data = NULL;
}
}
gpr_free(subchannel_list->subchannels);
gpr_free(subchannel_list);
}
static void rr_subchannel_list_ref(rr_subchannel_list *subchannel_list,
const char *reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_INFO, "[RR %p] subchannel_list %p REF %lu->%lu (%s)",
(void *)subchannel_list->policy, (void *)subchannel_list,
(unsigned long)(count - 1), (unsigned long)count, reason);
}
}
static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
rr_subchannel_list *subchannel_list,
const char *reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_INFO, "[RR %p] subchannel_list %p UNREF %lu->%lu (%s)",
(void *)subchannel_list->policy, (void *)subchannel_list,
(unsigned long)(count + 1), (unsigned long)count, reason);
}
if (done) {
rr_subchannel_list_destroy(exec_ctx, subchannel_list);
}
}
/** Mark \a subchannel_list as discarded. Unsubscribes all its subchannels. The
* watcher's callback will ultimately unref \a subchannel_list. */
static void rr_subchannel_list_shutdown_and_unref(
grpc_exec_ctx *exec_ctx, rr_subchannel_list *subchannel_list,
const char *reason) {
GPR_ASSERT(!subchannel_list->shutting_down);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down subchannel_list %p (%s)",
(void *)subchannel_list->policy, (void *)subchannel_list, reason);
}
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
subchannel_data *sd = &subchannel_list->subchannels[i];
if (sd->subchannel != NULL) { // if subchannel isn't shutdown, unsubscribe.
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
"[RR %p] Unsubscribing from subchannel %p as part of shutting down "
"subchannel_list %p",
(void *)subchannel_list->policy, (void *)sd->subchannel,
(void *)subchannel_list);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL,
NULL,
&sd->connectivity_changed_closure);
}
}
rr_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}
/** Returns the index into p->subchannel_list->subchannels of the next /** Returns the index into p->subchannel_list->subchannels of the next
* subchannel in READY state, or p->subchannel_list->num_subchannels if no * subchannel in READY state, or p->subchannel_list->num_subchannels if no
* subchannel is READY. * subchannel is READY.
@ -299,8 +151,8 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)", "[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void *)p, (unsigned long)last_ready_index, (void *)p, (unsigned long)last_ready_index,
(void *)p->subchannel_list->subchannels[last_ready_index].subchannel, (void *)p->subchannel_list->subchannels[last_ready_index].subchannel,
(void *)grpc_subchannel_get_connected_subchannel( (void *)p->subchannel_list->subchannels[last_ready_index]
p->subchannel_list->subchannels[last_ready_index].subchannel)); .connected_subchannel);
} }
} }
@ -310,47 +162,47 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p", gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void *)pol, (void *)pol); (void *)pol, (void *)pol);
} }
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker); grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref(); grpc_subchannel_index_unref();
gpr_free(p); gpr_free(p);
} }
static void fail_pending_picks_for_shutdown(grpc_exec_ctx *exec_ctx, static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
round_robin_lb_policy *p) { grpc_error *error) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick *pp; pending_pick *pp;
while ((pp = p->pending_picks) != NULL) { while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next; p->pending_picks = pp->next;
*pp->target = NULL; *pp->target = NULL;
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pp); gpr_free(pp);
} }
} grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { "rr_shutdown");
round_robin_lb_policy *p = (round_robin_lb_policy *)pol; if (p->subchannel_list != NULL) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p",
(void *)pol, (void *)pol);
}
p->shutdown = true;
fail_pending_picks_for_shutdown(exec_ctx, p);
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
const bool latest_is_current =
p->subchannel_list == p->latest_pending_subchannel_list;
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_rr_shutdown"); "sl_shutdown_rr_shutdown");
p->subchannel_list = NULL; p->subchannel_list = NULL;
if (!latest_is_current && p->latest_pending_subchannel_list != NULL && }
!p->latest_pending_subchannel_list->shutting_down) { if (p->latest_pending_subchannel_list != NULL) {
rr_subchannel_list_shutdown_and_unref(exec_ctx, grpc_lb_subchannel_list_shutdown_and_unref(
p->latest_pending_subchannel_list, exec_ctx, p->latest_pending_subchannel_list,
"sl_shutdown_pending_rr_shutdown"); "sl_shutdown_pending_rr_shutdown");
p->latest_pending_subchannel_list = NULL; p->latest_pending_subchannel_list = NULL;
} }
GRPC_ERROR_UNREF(error);
}
static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
shutdown_locked(exec_ctx, p,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
} }
static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
@ -405,13 +257,10 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p) { round_robin_lb_policy *p) {
p->started_picking = true; p->started_picking = true;
for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) { for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
subchannel_data *sd = &p->subchannel_list->subchannels[i]; grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
GRPC_LB_POLICY_WEAK_REF(&p->base, "start_picking_locked"); "connectivity_watch");
rr_subchannel_list_ref(sd->subchannel_list, "started_picking"); grpc_lb_subchannel_data_start_connectivity_watch(
grpc_subchannel_notify_on_state_change( exec_ctx, &p->subchannel_list->subchannels[i]);
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
} }
} }
@ -436,10 +285,10 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p); const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) { if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */ /* readily available, report right away */
subchannel_data *sd = &p->subchannel_list->subchannels[next_ready_index]; grpc_lb_subchannel_data *sd =
*target = GRPC_CONNECTED_SUBCHANNEL_REF( &p->subchannel_list->subchannels[next_ready_index];
grpc_subchannel_get_connected_subchannel(sd->subchannel), *target =
"rr_picked"); GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
if (user_data != NULL) { if (user_data != NULL) {
*user_data = sd->user_data; *user_data = sd->user_data;
} }
@ -470,8 +319,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0; return 0;
} }
static void update_state_counters_locked(subchannel_data *sd) { static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
rr_subchannel_list *subchannel_list = sd->subchannel_list; grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) { if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0); GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready; --subchannel_list->num_ready;
@ -485,6 +334,7 @@ static void update_state_counters_locked(subchannel_data *sd) {
GPR_ASSERT(subchannel_list->num_idle > 0); GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle; --subchannel_list->num_idle;
} }
sd->prev_connectivity_state = sd->curr_connectivity_state;
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) { if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
++subchannel_list->num_ready; ++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { } else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
@ -497,12 +347,12 @@ static void update_state_counters_locked(subchannel_data *sd) {
} }
/** Sets the policy's connectivity status based on that of the passed-in \a sd /** Sets the policy's connectivity status based on that of the passed-in \a sd
* (the subchannel_data associted with the updated subchannel) and the * (the grpc_lb_subchannel_data associted with the updated subchannel) and the
* subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be * subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be
* used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the * used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
* connectivity status set. */ * connectivity status set. */
static grpc_connectivity_state update_lb_connectivity_status_locked( static grpc_connectivity_state update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) { grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, grpc_error *error) {
/* In priority order. The first rule to match terminates the search (ie, if we /* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled). * are on rule n, all previous rules were unfulfilled).
* *
@ -524,8 +374,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: p->num_idle == p->subchannel_list->num_subchannels. * CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/ */
grpc_connectivity_state new_state = sd->curr_connectivity_state; grpc_connectivity_state new_state = sd->curr_connectivity_state;
rr_subchannel_list *subchannel_list = sd->subchannel_list; grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
round_robin_lb_policy *p = subchannel_list->policy; round_robin_lb_policy *p = (round_robin_lb_policy *)subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */ if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY, grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready"); GRPC_ERROR_NONE, "rr_ready");
@ -561,8 +411,9 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg, static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
subchannel_data *sd = (subchannel_data *)arg; grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
round_robin_lb_policy *p = sd->subchannel_list->policy; round_robin_lb_policy *p =
(round_robin_lb_policy *)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_DEBUG,
@ -577,65 +428,50 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
} }
// If the policy is shutting down, unref and return. // If the policy is shutting down, unref and return.
if (p->shutdown) { if (p->shutdown) {
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
"pol_shutdown+started_picking"); grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_shutdown");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pol_shutdown"); grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "rr_shutdown");
return; return;
} }
if (sd->subchannel_list->shutting_down && error == GRPC_ERROR_CANCELLED) { // If the subchannel list is shutting down, stop watching.
// the subchannel list associated with sd has been discarded. This callback if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
// corresponds to the unsubscription. The unrefs correspond to the picking grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
// ref (start_picking_locked or update_started_picking). grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_sl_shutdown");
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, grpc_lb_subchannel_list_unref_for_connectivity_watch(
"sl_shutdown+started_picking"); exec_ctx, sd->subchannel_list, "rr_sl_shutdown");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown+picking");
return;
}
// Dispose of outdated subchannel lists.
if (sd->subchannel_list != p->subchannel_list &&
sd->subchannel_list != p->latest_pending_subchannel_list) {
const char *reason = NULL;
if (sd->subchannel_list->shutting_down) {
reason = "sl_outdated_straggler";
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason);
} else {
reason = "sl_outdated";
rr_subchannel_list_shutdown_and_unref(exec_ctx, sd->subchannel_list,
reason);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, reason);
return; return;
} }
// If we're still here, the notification must be for a subchannel in
// either the current or latest pending subchannel lists.
GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
sd->subchannel_list == p->latest_pending_subchannel_list);
// Now that we're inside the combiner, copy the pending connectivity // Now that we're inside the combiner, copy the pending connectivity
// state (which was set by the connectivity state watcher) to // state (which was set by the connectivity state watcher) to
// curr_connectivity_state, which is what we use inside of the combiner. // curr_connectivity_state, which is what we use inside of the combiner.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe; sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Update state counters and determine new overall state. // Update state counters and determine new overall state.
update_state_counters_locked(sd); update_state_counters_locked(sd);
sd->prev_connectivity_state = sd->curr_connectivity_state;
const grpc_connectivity_state new_policy_connectivity_state = const grpc_connectivity_state new_policy_connectivity_state =
update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error)); update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
// If the sd's new state is SHUTDOWN, unref the subchannel, and if the new // If the sd's new state is SHUTDOWN, unref the subchannel, and if the new
// policy's state is SHUTDOWN, clean up. // policy's state is SHUTDOWN, clean up.
if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown"); grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
sd->subchannel = NULL; grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
if (sd->user_data != NULL) { "rr_connectivity_shutdown");
GPR_ASSERT(sd->user_data_vtable != NULL); grpc_lb_subchannel_list_unref_for_connectivity_watch(
sd->user_data_vtable->destroy(exec_ctx, sd->user_data); exec_ctx, sd->subchannel_list, "rr_connectivity_shutdown");
sd->user_data = NULL;
}
if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
// The policy is shutting down. Fail all of the pending picks. shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
fail_pending_picks_for_shutdown(exec_ctx, p); }
}
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
"sd_shutdown+started_picking");
// unref the "rr_connectivity_update" weak ref from start_picking.
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
"rr_connectivity_sd_shutdown");
} else { // sd not in SHUTDOWN } else { // sd not in SHUTDOWN
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) { if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
if (sd->connected_subchannel == NULL) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"connected");
}
if (sd->subchannel_list != p->subchannel_list) { if (sd->subchannel_list != p->subchannel_list) {
// promote sd->subchannel_list to p->subchannel_list. // promote sd->subchannel_list to p->subchannel_list.
// sd->subchannel_list must be equal to // sd->subchannel_list must be equal to
@ -656,8 +492,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
} }
if (p->subchannel_list != NULL) { if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list // dispose of the current subchannel_list
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list, grpc_lb_subchannel_list_shutdown_and_unref(
"sl_phase_out_shutdown"); exec_ctx, p->subchannel_list, "sl_phase_out_shutdown");
} }
p->subchannel_list = p->latest_pending_subchannel_list; p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL; p->latest_pending_subchannel_list = NULL;
@ -667,7 +503,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
* p->pending_picks. This preemtively replicates rr_pick()'s actions. */ * p->pending_picks. This preemtively replicates rr_pick()'s actions. */
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p); const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels); GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
subchannel_data *selected = grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index]; &p->subchannel_list->subchannels[next_ready_index];
if (p->pending_picks != NULL) { if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending // if the selected subchannel is going to be used for the pending
@ -678,8 +514,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) { while ((pp = p->pending_picks)) {
p->pending_picks = pp->next; p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF( *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel), selected->connected_subchannel, "rr_picked");
"rr_picked");
if (pp->user_data != NULL) { if (pp->user_data != NULL) {
*pp->user_data = selected->user_data; *pp->user_data = selected->user_data;
} }
@ -694,12 +529,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(pp); gpr_free(pp);
} }
} }
/* renew notification: reuses the "rr_connectivity_update" weak ref on the // Renew notification.
* policy as well as the sd->subchannel_list ref. */ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
} }
} }
@ -723,13 +554,12 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
round_robin_lb_policy *p = (round_robin_lb_policy *)pol; round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p); const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) { if (next_ready_index < p->subchannel_list->num_subchannels) {
subchannel_data *selected = grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index]; &p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF( grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel), selected->connected_subchannel, "rr_ping");
"rr_picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure); grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked"); GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else { } else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected")); "Round Robin not connected"));
@ -742,130 +572,68 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_arg *arg = const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) { if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
// Otherwise, keep using the current subchannel list (ignore this update).
if (p->subchannel_list == NULL) { if (p->subchannel_list == NULL) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
"rr_update_missing"); "rr_update_missing");
} else {
// otherwise, keep using the current subchannel list (ignore this update).
gpr_log(GPR_ERROR,
"[RR %p] No valid LB addresses channel arg for update, ignoring.",
(void *)p);
} }
return; return;
} }
grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p; grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
rr_subchannel_list *subchannel_list = if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
rr_subchannel_list_create(p, addresses->num_addresses); gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
if (addresses->num_addresses == 0) { addresses->num_addresses);
}
grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
rr_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"rr_update_empty"); "rr_update_empty");
if (p->subchannel_list != NULL) { if (p->subchannel_list != NULL) {
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list, grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update"); "sl_shutdown_empty_update");
} }
p->subchannel_list = subchannel_list; // empty list p->subchannel_list = subchannel_list; // empty list
return; return;
} }
size_t subchannel_index = 0; if (p->started_picking) {
if (p->latest_pending_subchannel_list != NULL && p->started_picking) { if (p->latest_pending_subchannel_list != NULL) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, about " "[RR %p] Shutting down latest pending subchannel list %p, "
"to be replaced by newer latest %p", "about to be replaced by newer latest %p",
(void *)p, (void *)p->latest_pending_subchannel_list, (void *)p, (void *)p->latest_pending_subchannel_list,
(void *)subchannel_list); (void *)subchannel_list);
} }
rr_subchannel_list_shutdown_and_unref( grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "sl_outdated_dont_smash"); exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
} }
p->latest_pending_subchannel_list = subchannel_list; p->latest_pending_subchannel_list = subchannel_list;
grpc_subchannel_args sc_args; for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
/* We need to remove the LB addresses in order to be able to compare the /* Watch every new subchannel. A subchannel list becomes active the
* subchannel keys of subchannels from a different batch of addresses. */
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
/* Create subchannels for addresses in the update. */
for (size_t i = 0; i < addresses->num_addresses; i++) {
// If there were any balancer, we would have chosen grpclb policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
grpc_error *error;
// Get the connectivity state of the subchannel. Already existing ones may
// be in a state other than INIT.
const grpc_connectivity_state subchannel_connectivity_state =
grpc_subchannel_check_connectivity(subchannel, &error);
if (error != GRPC_ERROR_NONE) {
// The subchannel is in error (e.g. shutting down). Ignore it.
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannel, "new_sc_connectivity_error");
GRPC_ERROR_UNREF(error);
continue;
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(
GPR_DEBUG,
"[RR %p] index %lu: Created subchannel %p for address uri %s into "
"subchannel_list %p. Connectivity state %s",
(void *)p, (unsigned long)subchannel_index, (void *)subchannel,
address_uri, (void *)subchannel_list,
grpc_connectivity_state_name(subchannel_connectivity_state));
gpr_free(address_uri);
}
subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
rr_connectivity_changed_locked, sd,
grpc_combiner_scheduler(args->combiner));
/* use some sentinel value outside of the range of
* grpc_connectivity_state to signal an undefined previous state. We
* won't be referring to this value again and it'll be overwritten after
* the first call to rr_connectivity_changed_locked */
sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
sd->curr_connectivity_state = subchannel_connectivity_state;
sd->user_data_vtable = addresses->user_data_vtable;
if (sd->user_data_vtable != NULL) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
}
if (p->started_picking) {
rr_subchannel_list_ref(sd->subchannel_list, "update_started_picking");
GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity_update");
/* 2. Watch every new subchannel. A subchannel list becomes active the
* moment one of its subchannels is READY. At that moment, we swap * moment one of its subchannels is READY. At that moment, we swap
* p->subchannel_list for sd->subchannel_list, provided the subchannel * p->subchannel_list for sd->subchannel_list, provided the subchannel
* list is still valid (ie, isn't shutting down) */ * list is still valid (ie, isn't shutting down) */
grpc_subchannel_notify_on_state_change( grpc_lb_subchannel_list_ref_for_connectivity_watch(subchannel_list,
exec_ctx, sd->subchannel, p->base.interested_parties, "connectivity_watch");
&sd->pending_connectivity_state_unsafe, grpc_lb_subchannel_data_start_connectivity_watch(
&sd->connectivity_changed_closure); exec_ctx, &subchannel_list->subchannels[i]);
}
} }
if (!p->started_picking) { } else {
// The policy isn't picking yet. Save the update for later, disposing of // The policy isn't picking yet. Save the update for later, disposing of
// previous version if any. // previous version if any.
if (p->subchannel_list != NULL) { if (p->subchannel_list != NULL) {
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list, grpc_lb_subchannel_list_shutdown_and_unref(
"rr_update_before_started_picking"); exec_ctx, p->subchannel_list, "rr_update_before_started_picking");
} }
p->subchannel_list = subchannel_list; p->subchannel_list = subchannel_list;
p->latest_pending_subchannel_list = NULL;
} }
} }

@ -0,0 +1,265 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <string.h>
#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
/* Releases sd's ref to its subchannel and, along with it, any connected
 * subchannel ref and user data it holds. No-op when the subchannel has
 * already been released. `reason` is a debug label threaded through the
 * unref macros. */
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
                                              grpc_lb_subchannel_data *sd,
                                              const char *reason) {
  if (sd->subchannel != NULL) {
    if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
      gpr_log(
          GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
          " of %" PRIuPTR " (subchannel %p): unreffing subchannel",
          sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
          sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
          sd->subchannel_list->num_subchannels, sd->subchannel);
    }
    GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
    sd->subchannel = NULL;  // guard against double-unref
    // NOTE(review): connected_subchannel/user_data cleanup only runs while
    // sd->subchannel is still set; assumes neither is ever set without it.
    if (sd->connected_subchannel != NULL) {
      GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, sd->connected_subchannel,
                                      reason);
      sd->connected_subchannel = NULL;
    }
    if (sd->user_data != NULL) {
      GPR_ASSERT(sd->user_data_vtable != NULL);
      sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
      sd->user_data = NULL;
    }
  }
}
/* Arms sd's connectivity_changed_closure to fire on the subchannel's next
 * state change and marks the watch as pending. The closure must eventually
 * either call grpc_lb_subchannel_data_stop_connectivity_watch() or re-arm
 * the watch by calling this function again. */
void grpc_lb_subchannel_data_start_connectivity_watch(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
  if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
    gpr_log(GPR_DEBUG,
            "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
            " (subchannel %p): requesting connectivity change notification",
            sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
            sd->subchannel_list,
            (size_t)(sd - sd->subchannel_list->subchannels),
            sd->subchannel_list->num_subchannels, sd->subchannel);
  }
  // Set the pending flag before requesting notification so the state is
  // consistent by the time the closure can run.
  sd->connectivity_notification_pending = true;
  grpc_subchannel_notify_on_state_change(
      exec_ctx, sd->subchannel, sd->subchannel_list->policy->interested_parties,
      &sd->pending_connectivity_state_unsafe,
      &sd->connectivity_changed_closure);
}
/* Marks the connectivity watch on sd as finished. Must only be called
 * while a watch is pending (asserted). exec_ctx is unused here but kept
 * for symmetry with the start function. */
void grpc_lb_subchannel_data_stop_connectivity_watch(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
  if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
    gpr_log(
        GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
        " (subchannel %p): stopping connectivity watch",
        sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
        sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
        sd->subchannel_list->num_subchannels, sd->subchannel);
  }
  GPR_ASSERT(sd->connectivity_notification_pending);
  sd->connectivity_notification_pending = false;
}
/* Builds a new subchannel list from `addresses`, creating one subchannel
 * per (non-balancer) address. Addresses for which subchannel creation
 * fails are skipped, so the resulting list may have fewer entries than
 * the input. Each entry's closure is initialized to invoke
 * `connectivity_changed_cb` under args->combiner; connectivity watches
 * are NOT started here. The caller owns the initial ref. */
grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
    grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
    const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
    grpc_iomgr_cb_func connectivity_changed_cb) {
  grpc_lb_subchannel_list *subchannel_list =
      (grpc_lb_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
  if (GRPC_TRACER_ON(*tracer)) {
    gpr_log(GPR_DEBUG,
            "[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
            tracer->name, p, subchannel_list, addresses->num_addresses);
  }
  subchannel_list->policy = p;
  subchannel_list->tracer = tracer;
  gpr_ref_init(&subchannel_list->refcount, 1);
  // Over-allocated: some addresses may not yield a subchannel, in which
  // case num_subchannels (set at the end) is smaller than num_addresses.
  subchannel_list->subchannels = (grpc_lb_subchannel_data *)gpr_zalloc(
      sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
  // We need to remove the LB addresses in order to be able to compare the
  // subchannel keys of subchannels from a different batch of addresses.
  static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
                                         GRPC_ARG_LB_ADDRESSES};
  // Create a subchannel for each address.
  grpc_subchannel_args sc_args;
  size_t subchannel_index = 0;
  for (size_t i = 0; i < addresses->num_addresses; i++) {
    // If there were any balancer, we would have chosen grpclb policy instead.
    GPR_ASSERT(!addresses->addresses[i].is_balancer);
    memset(&sc_args, 0, sizeof(grpc_subchannel_args));
    // Build per-address channel args: strip the LB-specific keys and add
    // this address. The arg's string value is owned by us and freed after
    // the copy below.
    grpc_arg addr_arg =
        grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
    grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
        args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
        1);
    gpr_free(addr_arg.value.string);
    sc_args.args = new_args;
    grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
        exec_ctx, args->client_channel_factory, &sc_args);
    grpc_channel_args_destroy(exec_ctx, new_args);
    if (subchannel == NULL) {
      // Subchannel could not be created.
      if (GRPC_TRACER_ON(*tracer)) {
        char *address_uri =
            grpc_sockaddr_to_uri(&addresses->addresses[i].address);
        gpr_log(GPR_DEBUG,
                "[%s %p] could not create subchannel for address uri %s, "
                "ignoring",
                tracer->name, subchannel_list->policy, address_uri);
        gpr_free(address_uri);
      }
      continue;
    }
    if (GRPC_TRACER_ON(*tracer)) {
      char *address_uri =
          grpc_sockaddr_to_uri(&addresses->addresses[i].address);
      gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
              ": Created subchannel %p for address uri %s",
              tracer->name, p, subchannel_list, subchannel_index, subchannel,
              address_uri);
      gpr_free(address_uri);
    }
    grpc_lb_subchannel_data *sd =
        &subchannel_list->subchannels[subchannel_index++];
    sd->subchannel_list = subchannel_list;
    sd->subchannel = subchannel;
    GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
                      connectivity_changed_cb, sd,
                      grpc_combiner_scheduler(args->combiner));
    // We assume that the current state is IDLE. If not, we'll get a
    // callback telling us that.
    sd->prev_connectivity_state = GRPC_CHANNEL_IDLE;
    sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
    sd->pending_connectivity_state_unsafe = GRPC_CHANNEL_IDLE;
    sd->user_data_vtable = addresses->user_data_vtable;
    if (sd->user_data_vtable != NULL) {
      sd->user_data =
          sd->user_data_vtable->copy(addresses->addresses[i].user_data);
    }
  }
  // Record how many subchannels were actually created; they all start in
  // IDLE per the assumption above.
  subchannel_list->num_subchannels = subchannel_index;
  subchannel_list->num_idle = subchannel_index;
  return subchannel_list;
}
/* Frees the list and everything it owns. Reached only when the ref count
 * drops to zero (see grpc_lb_subchannel_list_unref). */
static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
                                    grpc_lb_subchannel_list *subchannel_list) {
  if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
    gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
            subchannel_list->tracer->name, subchannel_list->policy,
            subchannel_list);
  }
  /* Drop each entry's subchannel ref before releasing the array itself. */
  grpc_lb_subchannel_data *const entries = subchannel_list->subchannels;
  const size_t count = subchannel_list->num_subchannels;
  for (size_t idx = 0; idx < count; ++idx) {
    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, &entries[idx],
                                             "subchannel_list_destroy");
  }
  gpr_free(subchannel_list->subchannels);
  gpr_free(subchannel_list);
}
/* Takes a ref on the list (must already be non-zero). `reason` is only
 * used for trace logging. */
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
                                 const char *reason) {
  gpr_ref_non_zero(&subchannel_list->refcount);
  if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
    // Re-read the count after the increment purely for the log message.
    const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
    gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
            subchannel_list->tracer->name, subchannel_list->policy,
            subchannel_list, (unsigned long)(count - 1), (unsigned long)count,
            reason);
  }
}
/* Drops a ref on the list; destroys it when the last ref is released.
 * `reason` is only used for trace logging. */
void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
                                   grpc_lb_subchannel_list *subchannel_list,
                                   const char *reason) {
  const bool done = gpr_unref(&subchannel_list->refcount);
  if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
    const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
    gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
            subchannel_list->tracer->name, subchannel_list->policy,
            subchannel_list, (unsigned long)(count + 1), (unsigned long)count,
            reason);
  }
  if (done) {
    subchannel_list_destroy(exec_ctx, subchannel_list);
  }
}
/* Takes the pair of refs a pending connectivity notification needs: a
 * weak ref on the owning LB policy plus a ref on the list itself. */
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
    grpc_lb_subchannel_list *subchannel_list, const char *reason) {
  GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
  grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
/* Releases the refs taken by
 * grpc_lb_subchannel_list_ref_for_connectivity_watch(). */
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
    const char *reason) {
  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
  grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}
/* Cancels sd's pending connectivity watch by re-registering the closure
 * with NULL interested_parties/state; the closure still runs and is
 * responsible for the final unref (see shutdown_and_unref below). */
static void subchannel_data_cancel_connectivity_watch(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, const char *reason) {
  if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
    gpr_log(
        GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
        " (subchannel %p): canceling connectivity watch (%s)",
        sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
        sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
        sd->subchannel_list->num_subchannels, sd->subchannel, reason);
  }
  grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
                                         &sd->connectivity_changed_closure);
}
/* Marks the list as shutting down (must not already be), cancels or
 * releases every subchannel, and drops the caller's ref. Entries with a
 * pending notification are merely cancelled here; their closures perform
 * the final subchannel unref. */
void grpc_lb_subchannel_list_shutdown_and_unref(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
    const char *reason) {
  if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
    gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
            subchannel_list->tracer->name, subchannel_list->policy,
            subchannel_list, reason);
  }
  GPR_ASSERT(!subchannel_list->shutting_down);
  subchannel_list->shutting_down = true;
  for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
    grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
    // If there's a pending notification for this subchannel, cancel it;
    // the callback is responsible for unreffing the subchannel.
    // Otherwise, unref the subchannel directly.
    if (sd->connectivity_notification_pending) {
      subchannel_data_cancel_connectivity_watch(exec_ctx, sd, reason);
    } else if (sd->subchannel != NULL) {
      grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
    }
  }
  grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}

@ -0,0 +1,153 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/connectivity_state.h"
// TODO(roth): This code is intended to be shared between pick_first and
// round_robin. However, the interface needs more work to provide clean
// encapsulation. For example, the structs here have some fields that are
// only used in one of the two (e.g., the state counters in
// grpc_lb_subchannel_list and the prev_connectivity_state field in
// grpc_lb_subchannel_data are only used in round_robin, and the
// checking_subchannel field in grpc_lb_subchannel_list is only used by
// pick_first). Also, there is probably some code duplication between the
// connectivity state notification callback code in both pick_first and
// round_robin that could be refactored and moved here. In a future PR,
// need to clean this up.
#ifdef __cplusplus
extern "C" {
#endif
typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
/** Tracking state for one subchannel owned by a grpc_lb_subchannel_list. */
typedef struct {
  /** backpointer to owning subchannel list */
  grpc_lb_subchannel_list *subchannel_list;
  /** subchannel itself */
  grpc_subchannel *subchannel;
  /** the subchannel's connected form, if any; unreffed along with the
   * subchannel by grpc_lb_subchannel_data_unref_subchannel() */
  grpc_connected_subchannel *connected_subchannel;
  /** Is a connectivity notification pending? */
  bool connectivity_notification_pending;
  /** notification that connectivity has changed on subchannel */
  grpc_closure connectivity_changed_closure;
  /** previous and current connectivity states. Updated by
   * \a connectivity_changed_closure based on
   * \a pending_connectivity_state_unsafe. */
  grpc_connectivity_state prev_connectivity_state;
  grpc_connectivity_state curr_connectivity_state;
  /** connectivity state to be updated by
   * grpc_subchannel_notify_on_state_change(), not guarded by
   * the combiner. To be copied to \a curr_connectivity_state by
   * \a connectivity_changed_closure. */
  grpc_connectivity_state pending_connectivity_state_unsafe;
  /** the subchannel's target user data */
  void *user_data;
  /** vtable to operate over \a user_data */
  const grpc_lb_user_data_vtable *user_data_vtable;
} grpc_lb_subchannel_data;
/// Unrefs the subchannel contained in sd.
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
                                              grpc_lb_subchannel_data *sd,
                                              const char *reason);
/// Starts watching the connectivity state of the subchannel.
/// The connectivity_changed_cb callback must invoke either
/// grpc_lb_subchannel_data_stop_connectivity_watch() or again call
/// grpc_lb_subchannel_data_start_connectivity_watch().
void grpc_lb_subchannel_data_start_connectivity_watch(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
/// Stops watching the connectivity state of the subchannel.
void grpc_lb_subchannel_data_stop_connectivity_watch(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
struct grpc_lb_subchannel_list {
  /** backpointer to owning policy */
  grpc_lb_policy *policy;
  /** trace flag used for all logging about this list */
  grpc_tracer_flag *tracer;
  /** all our subchannels */
  size_t num_subchannels;
  grpc_lb_subchannel_data *subchannels;
  /** Index into subchannels of the one we're currently checking.
   * Used when connecting to subchannels serially instead of in parallel. */
  // TODO(roth): When we have time, we can probably make this go away
  // and compute the index dynamically by subtracting
  // subchannel_list->subchannels from the subchannel_data pointer.
  size_t checking_subchannel;
  /** how many subchannels are in state READY */
  size_t num_ready;
  /** how many subchannels are in state TRANSIENT_FAILURE */
  size_t num_transient_failures;
  /** how many subchannels are in state SHUTDOWN */
  size_t num_shutdown;
  /** how many subchannels are in state IDLE */
  size_t num_idle;
  /** There will be one ref for each entry in subchannels for which there is a
   * pending connectivity state watcher callback. */
  gpr_refcount refcount;
  /** Is this list shutting down? This may be true due to the shutdown of the
   * policy itself or because a newer update has arrived while this one hadn't
   * finished processing. */
  bool shutting_down;
};
/// Creates a list with one entry per address in \a addresses (addresses
/// for which subchannel creation fails are skipped). Does not start any
/// connectivity watches; the caller owns the initial ref.
grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
    grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
    const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
    grpc_iomgr_cb_func connectivity_changed_cb);
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
                                 const char *reason);
void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
                                   grpc_lb_subchannel_list *subchannel_list,
                                   const char *reason);
/// Takes and releases refs needed for a connectivity notification.
/// This includes a ref to subchannel_list and a weak ref to the LB policy.
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
    grpc_lb_subchannel_list *subchannel_list, const char *reason);
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
    const char *reason);
/// Mark subchannel_list as discarded. Unsubscribes all its subchannels. The
/// connectivity state notification callback will ultimately unref it.
void grpc_lb_subchannel_list_shutdown_and_unref(
    grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
    const char *reason);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */

@ -127,8 +127,8 @@ void grpc_connected_subchannel_process_transport_op(
grpc_connectivity_state grpc_subchannel_check_connectivity( grpc_connectivity_state grpc_subchannel_check_connectivity(
grpc_subchannel *channel, grpc_error **error); grpc_subchannel *channel, grpc_error **error);
/** call notify when the connectivity state of a channel changes from *state. /** Calls notify when the connectivity state of a channel becomes different
Updates *state with the new state of the channel */ from *state. Updates *state with the new state of the channel. */
void grpc_subchannel_notify_on_state_change( void grpc_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_subchannel *channel, grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state, grpc_pollset_set *interested_parties, grpc_connectivity_state *state,

@ -32,6 +32,12 @@ struct grpc_chttp2_stream;
extern "C" grpc_tracer_flag grpc_flowctl_trace; extern "C" grpc_tracer_flag grpc_flowctl_trace;
namespace grpc {
namespace testing {
class TrickledCHTTP2; // to make this a friend
} // namespace testing
} // namespace grpc
namespace grpc_core { namespace grpc_core {
namespace chttp2 { namespace chttp2 {
@ -203,6 +209,7 @@ class TransportFlowControl {
} }
private: private:
friend class ::grpc::testing::TrickledCHTTP2;
double TargetLogBdp(); double TargetLogBdp();
double SmoothLogBdp(grpc_exec_ctx* exec_ctx, double value); double SmoothLogBdp(grpc_exec_ctx* exec_ctx, double value);
FlowControlAction::Urgency DeltaUrgency(int32_t value, FlowControlAction::Urgency DeltaUrgency(int32_t value,
@ -297,6 +304,7 @@ class StreamFlowControl {
} }
private: private:
friend class ::grpc::testing::TrickledCHTTP2;
TransportFlowControl* const tfc_; TransportFlowControl* const tfc_;
const grpc_chttp2_stream* const s_; const grpc_chttp2_stream* const s_;

@ -119,6 +119,12 @@ grpc_channel_credentials *grpc_ssl_credentials_create(
// SSL Server Credentials. // SSL Server Credentials.
// //
/* Options used to build SSL server credentials. At least one of
 * certificate_config or certificate_config_fetcher must be set (see
 * grpc_ssl_server_credentials_create_with_options); when a fetcher is
 * present it takes precedence over the static config. */
struct grpc_ssl_server_credentials_options {
  /* How (and whether) to request a certificate from the client. */
  grpc_ssl_client_certificate_request_type client_certificate_request;
  /* Static certificate config; owned. NULL when a fetcher is used. */
  grpc_ssl_server_certificate_config *certificate_config;
  /* Callback-based config reloading; owned. NULL for static config. */
  grpc_ssl_server_certificate_config_fetcher *certificate_config_fetcher;
};
static void ssl_server_destruct(grpc_exec_ctx *exec_ctx, static void ssl_server_destruct(grpc_exec_ctx *exec_ctx,
grpc_server_credentials *creds) { grpc_server_credentials *creds) {
grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds; grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
@ -130,9 +136,7 @@ static void ssl_server_destruct(grpc_exec_ctx *exec_ctx,
static grpc_security_status ssl_server_create_security_connector( static grpc_security_status ssl_server_create_security_connector(
grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds, grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds,
grpc_server_security_connector **sc) { grpc_server_security_connector **sc) {
grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds; return grpc_ssl_server_security_connector_create(exec_ctx, creds, sc);
return grpc_ssl_server_security_connector_create(exec_ctx, creds, &c->config,
sc);
} }
static grpc_server_credentials_vtable ssl_server_vtable = { static grpc_server_credentials_vtable ssl_server_vtable = {
@ -170,6 +174,86 @@ static void ssl_build_server_config(
config->num_key_cert_pairs = num_key_cert_pairs; config->num_key_cert_pairs = num_key_cert_pairs;
} }
/* Deep-copies the given root certs and key/cert pairs into a newly
 * allocated grpc_ssl_server_certificate_config. Every pair (when
 * num_key_cert_pairs > 0) must provide both a private key and a cert
 * chain (asserted). Release the result with
 * grpc_ssl_server_certificate_config_destroy(). */
grpc_ssl_server_certificate_config *grpc_ssl_server_certificate_config_create(
    const char *pem_root_certs,
    const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
    size_t num_key_cert_pairs) {
  grpc_ssl_server_certificate_config *result =
      (grpc_ssl_server_certificate_config *)gpr_zalloc(
          sizeof(grpc_ssl_server_certificate_config));
  result->num_key_cert_pairs = num_key_cert_pairs;
  if (pem_root_certs != NULL) {
    result->pem_root_certs = gpr_strdup(pem_root_certs);
  }
  if (num_key_cert_pairs > 0) {
    GPR_ASSERT(pem_key_cert_pairs != NULL);
    result->pem_key_cert_pairs = (grpc_ssl_pem_key_cert_pair *)gpr_zalloc(
        num_key_cert_pairs * sizeof(grpc_ssl_pem_key_cert_pair));
    for (size_t idx = 0; idx < num_key_cert_pairs; idx++) {
      GPR_ASSERT(pem_key_cert_pairs[idx].private_key != NULL);
      GPR_ASSERT(pem_key_cert_pairs[idx].cert_chain != NULL);
      result->pem_key_cert_pairs[idx].cert_chain =
          gpr_strdup(pem_key_cert_pairs[idx].cert_chain);
      result->pem_key_cert_pairs[idx].private_key =
          gpr_strdup(pem_key_cert_pairs[idx].private_key);
    }
  }
  return result;
}
/* Frees a config created by grpc_ssl_server_certificate_config_create,
 * including all duplicated strings. Safe to call with NULL. */
void grpc_ssl_server_certificate_config_destroy(
    grpc_ssl_server_certificate_config *config) {
  if (config == NULL) return;
  size_t idx = 0;
  while (idx < config->num_key_cert_pairs) {
    gpr_free((void *)config->pem_key_cert_pairs[idx].private_key);
    gpr_free((void *)config->pem_key_cert_pairs[idx].cert_chain);
    ++idx;
  }
  gpr_free(config->pem_key_cert_pairs);
  gpr_free(config->pem_root_certs);
  gpr_free(config);
}
/* Wraps a static certificate config into a credentials-options struct.
 * Returns NULL (with an error log) if `config` is NULL; otherwise the
 * returned options take ownership of `config`. */
grpc_ssl_server_credentials_options *
grpc_ssl_server_credentials_create_options_using_config(
    grpc_ssl_client_certificate_request_type client_certificate_request,
    grpc_ssl_server_certificate_config *config) {
  if (config == NULL) {
    gpr_log(GPR_ERROR, "Certificate config must not be NULL.");
    return NULL;
  }
  grpc_ssl_server_credentials_options *result =
      (grpc_ssl_server_credentials_options *)gpr_zalloc(
          sizeof(grpc_ssl_server_credentials_options));
  result->client_certificate_request = client_certificate_request;
  result->certificate_config = config;
  return result;
}
/* Builds options that reload certificate config via `cb`, which will be
 * invoked with `user_data`. Returns NULL (with an error log) if `cb` is
 * NULL. The returned options own the allocated fetcher. */
grpc_ssl_server_credentials_options *
grpc_ssl_server_credentials_create_options_using_config_fetcher(
    grpc_ssl_client_certificate_request_type client_certificate_request,
    grpc_ssl_server_certificate_config_callback cb, void *user_data) {
  if (cb == NULL) {
    gpr_log(GPR_ERROR, "Invalid certificate config callback parameter.");
    return NULL;
  }
  grpc_ssl_server_credentials_options *result =
      (grpc_ssl_server_credentials_options *)gpr_zalloc(
          sizeof(grpc_ssl_server_credentials_options));
  result->client_certificate_request = client_certificate_request;
  grpc_ssl_server_certificate_config_fetcher *fetcher =
      (grpc_ssl_server_certificate_config_fetcher *)gpr_zalloc(
          sizeof(grpc_ssl_server_certificate_config_fetcher));
  fetcher->cb = cb;
  fetcher->user_data = user_data;
  result->certificate_config_fetcher = fetcher;
  return result;
}
grpc_server_credentials *grpc_ssl_server_credentials_create( grpc_server_credentials *grpc_ssl_server_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, int force_client_auth, void *reserved) { size_t num_key_cert_pairs, int force_client_auth, void *reserved) {
@ -186,8 +270,6 @@ grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
size_t num_key_cert_pairs, size_t num_key_cert_pairs,
grpc_ssl_client_certificate_request_type client_certificate_request, grpc_ssl_client_certificate_request_type client_certificate_request,
void *reserved) { void *reserved) {
grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)gpr_zalloc(
sizeof(grpc_ssl_server_credentials));
GRPC_API_TRACE( GRPC_API_TRACE(
"grpc_ssl_server_credentials_create_ex(" "grpc_ssl_server_credentials_create_ex("
"pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, " "pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
@ -195,11 +277,67 @@ grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs, 5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
client_certificate_request, reserved)); client_certificate_request, reserved));
GPR_ASSERT(reserved == NULL); GPR_ASSERT(reserved == NULL);
grpc_ssl_server_certificate_config *cert_config =
grpc_ssl_server_certificate_config_create(
pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs);
grpc_ssl_server_credentials_options *options =
grpc_ssl_server_credentials_create_options_using_config(
client_certificate_request, cert_config);
return grpc_ssl_server_credentials_create_with_options(options);
}
grpc_server_credentials *grpc_ssl_server_credentials_create_with_options(
grpc_ssl_server_credentials_options *options) {
grpc_server_credentials *retval = NULL;
grpc_ssl_server_credentials *c = NULL;
if (options == NULL) {
gpr_log(GPR_ERROR,
"Invalid options trying to create SSL server credentials.");
goto done;
}
if (options->certificate_config == NULL &&
options->certificate_config_fetcher == NULL) {
gpr_log(GPR_ERROR,
"SSL server credentials options must specify either "
"certificate config or fetcher.");
goto done;
} else if (options->certificate_config_fetcher != NULL &&
options->certificate_config_fetcher->cb == NULL) {
gpr_log(GPR_ERROR, "Certificate config fetcher callback must not be NULL.");
goto done;
}
c = (grpc_ssl_server_credentials *)gpr_zalloc(
sizeof(grpc_ssl_server_credentials));
c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL; c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
gpr_ref_init(&c->base.refcount, 1); gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &ssl_server_vtable; c->base.vtable = &ssl_server_vtable;
ssl_build_server_config(pem_root_certs, pem_key_cert_pairs,
num_key_cert_pairs, client_certificate_request, if (options->certificate_config_fetcher != NULL) {
&c->config); c->config.client_certificate_request = options->client_certificate_request;
return &c->base; c->certificate_config_fetcher = *options->certificate_config_fetcher;
} else {
ssl_build_server_config(options->certificate_config->pem_root_certs,
options->certificate_config->pem_key_cert_pairs,
options->certificate_config->num_key_cert_pairs,
options->client_certificate_request, &c->config);
}
retval = &c->base;
done:
grpc_ssl_server_credentials_options_destroy(options);
return retval;
}
void grpc_ssl_server_credentials_options_destroy(
grpc_ssl_server_credentials_options *o) {
if (o == NULL) return;
gpr_free(o->certificate_config_fetcher);
grpc_ssl_server_certificate_config_destroy(o->certificate_config);
gpr_free(o);
} }

@ -29,9 +29,21 @@ typedef struct {
grpc_ssl_config config; grpc_ssl_config config;
} grpc_ssl_credentials; } grpc_ssl_credentials;
/* In-memory certificate material for SSL server credentials: key/cert
 * pairs plus optional root certs. All strings are heap-owned; see
 * grpc_ssl_server_certificate_config_destroy(). */
struct grpc_ssl_server_certificate_config {
  grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs;
  size_t num_key_cert_pairs;
  char *pem_root_certs;
};
/* Callback (plus its user data) used to fetch/reload certificate config;
 * cb == NULL means no fetcher is configured. */
typedef struct {
  grpc_ssl_server_certificate_config_callback cb;
  void *user_data;
} grpc_ssl_server_certificate_config_fetcher;
typedef struct { typedef struct {
grpc_server_credentials base; grpc_server_credentials base;
grpc_ssl_server_config config; grpc_ssl_server_config config;
grpc_ssl_server_certificate_config_fetcher certificate_config_fetcher;
} grpc_ssl_server_credentials; } grpc_ssl_server_credentials;
tsi_ssl_pem_key_cert_pair *grpc_convert_grpc_to_tsi_cert_pairs( tsi_ssl_pem_key_cert_pair *grpc_convert_grpc_to_tsi_cert_pairs(

@ -34,6 +34,7 @@
#include "src/core/lib/security/context/security_context.h" #include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h" #include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/security/credentials/ssl/ssl_credentials.h"
#include "src/core/lib/security/transport/lb_targets_info.h" #include "src/core/lib/security/transport/lb_targets_info.h"
#include "src/core/lib/security/transport/secure_endpoint.h" #include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/security_handshaker.h" #include "src/core/lib/security/transport/security_handshaker.h"
@ -277,6 +278,30 @@ grpc_security_connector *grpc_security_connector_find_in_args(
return NULL; return NULL;
} }
/* Translates the public gRPC client-certificate request type into its TSI
 * counterpart. Unrecognized values map to "don't request". */
static tsi_client_certificate_request_type
get_tsi_client_certificate_request_type(
    grpc_ssl_client_certificate_request_type grpc_request_type) {
  if (grpc_request_type ==
      GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY) {
    return TSI_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
  }
  if (grpc_request_type == GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY) {
    return TSI_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY;
  }
  if (grpc_request_type ==
      GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY) {
    return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
  }
  if (grpc_request_type ==
      GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY) {
    return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY;
  }
  /* GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE and anything unrecognized. */
  return TSI_DONT_REQUEST_CLIENT_CERTIFICATE;
}
/* -- Fake implementation. -- */ /* -- Fake implementation. -- */
typedef struct { typedef struct {
@ -533,6 +558,15 @@ typedef struct {
tsi_ssl_server_handshaker_factory *server_handshaker_factory; tsi_ssl_server_handshaker_factory *server_handshaker_factory;
} grpc_ssl_server_security_connector; } grpc_ssl_server_security_connector;
/* Returns true iff the connector's server credentials were configured
 * with a certificate-config fetcher callback. */
static bool server_connector_has_cert_config_fetcher(
    grpc_ssl_server_security_connector *c) {
  GPR_ASSERT(c != NULL);
  grpc_ssl_server_credentials *creds =
      (grpc_ssl_server_credentials *)c->base.server_creds;
  GPR_ASSERT(creds != NULL);
  return creds->certificate_config_fetcher.cb != NULL;
}
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx, static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) { grpc_security_connector *sc) {
grpc_ssl_channel_security_connector *c = grpc_ssl_channel_security_connector *c =
@ -573,7 +607,6 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
tsi_result_to_string(result)); tsi_result_to_string(result));
return; return;
} }
// Create handshakers. // Create handshakers.
grpc_handshake_manager_add( grpc_handshake_manager_add(
handshake_mgr, handshake_mgr,
@ -581,12 +614,102 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base)); exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base));
} }
/* Allocates and fills the array of supported ALPN protocol names, storing
 * the element count in *num_alpn_protocols. The entries point at strings
 * owned by the chttp2 layer; the caller frees only the array itself. */
static const char **fill_alpn_protocol_strings(size_t *num_alpn_protocols) {
  GPR_ASSERT(num_alpn_protocols != NULL);
  const size_t count = grpc_chttp2_num_alpn_versions();
  *num_alpn_protocols = count;
  const char **protocols =
      (const char **)gpr_malloc(sizeof(const char *) * count);
  for (size_t idx = 0; idx < count; ++idx) {
    protocols[idx] = grpc_chttp2_get_alpn_version_index(idx);
  }
  return protocols;
}
/* Attempts to replace the server_handshaker_factory with a new factory using
 * the provided grpc_ssl_server_certificate_config. Should new factory creation
 * fail, the existing factory will not be replaced. Returns true on success (new
 * factory created). */
static bool try_replace_server_handshaker_factory(
    grpc_ssl_server_security_connector *sc,
    const grpc_ssl_server_certificate_config *config) {
  if (config == NULL) {
    gpr_log(GPR_ERROR,
            "Server certificate config callback returned invalid (NULL) "
            "config.");
    return false;
  }
  gpr_log(GPR_DEBUG, "Using new server certificate config (%p).", config);
  // Build the TSI factory inputs: the ALPN protocol list and key/cert
  // pairs converted from the gRPC config representation.
  size_t num_alpn_protocols = 0;
  const char **alpn_protocol_strings =
      fill_alpn_protocol_strings(&num_alpn_protocols);
  tsi_ssl_pem_key_cert_pair *cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs(
      config->pem_key_cert_pairs, config->num_key_cert_pairs);
  tsi_ssl_server_handshaker_factory *new_handshaker_factory = NULL;
  grpc_ssl_server_credentials *server_creds =
      (grpc_ssl_server_credentials *)sc->base.server_creds;
  tsi_result result = tsi_create_ssl_server_handshaker_factory_ex(
      cert_pairs, config->num_key_cert_pairs, config->pem_root_certs,
      get_tsi_client_certificate_request_type(
          server_creds->config.client_certificate_request),
      ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
      &new_handshaker_factory);
  // The TSI factory has copied what it needs; free the temporaries.
  gpr_free(cert_pairs);
  gpr_free((void *)alpn_protocol_strings);
  if (result != TSI_OK) {
    gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
            tsi_result_to_string(result));
    return false;
  }
  // Only drop the old factory once the new one exists, so a failed reload
  // leaves the previous configuration in service.
  tsi_ssl_server_handshaker_factory_unref(sc->server_handshaker_factory);
  sc->server_handshaker_factory = new_handshaker_factory;
  return true;
}
/* Attempts to fetch the server certificate config if a callback is available.
 * The current certificate config continues to be used if the callback reports
 * an error. Returns true iff new credentials were successfully loaded. */
static bool try_fetch_ssl_server_credentials(
    grpc_ssl_server_security_connector *sc) {
  GPR_ASSERT(sc != NULL);
  if (!server_connector_has_cert_config_fetcher(sc)) return false;

  grpc_ssl_server_credentials *creds =
      (grpc_ssl_server_credentials *)sc->base.server_creds;
  grpc_ssl_server_certificate_config *fetched_config = NULL;
  grpc_ssl_certificate_config_reload_status reload_status =
      creds->certificate_config_fetcher.cb(
          creds->certificate_config_fetcher.user_data, &fetched_config);

  bool loaded_new_config = false;
  switch (reload_status) {
    case GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED:
      gpr_log(GPR_DEBUG, "No change in SSL server credentials.");
      break;
    case GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW:
      loaded_new_config =
          try_replace_server_handshaker_factory(sc, fetched_config);
      break;
    default:
      /* Log the failure and keep serving with previously-loaded
       * credentials. */
      gpr_log(GPR_ERROR,
              "Failed fetching new server credentials, continuing to "
              "use previously-loaded credentials.");
      break;
  }
  if (fetched_config != NULL) {
    grpc_ssl_server_certificate_config_destroy(fetched_config);
  }
  return loaded_new_config;
}
static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
grpc_server_security_connector *sc, grpc_server_security_connector *sc,
grpc_handshake_manager *handshake_mgr) { grpc_handshake_manager *handshake_mgr) {
grpc_ssl_server_security_connector *c = grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc; (grpc_ssl_server_security_connector *)sc;
// Instantiate TSI handshaker. // Instantiate TSI handshaker.
try_fetch_ssl_server_credentials(c);
tsi_handshaker *tsi_hs = NULL; tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker( tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
c->server_handshaker_factory, &tsi_hs); c->server_handshaker_factory, &tsi_hs);
@ -595,7 +718,6 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
tsi_result_to_string(result)); tsi_result_to_string(result));
return; return;
} }
// Create handshakers. // Create handshakers.
grpc_handshake_manager_add( grpc_handshake_manager_add(
handshake_mgr, handshake_mgr,
@ -857,31 +979,6 @@ grpc_slice grpc_get_default_ssl_roots_for_testing(void) {
return compute_default_pem_root_certs_once(); return compute_default_pem_root_certs_once();
} }
/* Maps a grpc_ssl_client_certificate_request_type onto the corresponding TSI
 * request type. Unrecognized values conservatively fall back to not
 * requesting a client certificate at all. */
static tsi_client_certificate_request_type
get_tsi_client_certificate_request_type(
    grpc_ssl_client_certificate_request_type grpc_request_type) {
  switch (grpc_request_type) {
    case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY:
      return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY;
    case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
      return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
    case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY:
      return TSI_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY;
    case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
      return TSI_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
    case GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE:
      return TSI_DONT_REQUEST_CLIENT_CERTIFICATE;
    default:
      /* NOTE(review): is this a sane default? Confirm with the security
       * owners; for now we fail open to "no client cert requested". */
      return TSI_DONT_REQUEST_CLIENT_CERTIFICATE;
  }
}
const char *grpc_get_default_ssl_roots(void) { const char *grpc_get_default_ssl_roots(void) {
/* TODO(jboeuf@google.com): Maybe revisit the approach which consists in /* TODO(jboeuf@google.com): Maybe revisit the approach which consists in
loading all the roots once for the lifetime of the process. */ loading all the roots once for the lifetime of the process. */
@ -897,18 +994,14 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
grpc_call_credentials *request_metadata_creds, grpc_call_credentials *request_metadata_creds,
const grpc_ssl_config *config, const char *target_name, const grpc_ssl_config *config, const char *target_name,
const char *overridden_target_name, grpc_channel_security_connector **sc) { const char *overridden_target_name, grpc_channel_security_connector **sc) {
size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions(); size_t num_alpn_protocols = 0;
const char **alpn_protocol_strings = const char **alpn_protocol_strings =
(const char **)gpr_malloc(sizeof(const char *) * num_alpn_protocols); fill_alpn_protocol_strings(&num_alpn_protocols);
tsi_result result = TSI_OK; tsi_result result = TSI_OK;
grpc_ssl_channel_security_connector *c; grpc_ssl_channel_security_connector *c;
size_t i;
const char *pem_root_certs; const char *pem_root_certs;
char *port; char *port;
bool has_key_cert_pair; bool has_key_cert_pair;
for (i = 0; i < num_alpn_protocols; i++) {
alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i);
}
if (config == NULL || target_name == NULL) { if (config == NULL || target_name == NULL) {
gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name."); gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name.");
@ -965,50 +1058,64 @@ error:
return GRPC_SECURITY_ERROR; return GRPC_SECURITY_ERROR;
} }
grpc_security_status grpc_ssl_server_security_connector_create( static grpc_ssl_server_security_connector *
grpc_exec_ctx *exec_ctx, grpc_server_credentials *server_creds, grpc_ssl_server_security_connector_initialize(
const grpc_ssl_server_config *config, grpc_server_security_connector **sc) { grpc_server_credentials *server_creds) {
size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions(); grpc_ssl_server_security_connector *c =
const char **alpn_protocol_strings = (grpc_ssl_server_security_connector *)gpr_zalloc(
(const char **)gpr_malloc(sizeof(const char *) * num_alpn_protocols);
tsi_result result = TSI_OK;
grpc_ssl_server_security_connector *c;
size_t i;
for (i = 0; i < num_alpn_protocols; i++) {
alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i);
}
if (config == NULL || config->num_key_cert_pairs == 0) {
gpr_log(GPR_ERROR, "An SSL server needs a key and a cert.");
goto error;
}
c = (grpc_ssl_server_security_connector *)gpr_zalloc(
sizeof(grpc_ssl_server_security_connector)); sizeof(grpc_ssl_server_security_connector));
gpr_ref_init(&c->base.base.refcount, 1); gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.url_scheme = GRPC_SSL_URL_SCHEME; c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
c->base.base.vtable = &ssl_server_vtable; c->base.base.vtable = &ssl_server_vtable;
c->base.add_handshakers = ssl_server_add_handshakers;
c->base.server_creds = grpc_server_credentials_ref(server_creds); c->base.server_creds = grpc_server_credentials_ref(server_creds);
return c;
}
grpc_security_status grpc_ssl_server_security_connector_create(
grpc_exec_ctx *exec_ctx, grpc_server_credentials *gsc,
grpc_server_security_connector **sc) {
tsi_result result = TSI_OK;
grpc_ssl_server_credentials *server_credentials =
(grpc_ssl_server_credentials *)gsc;
grpc_security_status retval = GRPC_SECURITY_OK;
GPR_ASSERT(server_credentials != NULL);
GPR_ASSERT(sc != NULL);
grpc_ssl_server_security_connector *c =
grpc_ssl_server_security_connector_initialize(gsc);
if (server_connector_has_cert_config_fetcher(c)) {
// Load initial credentials from certificate_config_fetcher:
if (!try_fetch_ssl_server_credentials(c)) {
gpr_log(GPR_ERROR, "Failed loading SSL server credentials from fetcher.");
retval = GRPC_SECURITY_ERROR;
}
} else {
size_t num_alpn_protocols = 0;
const char **alpn_protocol_strings =
fill_alpn_protocol_strings(&num_alpn_protocols);
result = tsi_create_ssl_server_handshaker_factory_ex( result = tsi_create_ssl_server_handshaker_factory_ex(
config->pem_key_cert_pairs, config->num_key_cert_pairs, server_credentials->config.pem_key_cert_pairs,
config->pem_root_certs, get_tsi_client_certificate_request_type( server_credentials->config.num_key_cert_pairs,
config->client_certificate_request), server_credentials->config.pem_root_certs,
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols, get_tsi_client_certificate_request_type(
&c->server_handshaker_factory); server_credentials->config.client_certificate_request),
ssl_cipher_suites(), alpn_protocol_strings,
(uint16_t)num_alpn_protocols, &c->server_handshaker_factory);
gpr_free((void *)alpn_protocol_strings);
if (result != TSI_OK) { if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result)); tsi_result_to_string(result));
ssl_server_destroy(exec_ctx, &c->base.base); retval = GRPC_SECURITY_ERROR;
*sc = NULL; }
goto error;
} }
c->base.add_handshakers = ssl_server_add_handshakers;
*sc = &c->base;
gpr_free((void *)alpn_protocol_strings);
return GRPC_SECURITY_OK;
error: if (retval == GRPC_SECURITY_OK) {
gpr_free((void *)alpn_protocol_strings); *sc = &c->base;
return GRPC_SECURITY_ERROR; } else {
if (c != NULL) ssl_server_destroy(exec_ctx, &c->base.base);
if (sc != NULL) *sc = NULL;
}
return retval;
} }

@ -248,8 +248,8 @@ typedef struct {
specific error code otherwise. specific error code otherwise.
*/ */
grpc_security_status grpc_ssl_server_security_connector_create( grpc_security_status grpc_ssl_server_security_connector_create(
grpc_exec_ctx *exec_ctx, grpc_server_credentials *server_creds, grpc_exec_ctx *exec_ctx, grpc_server_credentials *server_credentials,
const grpc_ssl_server_config *config, grpc_server_security_connector **sc); grpc_server_security_connector **sc);
/* Util. */ /* Util. */
const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer, const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,

@ -29,8 +29,6 @@ grpc_tracer_flag grpc_connectivity_state_trace =
const char *grpc_connectivity_state_name(grpc_connectivity_state state) { const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
switch (state) { switch (state) {
case GRPC_CHANNEL_INIT:
return "INIT";
case GRPC_CHANNEL_IDLE: case GRPC_CHANNEL_IDLE:
return "IDLE"; return "IDLE";
case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_CONNECTING:
@ -174,7 +172,6 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_name(state), reason, error, error_string); grpc_connectivity_state_name(state), reason, error, error_string);
} }
switch (state) { switch (state) {
case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE: case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY: case GRPC_CHANNEL_READY:

@ -48,187 +48,13 @@
namespace grpc { namespace grpc {
namespace {
// Polling interval used by the connectivity watcher thread between checks.
int kConnectivityCheckIntervalMsec = 500;

void WatchStateChange(void* arg);

// One-shot completion-queue tag: FinalizeResult surfaces the caller-supplied
// tag to whoever polls the completion queue, then the object deletes itself.
class TagSaver final : public CompletionQueueTag {
 public:
  explicit TagSaver(void* tag) : tag_(tag) {}
  ~TagSaver() override {}
  bool FinalizeResult(void** tag, bool* status) override {
    *tag = tag_;   // hand the saved tag back to the CQ consumer
    delete this;   // single-use: self-destructs after one completion
    return true;
  }

 private:
  void* tag_;  // opaque tag reported from FinalizeResult
};
// Constantly watches channel connectivity status to reconnect a transiently
// disconnected channel. This is a temporary work-around before we have retry
// support.
//
// Implemented as a process-wide singleton (g_watcher_) with one background
// thread that polls a shared CompletionQueue. Channels register via
// StartWatching and unregister implicitly when they reach
// GRPC_CHANNEL_SHUTDOWN; ref_ counts the registered channels so the worker
// loop knows when it may exit.
class ChannelConnectivityWatcher : private GrpcLibraryCodegen {
 public:
  // Registers `channel` with the singleton watcher, creating the watcher
  // (and its worker thread) on first use. No-op when disabled via env var.
  static void StartWatching(grpc_channel* channel) {
    if (!IsDisabled()) {
      std::unique_lock<std::mutex> lock(g_watcher_mu_);
      if (g_watcher_ == nullptr) {
        g_watcher_ = new ChannelConnectivityWatcher();
      }
      g_watcher_->StartWatchingLocked(channel);
    }
  }

  // Drops one channel's registration; tears down the singleton (joining the
  // worker thread) when the last channel goes away.
  static void StopWatching() {
    if (!IsDisabled()) {
      std::unique_lock<std::mutex> lock(g_watcher_mu_);
      if (g_watcher_->StopWatchingLocked()) {
        delete g_watcher_;
        g_watcher_ = nullptr;
      }
    }
  }

 private:
  // Spawns the joinable worker thread running WatchStateChange(this).
  // ref_ starts at 0; each watched channel adds one reference.
  ChannelConnectivityWatcher() : channel_count_(0), shutdown_(false) {
    gpr_ref_init(&ref_, 0);
    gpr_thd_options options = gpr_thd_options_default();
    gpr_thd_options_set_joinable(&options);
    gpr_thd_new(&thd_id_, &WatchStateChange, this, &options);
  }

  // Kill switch: set GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER to a truthy
  // value to disable the watcher entirely.
  static bool IsDisabled() {
    char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER");
    bool disabled = gpr_is_true(env);
    gpr_free(env);
    return disabled;
  }

  // Worker loop: polls cq_ for state-change events, re-arms the watch while
  // the channel is alive, and releases a reference once it hits SHUTDOWN.
  // Exits when the last reference is dropped.
  void WatchStateChangeImpl() {
    bool ok = false;
    void* tag = NULL;
    CompletionQueue::NextStatus status = CompletionQueue::GOT_EVENT;
    while (true) {
      {
        std::unique_lock<std::mutex> lock(shutdown_mu_);
        if (shutdown_) {
          // Drain cq_ if the watcher is shutting down
          status = cq_.AsyncNext(&tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME));
        } else {
          status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME));
          // Make sure we've seen 2 TIMEOUTs before going to sleep
          if (status == CompletionQueue::TIMEOUT) {
            status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME));
            if (status == CompletionQueue::TIMEOUT) {
              // Nothing pending: sleep (interruptibly, so shutdown can wake
              // us) before polling again.
              shutdown_cv_.wait_for(lock, std::chrono::milliseconds(
                                              kConnectivityCheckIntervalMsec));
              continue;
            }
          }
        }
      }
      // An event fired; its tag is the per-channel state record.
      ChannelState* channel_state = static_cast<ChannelState*>(tag);
      channel_state->state =
          grpc_channel_check_connectivity_state(channel_state->channel, false);
      if (channel_state->state == GRPC_CHANNEL_SHUTDOWN) {
        void* shutdown_tag = NULL;
        // Consume the sentinel watch that held the C-channel reference
        // (see StartWatchingLocked) before freeing the record.
        channel_state->shutdown_cq.Next(&shutdown_tag, &ok);
        delete channel_state;
        if (gpr_unref(&ref_)) {
          // Last channel gone: the worker thread may exit.
          break;
        }
      } else {
        // Channel still alive: re-arm the connectivity watch.
        TagSaver* tag_saver = new TagSaver(channel_state);
        grpc_channel_watch_connectivity_state(
            channel_state->channel, channel_state->state,
            gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(), tag_saver);
      }
    }
  }

  // Registers one channel. Caller must hold g_watcher_mu_ (hence "Locked").
  void StartWatchingLocked(grpc_channel* channel) {
    if (thd_id_ != 0) {
      gpr_ref(&ref_);
      ++channel_count_;
      ChannelState* channel_state = new ChannelState(channel);
      // The first grpc_channel_watch_connectivity_state() is not used to
      // monitor the channel state change, but to hold a reference of the
      // c channel. So that WatchStateChangeImpl() can observe state ==
      // GRPC_CHANNEL_SHUTDOWN before the channel gets destroyed.
      grpc_channel_watch_connectivity_state(
          channel_state->channel, channel_state->state,
          gpr_inf_future(GPR_CLOCK_REALTIME), channel_state->shutdown_cq.cq(),
          new TagSaver(nullptr));
      grpc_channel_watch_connectivity_state(
          channel_state->channel, channel_state->state,
          gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(),
          new TagSaver(channel_state));
    }
  }

  // Unregisters one channel. Returns true when this was the last channel:
  // the worker thread has been signalled and joined, and the caller should
  // destroy the singleton. Caller must hold g_watcher_mu_.
  bool StopWatchingLocked() {
    if (--channel_count_ == 0) {
      {
        std::unique_lock<std::mutex> lock(shutdown_mu_);
        shutdown_ = true;
        shutdown_cv_.notify_one();
      }
      gpr_thd_join(thd_id_);
      return true;
    }
    return false;
  }

  friend void WatchStateChange(void* arg);

  // Per-channel bookkeeping owned by the watcher; freed by the worker loop
  // when the channel reaches GRPC_CHANNEL_SHUTDOWN.
  struct ChannelState {
    explicit ChannelState(grpc_channel* channel)
        : channel(channel), state(GRPC_CHANNEL_IDLE){};
    grpc_channel* channel;
    grpc_connectivity_state state;
    // Dedicated CQ holding the sentinel watch that pins the C channel.
    CompletionQueue shutdown_cq;
  };

  gpr_thd_id thd_id_;       // worker thread handle (joined on shutdown)
  CompletionQueue cq_;      // shared CQ polled by the worker thread
  gpr_refcount ref_;        // one reference per watched channel
  int channel_count_;       // protected by g_watcher_mu_
  std::mutex shutdown_mu_;
  std::condition_variable shutdown_cv_;  // protected by shutdown_mu_
  bool shutdown_;  // protected by shutdown_mu_

  static std::mutex g_watcher_mu_;
  static ChannelConnectivityWatcher* g_watcher_;  // protected by g_watcher_mu_
};
// Storage for the singleton watcher and the mutex guarding its lifetime.
std::mutex ChannelConnectivityWatcher::g_watcher_mu_;
ChannelConnectivityWatcher* ChannelConnectivityWatcher::g_watcher_ = nullptr;

// Thread entry trampoline: forwards to the watcher's worker loop.
void WatchStateChange(void* arg) {
  ChannelConnectivityWatcher* watcher =
      static_cast<ChannelConnectivityWatcher*>(arg);
  watcher->WatchStateChangeImpl();
}
} // namespace
static internal::GrpcLibraryInitializer g_gli_initializer; static internal::GrpcLibraryInitializer g_gli_initializer;
Channel::Channel(const grpc::string& host, grpc_channel* channel) Channel::Channel(const grpc::string& host, grpc_channel* channel)
: host_(host), c_channel_(channel) { : host_(host), c_channel_(channel) {
g_gli_initializer.summon(); g_gli_initializer.summon();
if (grpc_channel_support_connectivity_watcher(channel)) {
ChannelConnectivityWatcher::StartWatching(channel);
}
} }
Channel::~Channel() { Channel::~Channel() { grpc_channel_destroy(c_channel_); }
const bool stop_watching =
grpc_channel_support_connectivity_watcher(c_channel_);
grpc_channel_destroy(c_channel_);
if (stop_watching) {
ChannelConnectivityWatcher::StopWatching();
}
}
namespace { namespace {
@ -259,7 +85,8 @@ grpc::string Channel::GetServiceConfigJSON() const {
&channel_info.service_config_json); &channel_info.service_config_json);
} }
Call Channel::CreateCall(const RpcMethod& method, ClientContext* context, internal::Call Channel::CreateCall(const internal::RpcMethod& method,
ClientContext* context,
CompletionQueue* cq) { CompletionQueue* cq) {
const bool kRegistered = method.channel_tag() && context->authority().empty(); const bool kRegistered = method.channel_tag() && context->authority().empty();
grpc_call* c_call = NULL; grpc_call* c_call = NULL;
@ -292,10 +119,11 @@ Call Channel::CreateCall(const RpcMethod& method, ClientContext* context,
} }
grpc_census_call_set_context(c_call, context->census_context()); grpc_census_call_set_context(c_call, context->census_context());
context->set_call(c_call, shared_from_this()); context->set_call(c_call, shared_from_this());
return Call(c_call, this, cq); return internal::Call(c_call, this, cq);
} }
void Channel::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) { void Channel::PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) {
static const size_t MAX_OPS = 8; static const size_t MAX_OPS = 8;
size_t nops = 0; size_t nops = 0;
grpc_op cops[MAX_OPS]; grpc_op cops[MAX_OPS];
@ -313,6 +141,24 @@ grpc_connectivity_state Channel::GetState(bool try_to_connect) {
return grpc_channel_check_connectivity_state(c_channel_, try_to_connect); return grpc_channel_check_connectivity_state(c_channel_, try_to_connect);
} }
namespace {
// One-shot completion-queue tag used by NotifyOnStateChangeImpl: when the
// state-change event completes, FinalizeResult reports the caller's original
// tag and the object deletes itself.
class TagSaver final : public internal::CompletionQueueTag {
 public:
  explicit TagSaver(void* tag) : tag_(tag) {}
  ~TagSaver() override {}
  bool FinalizeResult(void** tag, bool* status) override {
    *tag = tag_;   // hand the saved tag back to the CQ consumer
    delete this;   // single-use: self-destructs after one completion
    return true;
  }

 private:
  void* tag_;  // opaque caller-supplied tag
};
}  // namespace
void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed, void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline, gpr_timespec deadline,
CompletionQueue* cq, void* tag) { CompletionQueue* cq, void* tag) {

@ -27,8 +27,9 @@ std::unique_ptr<GenericClientAsyncReaderWriter> CallInternal(
ChannelInterface* channel, ClientContext* context, ChannelInterface* channel, ClientContext* context,
const grpc::string& method, CompletionQueue* cq, bool start, void* tag) { const grpc::string& method, CompletionQueue* cq, bool start, void* tag) {
return std::unique_ptr<GenericClientAsyncReaderWriter>( return std::unique_ptr<GenericClientAsyncReaderWriter>(
GenericClientAsyncReaderWriter::Create( internal::ClientAsyncReaderWriterFactory<ByteBuffer, ByteBuffer>::Create(
channel, cq, RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING), channel, cq, internal::RpcMethod(method.c_str(),
internal::RpcMethod::BIDI_STREAMING),
context, start, tag)); context, start, tag));
} }
@ -52,8 +53,9 @@ std::unique_ptr<GenericClientAsyncResponseReader> GenericStub::PrepareUnaryCall(
ClientContext* context, const grpc::string& method, ClientContext* context, const grpc::string& method,
const ByteBuffer& request, CompletionQueue* cq) { const ByteBuffer& request, CompletionQueue* cq) {
return std::unique_ptr<GenericClientAsyncResponseReader>( return std::unique_ptr<GenericClientAsyncResponseReader>(
GenericClientAsyncResponseReader::Create( internal::ClientAsyncResponseReaderFactory<ByteBuffer>::Create(
channel_.get(), cq, RpcMethod(method.c_str(), RpcMethod::NORMAL_RPC), channel_.get(), cq,
internal::RpcMethod(method.c_str(), internal::RpcMethod::NORMAL_RPC),
context, request, false)); context, request, false));
} }

@ -60,7 +60,7 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
case GRPC_QUEUE_SHUTDOWN: case GRPC_QUEUE_SHUTDOWN:
return SHUTDOWN; return SHUTDOWN;
case GRPC_OP_COMPLETE: case GRPC_OP_COMPLETE:
auto cq_tag = static_cast<CompletionQueueTag*>(ev.tag); auto cq_tag = static_cast<internal::CompletionQueueTag*>(ev.tag);
*ok = ev.success != 0; *ok = ev.success != 0;
*tag = cq_tag; *tag = cq_tag;
if (cq_tag->FinalizeResult(tag, ok)) { if (cq_tag->FinalizeResult(tag, ok)) {
@ -87,7 +87,7 @@ bool CompletionQueue::CompletionQueueTLSCache::Flush(void** tag, bool* ok) {
flushed_ = true; flushed_ = true;
if (grpc_completion_queue_thread_local_cache_flush(cq_->cq_, &res_tag, if (grpc_completion_queue_thread_local_cache_flush(cq_->cq_, &res_tag,
&res)) { &res)) {
auto cq_tag = static_cast<CompletionQueueTag*>(res_tag); auto cq_tag = static_cast<internal::CompletionQueueTag*>(res_tag);
*ok = res == 1; *ok = res == 1;
if (cq_tag->FinalizeResult(tag, ok)) { if (cq_tag->FinalizeResult(tag, ok)) {
return true; return true;

@ -37,11 +37,12 @@ const char kHealthCheckMethodName[] = "/grpc.health.v1.Health/Check";
DefaultHealthCheckService::HealthCheckServiceImpl::HealthCheckServiceImpl( DefaultHealthCheckService::HealthCheckServiceImpl::HealthCheckServiceImpl(
DefaultHealthCheckService* service) DefaultHealthCheckService* service)
: service_(service), method_(nullptr) { : service_(service), method_(nullptr) {
MethodHandler* handler = internal::MethodHandler* handler =
new RpcMethodHandler<HealthCheckServiceImpl, ByteBuffer, ByteBuffer>( new internal::RpcMethodHandler<HealthCheckServiceImpl, ByteBuffer,
ByteBuffer>(
std::mem_fn(&HealthCheckServiceImpl::Check), this); std::mem_fn(&HealthCheckServiceImpl::Check), this);
method_ = new RpcServiceMethod(kHealthCheckMethodName, RpcMethod::NORMAL_RPC, method_ = new internal::RpcServiceMethod(
handler); kHealthCheckMethodName, internal::RpcMethod::NORMAL_RPC, handler);
AddMethod(method_); AddMethod(method_);
} }

@ -41,7 +41,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
private: private:
const DefaultHealthCheckService* const service_; const DefaultHealthCheckService* const service_;
RpcServiceMethod* method_; internal::RpcServiceMethod* method_;
}; };
DefaultHealthCheckService(); DefaultHealthCheckService();

@ -90,7 +90,8 @@ class Server::UnimplementedAsyncRequest final
ServerCompletionQueue* const cq_; ServerCompletionQueue* const cq_;
}; };
typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> typedef internal::SneakyCallOpSet<internal::CallOpSendInitialMetadata,
internal::CallOpServerSendStatus>
UnimplementedAsyncResponseOp; UnimplementedAsyncResponseOp;
class Server::UnimplementedAsyncResponse final class Server::UnimplementedAsyncResponse final
: public UnimplementedAsyncResponseOp { : public UnimplementedAsyncResponseOp {
@ -108,12 +109,12 @@ class Server::UnimplementedAsyncResponse final
UnimplementedAsyncRequest* const request_; UnimplementedAsyncRequest* const request_;
}; };
class ShutdownTag : public CompletionQueueTag { class ShutdownTag : public internal::CompletionQueueTag {
public: public:
bool FinalizeResult(void** tag, bool* status) { return false; } bool FinalizeResult(void** tag, bool* status) { return false; }
}; };
class DummyTag : public CompletionQueueTag { class DummyTag : public internal::CompletionQueueTag {
public: public:
bool FinalizeResult(void** tag, bool* status) { bool FinalizeResult(void** tag, bool* status) {
*status = true; *status = true;
@ -121,15 +122,15 @@ class DummyTag : public CompletionQueueTag {
} }
}; };
class Server::SyncRequest final : public CompletionQueueTag { class Server::SyncRequest final : public internal::CompletionQueueTag {
public: public:
SyncRequest(RpcServiceMethod* method, void* tag) SyncRequest(internal::RpcServiceMethod* method, void* tag)
: method_(method), : method_(method),
tag_(tag), tag_(tag),
in_flight_(false), in_flight_(false),
has_request_payload_(method->method_type() == RpcMethod::NORMAL_RPC || has_request_payload_(
method->method_type() == method->method_type() == internal::RpcMethod::NORMAL_RPC ||
RpcMethod::SERVER_STREAMING), method->method_type() == internal::RpcMethod::SERVER_STREAMING),
call_details_(nullptr), call_details_(nullptr),
cq_(nullptr) { cq_(nullptr) {
grpc_metadata_array_init(&request_metadata_); grpc_metadata_array_init(&request_metadata_);
@ -212,14 +213,14 @@ class Server::SyncRequest final : public CompletionQueueTag {
void Run(std::shared_ptr<GlobalCallbacks> global_callbacks) { void Run(std::shared_ptr<GlobalCallbacks> global_callbacks) {
ctx_.BeginCompletionOp(&call_); ctx_.BeginCompletionOp(&call_);
global_callbacks->PreSynchronousRequest(&ctx_); global_callbacks->PreSynchronousRequest(&ctx_);
method_->handler()->RunHandler( method_->handler()->RunHandler(internal::MethodHandler::HandlerParameter(
MethodHandler::HandlerParameter(&call_, &ctx_, request_payload_)); &call_, &ctx_, request_payload_));
global_callbacks->PostSynchronousRequest(&ctx_); global_callbacks->PostSynchronousRequest(&ctx_);
request_payload_ = nullptr; request_payload_ = nullptr;
cq_.Shutdown(); cq_.Shutdown();
CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag(); internal::CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag();
cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME)); cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));
/* Ensure the cq_ is shutdown */ /* Ensure the cq_ is shutdown */
@ -229,15 +230,15 @@ class Server::SyncRequest final : public CompletionQueueTag {
private: private:
CompletionQueue cq_; CompletionQueue cq_;
Call call_; internal::Call call_;
ServerContext ctx_; ServerContext ctx_;
const bool has_request_payload_; const bool has_request_payload_;
grpc_byte_buffer* request_payload_; grpc_byte_buffer* request_payload_;
RpcServiceMethod* const method_; internal::RpcServiceMethod* const method_;
}; };
private: private:
RpcServiceMethod* const method_; internal::RpcServiceMethod* const method_;
void* const tag_; void* const tag_;
bool in_flight_; bool in_flight_;
const bool has_request_payload_; const bool has_request_payload_;
@ -311,14 +312,15 @@ class Server::SyncRequestThreadManager : public ThreadManager {
// object // object
} }
void AddSyncMethod(RpcServiceMethod* method, void* tag) { void AddSyncMethod(internal::RpcServiceMethod* method, void* tag) {
sync_requests_.emplace_back(new SyncRequest(method, tag)); sync_requests_.emplace_back(new SyncRequest(method, tag));
} }
void AddUnknownSyncMethod() { void AddUnknownSyncMethod() {
if (!sync_requests_.empty()) { if (!sync_requests_.empty()) {
unknown_method_.reset(new RpcServiceMethod( unknown_method_.reset(new internal::RpcServiceMethod(
"unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); "unknown", internal::RpcMethod::BIDI_STREAMING,
new internal::UnknownMethodHandler));
sync_requests_.emplace_back( sync_requests_.emplace_back(
new SyncRequest(unknown_method_.get(), nullptr)); new SyncRequest(unknown_method_.get(), nullptr));
} }
@ -355,8 +357,8 @@ class Server::SyncRequestThreadManager : public ThreadManager {
CompletionQueue* server_cq_; CompletionQueue* server_cq_;
int cq_timeout_msec_; int cq_timeout_msec_;
std::vector<std::unique_ptr<SyncRequest>> sync_requests_; std::vector<std::unique_ptr<SyncRequest>> sync_requests_;
std::unique_ptr<RpcServiceMethod> unknown_method_; std::unique_ptr<internal::RpcServiceMethod> unknown_method_;
std::unique_ptr<RpcServiceMethod> health_check_; std::unique_ptr<internal::RpcServiceMethod> health_check_;
std::shared_ptr<Server::GlobalCallbacks> global_callbacks_; std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
}; };
@ -439,13 +441,13 @@ std::shared_ptr<Channel> Server::InProcessChannel(
} }
static grpc_server_register_method_payload_handling PayloadHandlingForMethod( static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
RpcServiceMethod* method) { internal::RpcServiceMethod* method) {
switch (method->method_type()) { switch (method->method_type()) {
case RpcMethod::NORMAL_RPC: case internal::RpcMethod::NORMAL_RPC:
case RpcMethod::SERVER_STREAMING: case internal::RpcMethod::SERVER_STREAMING:
return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER; return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER;
case RpcMethod::CLIENT_STREAMING: case internal::RpcMethod::CLIENT_STREAMING:
case RpcMethod::BIDI_STREAMING: case internal::RpcMethod::BIDI_STREAMING:
return GRPC_SRM_PAYLOAD_NONE; return GRPC_SRM_PAYLOAD_NONE;
} }
GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;); GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;);
@ -466,7 +468,7 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
continue; continue;
} }
RpcServiceMethod* method = it->get(); internal::RpcServiceMethod* method = it->get();
void* tag = grpc_server_register_method( void* tag = grpc_server_register_method(
server_, method->name(), host ? host->c_str() : nullptr, server_, method->name(), host ? host->c_str() : nullptr,
PayloadHandlingForMethod(method), 0); PayloadHandlingForMethod(method), 0);
@ -606,7 +608,8 @@ void Server::Wait() {
} }
} }
void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) { void Server::PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) {
static const size_t MAX_OPS = 8; static const size_t MAX_OPS = 8;
size_t nops = 0; size_t nops = 0;
grpc_op cops[MAX_OPS]; grpc_op cops[MAX_OPS];
@ -622,8 +625,8 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
ServerInterface::BaseAsyncRequest::BaseAsyncRequest( ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
ServerInterface* server, ServerContext* context, ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag, internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
bool delete_on_finalize) void* tag, bool delete_on_finalize)
: server_(server), : server_(server),
context_(context), context_(context),
stream_(stream), stream_(stream),
@ -645,7 +648,8 @@ bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
} }
context_->set_call(call_); context_->set_call(call_);
context_->cq_ = call_cq_; context_->cq_ = call_cq_;
Call call(call_, server_, call_cq_, server_->max_receive_message_size()); internal::Call call(call_, server_, call_cq_,
server_->max_receive_message_size());
if (*status && call_) { if (*status && call_) {
context_->BeginCompletionOp(&call); context_->BeginCompletionOp(&call);
} }
@ -660,7 +664,8 @@ bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest( ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
ServerInterface* server, ServerContext* context, ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag) internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
void* tag)
: BaseAsyncRequest(server, context, stream, call_cq, tag, true) {} : BaseAsyncRequest(server, context, stream, call_cq, tag, true) {}
void ServerInterface::RegisteredAsyncRequest::IssueRequest( void ServerInterface::RegisteredAsyncRequest::IssueRequest(
@ -675,7 +680,7 @@ void ServerInterface::RegisteredAsyncRequest::IssueRequest(
ServerInterface::GenericAsyncRequest::GenericAsyncRequest( ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
ServerInterface* server, GenericServerContext* context, ServerInterface* server, GenericServerContext* context,
ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize) ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
: BaseAsyncRequest(server, context, stream, call_cq, tag, : BaseAsyncRequest(server, context, stream, call_cq, tag,
delete_on_finalize) { delete_on_finalize) {
@ -718,7 +723,7 @@ Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
UnimplementedAsyncRequest* request) UnimplementedAsyncRequest* request)
: request_(request) { : request_(request) {
Status status(StatusCode::UNIMPLEMENTED, ""); Status status(StatusCode::UNIMPLEMENTED, "");
UnknownMethodHandler::FillOps(request_->context(), this); internal::UnknownMethodHandler::FillOps(request_->context(), this);
request_->stream()->call_.PerformOps(this); request_->stream()->call_.PerformOps(this);
} }

@ -37,7 +37,7 @@ namespace grpc {
// CompletionOp // CompletionOp
class ServerContext::CompletionOp final : public CallOpSetInterface { class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
public: public:
// initial refs: one in the server context, one in the cq // initial refs: one in the server context, one in the cq
CompletionOp() CompletionOp()
@ -146,7 +146,7 @@ ServerContext::~ServerContext() {
} }
} }
void ServerContext::BeginCompletionOp(Call* call) { void ServerContext::BeginCompletionOp(internal::Call* call) {
GPR_ASSERT(!completion_op_); GPR_ASSERT(!completion_op_);
completion_op_ = new CompletionOp(); completion_op_ = new CompletionOp();
if (has_notify_when_done_tag_) { if (has_notify_when_done_tag_) {
@ -155,8 +155,8 @@ void ServerContext::BeginCompletionOp(Call* call) {
call->PerformOps(completion_op_); call->PerformOps(completion_op_);
} }
CompletionQueueTag* ServerContext::GetCompletionOpTag() { internal::CompletionQueueTag* ServerContext::GetCompletionOpTag() {
return static_cast<CompletionQueueTag*>(completion_op_); return static_cast<internal::CompletionQueueTag*>(completion_op_);
} }
void ServerContext::AddInitialMetadata(const grpc::string& key, void ServerContext::AddInitialMetadata(const grpc::string& key,

@ -254,6 +254,7 @@ CORE_SOURCE_FILES = [
'src/core/tsi/transport_security_adapter.cc', 'src/core/tsi/transport_security_adapter.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc', 'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc', 'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc', 'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc', 'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc', 'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -293,6 +294,7 @@ CORE_SOURCE_FILES = [
'third_party/nanopb/pb_encode.c', 'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',

@ -155,8 +155,14 @@ grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_impor
grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import; grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import;
grpc_secure_channel_create_type grpc_secure_channel_create_import; grpc_secure_channel_create_type grpc_secure_channel_create_import;
grpc_server_credentials_release_type grpc_server_credentials_release_import; grpc_server_credentials_release_type grpc_server_credentials_release_import;
grpc_ssl_server_certificate_config_create_type grpc_ssl_server_certificate_config_create_import;
grpc_ssl_server_certificate_config_destroy_type grpc_ssl_server_certificate_config_destroy_import;
grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import; grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import;
grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import; grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import;
grpc_ssl_server_credentials_create_options_using_config_type grpc_ssl_server_credentials_create_options_using_config_import;
grpc_ssl_server_credentials_create_options_using_config_fetcher_type grpc_ssl_server_credentials_create_options_using_config_fetcher_import;
grpc_ssl_server_credentials_options_destroy_type grpc_ssl_server_credentials_options_destroy_import;
grpc_ssl_server_credentials_create_with_options_type grpc_ssl_server_credentials_create_with_options_import;
grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import; grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import;
grpc_call_set_credentials_type grpc_call_set_credentials_import; grpc_call_set_credentials_type grpc_call_set_credentials_import;
grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import; grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import;
@ -465,8 +471,14 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_metadata_credentials_create_from_plugin_import = (grpc_metadata_credentials_create_from_plugin_type) GetProcAddress(library, "grpc_metadata_credentials_create_from_plugin"); grpc_metadata_credentials_create_from_plugin_import = (grpc_metadata_credentials_create_from_plugin_type) GetProcAddress(library, "grpc_metadata_credentials_create_from_plugin");
grpc_secure_channel_create_import = (grpc_secure_channel_create_type) GetProcAddress(library, "grpc_secure_channel_create"); grpc_secure_channel_create_import = (grpc_secure_channel_create_type) GetProcAddress(library, "grpc_secure_channel_create");
grpc_server_credentials_release_import = (grpc_server_credentials_release_type) GetProcAddress(library, "grpc_server_credentials_release"); grpc_server_credentials_release_import = (grpc_server_credentials_release_type) GetProcAddress(library, "grpc_server_credentials_release");
grpc_ssl_server_certificate_config_create_import = (grpc_ssl_server_certificate_config_create_type) GetProcAddress(library, "grpc_ssl_server_certificate_config_create");
grpc_ssl_server_certificate_config_destroy_import = (grpc_ssl_server_certificate_config_destroy_type) GetProcAddress(library, "grpc_ssl_server_certificate_config_destroy");
grpc_ssl_server_credentials_create_import = (grpc_ssl_server_credentials_create_type) GetProcAddress(library, "grpc_ssl_server_credentials_create"); grpc_ssl_server_credentials_create_import = (grpc_ssl_server_credentials_create_type) GetProcAddress(library, "grpc_ssl_server_credentials_create");
grpc_ssl_server_credentials_create_ex_import = (grpc_ssl_server_credentials_create_ex_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_ex"); grpc_ssl_server_credentials_create_ex_import = (grpc_ssl_server_credentials_create_ex_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_ex");
grpc_ssl_server_credentials_create_options_using_config_import = (grpc_ssl_server_credentials_create_options_using_config_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_options_using_config");
grpc_ssl_server_credentials_create_options_using_config_fetcher_import = (grpc_ssl_server_credentials_create_options_using_config_fetcher_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_options_using_config_fetcher");
grpc_ssl_server_credentials_options_destroy_import = (grpc_ssl_server_credentials_options_destroy_type) GetProcAddress(library, "grpc_ssl_server_credentials_options_destroy");
grpc_ssl_server_credentials_create_with_options_import = (grpc_ssl_server_credentials_create_with_options_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_with_options");
grpc_server_add_secure_http2_port_import = (grpc_server_add_secure_http2_port_type) GetProcAddress(library, "grpc_server_add_secure_http2_port"); grpc_server_add_secure_http2_port_import = (grpc_server_add_secure_http2_port_type) GetProcAddress(library, "grpc_server_add_secure_http2_port");
grpc_call_set_credentials_import = (grpc_call_set_credentials_type) GetProcAddress(library, "grpc_call_set_credentials"); grpc_call_set_credentials_import = (grpc_call_set_credentials_type) GetProcAddress(library, "grpc_call_set_credentials");
grpc_server_credentials_set_auth_metadata_processor_import = (grpc_server_credentials_set_auth_metadata_processor_type) GetProcAddress(library, "grpc_server_credentials_set_auth_metadata_processor"); grpc_server_credentials_set_auth_metadata_processor_import = (grpc_server_credentials_set_auth_metadata_processor_type) GetProcAddress(library, "grpc_server_credentials_set_auth_metadata_processor");

@ -446,12 +446,30 @@ extern grpc_secure_channel_create_type grpc_secure_channel_create_import;
typedef void(*grpc_server_credentials_release_type)(grpc_server_credentials *creds); typedef void(*grpc_server_credentials_release_type)(grpc_server_credentials *creds);
extern grpc_server_credentials_release_type grpc_server_credentials_release_import; extern grpc_server_credentials_release_type grpc_server_credentials_release_import;
#define grpc_server_credentials_release grpc_server_credentials_release_import #define grpc_server_credentials_release grpc_server_credentials_release_import
typedef grpc_ssl_server_certificate_config *(*grpc_ssl_server_certificate_config_create_type)(const char *pem_root_certs, const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs);
extern grpc_ssl_server_certificate_config_create_type grpc_ssl_server_certificate_config_create_import;
#define grpc_ssl_server_certificate_config_create grpc_ssl_server_certificate_config_create_import
typedef void(*grpc_ssl_server_certificate_config_destroy_type)(grpc_ssl_server_certificate_config *config);
extern grpc_ssl_server_certificate_config_destroy_type grpc_ssl_server_certificate_config_destroy_import;
#define grpc_ssl_server_certificate_config_destroy grpc_ssl_server_certificate_config_destroy_import
typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, int force_client_auth, void *reserved); typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, int force_client_auth, void *reserved);
extern grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import; extern grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import;
#define grpc_ssl_server_credentials_create grpc_ssl_server_credentials_create_import #define grpc_ssl_server_credentials_create grpc_ssl_server_credentials_create_import
typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_ex_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, grpc_ssl_client_certificate_request_type client_certificate_request, void *reserved); typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_ex_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, grpc_ssl_client_certificate_request_type client_certificate_request, void *reserved);
extern grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import; extern grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import;
#define grpc_ssl_server_credentials_create_ex grpc_ssl_server_credentials_create_ex_import #define grpc_ssl_server_credentials_create_ex grpc_ssl_server_credentials_create_ex_import
typedef grpc_ssl_server_credentials_options *(*grpc_ssl_server_credentials_create_options_using_config_type)(grpc_ssl_client_certificate_request_type client_certificate_request, grpc_ssl_server_certificate_config *certificate_config);
extern grpc_ssl_server_credentials_create_options_using_config_type grpc_ssl_server_credentials_create_options_using_config_import;
#define grpc_ssl_server_credentials_create_options_using_config grpc_ssl_server_credentials_create_options_using_config_import
typedef grpc_ssl_server_credentials_options *(*grpc_ssl_server_credentials_create_options_using_config_fetcher_type)(grpc_ssl_client_certificate_request_type client_certificate_request, grpc_ssl_server_certificate_config_callback cb, void *user_data);
extern grpc_ssl_server_credentials_create_options_using_config_fetcher_type grpc_ssl_server_credentials_create_options_using_config_fetcher_import;
#define grpc_ssl_server_credentials_create_options_using_config_fetcher grpc_ssl_server_credentials_create_options_using_config_fetcher_import
typedef void(*grpc_ssl_server_credentials_options_destroy_type)(grpc_ssl_server_credentials_options *options);
extern grpc_ssl_server_credentials_options_destroy_type grpc_ssl_server_credentials_options_destroy_import;
#define grpc_ssl_server_credentials_options_destroy grpc_ssl_server_credentials_options_destroy_import
typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_with_options_type)(grpc_ssl_server_credentials_options *options);
extern grpc_ssl_server_credentials_create_with_options_type grpc_ssl_server_credentials_create_with_options_import;
#define grpc_ssl_server_credentials_create_with_options grpc_ssl_server_credentials_create_with_options_import
typedef int(*grpc_server_add_secure_http2_port_type)(grpc_server *server, const char *addr, grpc_server_credentials *creds); typedef int(*grpc_server_add_secure_http2_port_type)(grpc_server *server, const char *addr, grpc_server_credentials *creds);
extern grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import; extern grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import;
#define grpc_server_add_secure_http2_port grpc_server_add_secure_http2_port_import #define grpc_server_add_secure_http2_port grpc_server_add_secure_http2_port_import

@ -53,8 +53,8 @@ typedef struct request_sequences {
size_t n; /* number of iterations */ size_t n; /* number of iterations */
int *connections; /* indexed by the interation number, value is the index of int *connections; /* indexed by the interation number, value is the index of
the server it connected to or -1 if none */ the server it connected to or -1 if none */
int *connectivity_states; /* indexed by the interation number, value is the /* indexed by the interation number, value is the client connectivity state */
client connectivity state */ grpc_connectivity_state *connectivity_states;
} request_sequences; } request_sequences;
typedef void (*verifier_fn)(const servers_fixture *, grpc_channel *, typedef void (*verifier_fn)(const servers_fixture *, grpc_channel *,

@ -38,6 +38,7 @@ int main(int argc, char **argv) {
int i; int i;
grpc_test_init(argc, argv); grpc_test_init(argc, argv);
grpc_init();
GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_OK, GRPC_HTTP2_NO_ERROR); GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_OK, GRPC_HTTP2_NO_ERROR);
GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_CANCELLED, GRPC_HTTP2_CANCEL); GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_CANCELLED, GRPC_HTTP2_CANCEL);
@ -129,6 +130,11 @@ int main(int argc, char **argv) {
GRPC_STATUS_INTERNAL); GRPC_STATUS_INTERNAL);
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_REFUSED_STREAM, after_deadline, HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_REFUSED_STREAM, after_deadline,
GRPC_STATUS_UNAVAILABLE); GRPC_STATUS_UNAVAILABLE);
// We only have millisecond granularity in our timing code. This sleeps for 5
// millis to ensure that the status conversion code will pick up the fact
// that the deadline has expired.
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(5, GPR_TIMESPAN)));
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_CANCEL, after_deadline, HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_CANCEL, after_deadline,
GRPC_STATUS_DEADLINE_EXCEEDED); GRPC_STATUS_DEADLINE_EXCEEDED);
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_COMPRESSION_ERROR, after_deadline, HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_COMPRESSION_ERROR, after_deadline,
@ -158,5 +164,7 @@ int main(int argc, char **argv) {
grpc_http2_status_to_grpc_status(i); grpc_http2_status_to_grpc_status(i);
} }
grpc_shutdown();
return 0; return 0;
} }

@ -39,7 +39,6 @@
namespace grpc { namespace grpc {
class CompletionQueue; class CompletionQueue;
class Channel; class Channel;
class RpcService;
class ServerCompletionQueue; class ServerCompletionQueue;
class ServerContext; class ServerContext;
} // namespace grpc } // namespace grpc
@ -169,10 +168,10 @@ class ServiceA final {
::grpc::ClientReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* MethodA4Raw(::grpc::ClientContext* context) override; ::grpc::ClientReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* MethodA4Raw(::grpc::ClientContext* context) override;
::grpc::ClientAsyncReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* AsyncMethodA4Raw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) override; ::grpc::ClientAsyncReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* AsyncMethodA4Raw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) override;
::grpc::ClientAsyncReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* PrepareAsyncMethodA4Raw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* PrepareAsyncMethodA4Raw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq) override;
const ::grpc::RpcMethod rpcmethod_MethodA1_; const ::grpc::internal::RpcMethod rpcmethod_MethodA1_;
const ::grpc::RpcMethod rpcmethod_MethodA2_; const ::grpc::internal::RpcMethod rpcmethod_MethodA2_;
const ::grpc::RpcMethod rpcmethod_MethodA3_; const ::grpc::internal::RpcMethod rpcmethod_MethodA3_;
const ::grpc::RpcMethod rpcmethod_MethodA4_; const ::grpc::internal::RpcMethod rpcmethod_MethodA4_;
}; };
static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions()); static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
@ -352,7 +351,7 @@ class ServiceA final {
public: public:
WithStreamedUnaryMethod_MethodA1() { WithStreamedUnaryMethod_MethodA1() {
::grpc::Service::MarkMethodStreamed(0, ::grpc::Service::MarkMethodStreamed(0,
new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodA1<BaseClass>::StreamedMethodA1, this, std::placeholders::_1, std::placeholders::_2))); new ::grpc::internal::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodA1<BaseClass>::StreamedMethodA1, this, std::placeholders::_1, std::placeholders::_2)));
} }
~WithStreamedUnaryMethod_MethodA1() override { ~WithStreamedUnaryMethod_MethodA1() override {
BaseClassMustBeDerivedFromService(this); BaseClassMustBeDerivedFromService(this);
@ -373,7 +372,7 @@ class ServiceA final {
public: public:
WithSplitStreamingMethod_MethodA3() { WithSplitStreamingMethod_MethodA3() {
::grpc::Service::MarkMethodStreamed(2, ::grpc::Service::MarkMethodStreamed(2,
new ::grpc::SplitServerStreamingHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithSplitStreamingMethod_MethodA3<BaseClass>::StreamedMethodA3, this, std::placeholders::_1, std::placeholders::_2))); new ::grpc::internal::SplitServerStreamingHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithSplitStreamingMethod_MethodA3<BaseClass>::StreamedMethodA3, this, std::placeholders::_1, std::placeholders::_2)));
} }
~WithSplitStreamingMethod_MethodA3() override { ~WithSplitStreamingMethod_MethodA3() override {
BaseClassMustBeDerivedFromService(this); BaseClassMustBeDerivedFromService(this);
@ -427,7 +426,7 @@ class ServiceB final {
std::shared_ptr< ::grpc::ChannelInterface> channel_; std::shared_ptr< ::grpc::ChannelInterface> channel_;
::grpc::ClientAsyncResponseReader< ::grpc::testing::Response>* AsyncMethodB1Raw(::grpc::ClientContext* context, const ::grpc::testing::Request& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::grpc::testing::Response>* AsyncMethodB1Raw(::grpc::ClientContext* context, const ::grpc::testing::Request& request, ::grpc::CompletionQueue* cq) override;
::grpc::ClientAsyncResponseReader< ::grpc::testing::Response>* PrepareAsyncMethodB1Raw(::grpc::ClientContext* context, const ::grpc::testing::Request& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::grpc::testing::Response>* PrepareAsyncMethodB1Raw(::grpc::ClientContext* context, const ::grpc::testing::Request& request, ::grpc::CompletionQueue* cq) override;
const ::grpc::RpcMethod rpcmethod_MethodB1_; const ::grpc::internal::RpcMethod rpcmethod_MethodB1_;
}; };
static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions()); static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
@ -484,7 +483,7 @@ class ServiceB final {
public: public:
WithStreamedUnaryMethod_MethodB1() { WithStreamedUnaryMethod_MethodB1() {
::grpc::Service::MarkMethodStreamed(0, ::grpc::Service::MarkMethodStreamed(0,
new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodB1<BaseClass>::StreamedMethodB1, this, std::placeholders::_1, std::placeholders::_2))); new ::grpc::internal::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodB1<BaseClass>::StreamedMethodB1, this, std::placeholders::_1, std::placeholders::_2)));
} }
~WithStreamedUnaryMethod_MethodB1() override { ~WithStreamedUnaryMethod_MethodB1() override {
BaseClassMustBeDerivedFromService(this); BaseClassMustBeDerivedFromService(this);

@ -28,12 +28,14 @@
#include <grpc++/server_builder.h> #include <grpc++/server_builder.h>
#include <grpc++/server_context.h> #include <grpc++/server_context.h>
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/thd.h> #include <grpc/support/thd.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include <grpc/support/tls.h> #include <grpc/support/tls.h>
#include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/port.h"
#include "src/core/lib/support/env.h"
#include "src/proto/grpc/health/v1/health.grpc.pb.h" #include "src/proto/grpc/health/v1/health.grpc.pb.h"
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h" #include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h" #include "src/proto/grpc/testing/echo.grpc.pb.h"
@ -459,6 +461,15 @@ TEST_P(AsyncEnd2endTest, ReconnectChannel) {
if (GetParam().inproc) { if (GetParam().inproc) {
return; return;
} }
gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "200");
int poller_slowdown_factor = 1;
// It needs 2 pollset_works to reconnect the channel with polling engine
// "poll"
char* s = gpr_getenv("GRPC_POLL_STRATEGY");
if (s != NULL && 0 == strcmp(s, "poll")) {
poller_slowdown_factor = 2;
}
gpr_free(s);
ResetStub(); ResetStub();
SendRpc(1); SendRpc(1);
server_->Shutdown(); server_->Shutdown();
@ -468,10 +479,13 @@ TEST_P(AsyncEnd2endTest, ReconnectChannel) {
while (cq_->Next(&ignored_tag, &ignored_ok)) while (cq_->Next(&ignored_tag, &ignored_ok))
; ;
BuildAndStartServer(); BuildAndStartServer();
// It needs more than kConnectivityCheckIntervalMsec time to reconnect the // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
// channel. // reconnect the channel.
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_sleep_until(gpr_time_add(
gpr_time_from_millis(1600, GPR_TIMESPAN))); gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(
300 * poller_slowdown_factor * grpc_test_slowdown_factor(),
GPR_TIMESPAN)));
SendRpc(1); SendRpc(1);
} }

@ -303,7 +303,7 @@ TEST_F(ClientLbEnd2endTest, PickFirstUpdates) {
ports.clear(); ports.clear();
SetNextResolution(ports); SetNextResolution(ports);
gpr_log(GPR_INFO, "****** SET none *******"); gpr_log(GPR_INFO, "****** SET none *******");
grpc_connectivity_state channel_state = GRPC_CHANNEL_INIT; grpc_connectivity_state channel_state;
do { do {
channel_state = channel_->GetState(true /* try to connect */); channel_state = channel_->GetState(true /* try to connect */);
} while (channel_state == GRPC_CHANNEL_READY); } while (channel_state == GRPC_CHANNEL_READY);
@ -479,7 +479,7 @@ TEST_F(ClientLbEnd2endTest, RoundRobinUpdates) {
// An empty update will result in the channel going into TRANSIENT_FAILURE. // An empty update will result in the channel going into TRANSIENT_FAILURE.
ports.clear(); ports.clear();
SetNextResolution(ports); SetNextResolution(ports);
grpc_connectivity_state channel_state = GRPC_CHANNEL_INIT; grpc_connectivity_state channel_state;
do { do {
channel_state = channel_->GetState(true /* try to connect */); channel_state = channel_->GetState(true /* try to connect */);
} while (channel_state == GRPC_CHANNEL_READY); } while (channel_state == GRPC_CHANNEL_READY);

@ -30,11 +30,13 @@
#include <grpc++/server_builder.h> #include <grpc++/server_builder.h>
#include <grpc++/server_context.h> #include <grpc++/server_context.h>
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/thd.h> #include <grpc/support/thd.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/support/env.h"
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h" #include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h" #include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h" #include "test/core/util/port.h"
@ -704,13 +706,25 @@ TEST_P(End2endTest, ReconnectChannel) {
if (GetParam().inproc) { if (GetParam().inproc) {
return; return;
} }
gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "200");
int poller_slowdown_factor = 1;
// It needs 2 pollset_works to reconnect the channel with polling engine
// "poll"
char* s = gpr_getenv("GRPC_POLL_STRATEGY");
if (s != NULL && 0 == strcmp(s, "poll")) {
poller_slowdown_factor = 2;
}
gpr_free(s);
ResetStub(); ResetStub();
SendRpc(stub_.get(), 1, false); SendRpc(stub_.get(), 1, false);
RestartServer(std::shared_ptr<AuthMetadataProcessor>()); RestartServer(std::shared_ptr<AuthMetadataProcessor>());
// It needs more than kConnectivityCheckIntervalMsec time to reconnect the // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
// channel. // reconnect the channel.
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_sleep_until(gpr_time_add(
gpr_time_from_millis(1600, GPR_TIMESPAN))); gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(
300 * poller_slowdown_factor * grpc_test_slowdown_factor(),
GPR_TIMESPAN)));
SendRpc(stub_.get(), 1, false); SendRpc(stub_.get(), 1, false);
} }

@ -68,7 +68,7 @@ BENCHMARK(BM_CreateDestroyCore);
static void DoneWithCompletionOnStack(grpc_exec_ctx* exec_ctx, void* arg, static void DoneWithCompletionOnStack(grpc_exec_ctx* exec_ctx, void* arg,
grpc_cq_completion* completion) {} grpc_cq_completion* completion) {}
class DummyTag final : public CompletionQueueTag { class DummyTag final : public internal::CompletionQueueTag {
public: public:
bool FinalizeResult(void** tag, bool* status) override { return true; } bool FinalizeResult(void** tag, bool* status) override { return true; }
}; };

@ -21,6 +21,7 @@
#include <benchmark/benchmark.h> #include <benchmark/benchmark.h>
#include <gflags/gflags.h> #include <gflags/gflags.h>
#include <fstream> #include <fstream>
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/internal.h" #include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/lib/iomgr/timer_manager.h" #include "src/core/lib/iomgr/timer_manager.h"
@ -142,17 +143,17 @@ class TrickledCHTTP2 : public EndpointPairFixture {
client->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr, client->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr,
server->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr, server->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr,
server->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr, server->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr,
client->flow_control->remote_window(), client->flow_control->remote_window_,
server->flow_control->remote_window(), server->flow_control->remote_window_,
client->flow_control->announced_window(), client->flow_control->announced_window_,
server->flow_control->announced_window(), server->flow_control->announced_window_,
client_stream ? client_stream->flow_control->remote_window_delta() : -1, client_stream ? client_stream->flow_control->remote_window_delta_ : -1,
server_stream ? server_stream->flow_control->remote_window_delta() : -1, server_stream ? server_stream->flow_control->remote_window_delta_ : -1,
client_stream ? client_stream->flow_control->local_window_delta() : -1, client_stream ? client_stream->flow_control->local_window_delta_ : -1,
server_stream ? server_stream->flow_control->local_window_delta() : -1, server_stream ? server_stream->flow_control->local_window_delta_ : -1,
client_stream ? client_stream->flow_control->announced_window_delta() client_stream ? client_stream->flow_control->announced_window_delta_
: -1, : -1,
server_stream ? server_stream->flow_control->announced_window_delta() server_stream ? server_stream->flow_control->announced_window_delta_
: -1, : -1,
client->settings[GRPC_PEER_SETTINGS] client->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE], [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],

@ -16,6 +16,8 @@
* *
*/ */
#include <string.h>
#include "test/cpp/microbenchmarks/helpers.h" #include "test/cpp/microbenchmarks/helpers.h"
void TrackCounters::Finish(benchmark::State &state) { void TrackCounters::Finish(benchmark::State &state) {
@ -45,10 +47,14 @@ void TrackCounters::AddToLabel(std::ostream &out, benchmark::State &state) {
<< "/iter:" << ((double)stats.counters[i] / (double)state.iterations()); << "/iter:" << ((double)stats.counters[i] / (double)state.iterations());
} }
for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
out << " " << grpc_stats_histogram_name[i] << "-median:" std::ostringstream median_ss;
<< grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 50.0) median_ss << grpc_stats_histogram_name[i] << "-median";
<< " " << grpc_stats_histogram_name[i] << "-99p:" state.counters[median_ss.str()] = benchmark::Counter(
<< grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 99.0); grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 50.0));
std::ostringstream tail_ss;
tail_ss << grpc_stats_histogram_name[i] << "-99p";
state.counters[tail_ss.str()] = benchmark::Counter(
grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 99.0));
} }
#ifdef GPR_LOW_LEVEL_COUNTERS #ifdef GPR_LOW_LEVEL_COUNTERS
grpc_memory_counters counters_at_end = grpc_memory_counters_snapshot(); grpc_memory_counters counters_at_end = grpc_memory_counters_snapshot();

@ -245,9 +245,20 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
if (!cli_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) { if (!cli_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) {
return; return;
} }
ClientRpcContext* ctx; ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
std::mutex* shutdown_mu = &shutdown_state_[thread_idx]->mutex; std::mutex* shutdown_mu = &shutdown_state_[thread_idx]->mutex;
do { shutdown_mu->lock();
while (cli_cqs_[cq_[thread_idx]]->DoThenAsyncNext(
[&, ctx, ok, entry_ptr, shutdown_mu]() {
if (!ctx->RunNextState(ok, entry_ptr)) {
// The RPC and callback are done, so clone the ctx
// and kickstart the new one
ctx->StartNewClone(cli_cqs_[cq_[thread_idx]].get());
delete ctx;
}
shutdown_mu->unlock();
},
&got_tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME))) {
t->UpdateHistogram(entry_ptr); t->UpdateHistogram(entry_ptr);
// Got a regular event, so process it // Got a regular event, so process it
ctx = ClientRpcContext::detag(got_tag); ctx = ClientRpcContext::detag(got_tag);
@ -265,18 +276,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
shutdown_mu->unlock(); shutdown_mu->unlock();
return; return;
} }
} while (cli_cqs_[cq_[thread_idx]]->DoThenAsyncNext(
[&, ctx, ok, entry_ptr, shutdown_mu]() {
bool next_ok = ok;
if (!ctx->RunNextState(next_ok, entry_ptr)) {
// The RPC and callback are done, so clone the ctx
// and kickstart the new one
ctx->StartNewClone(cli_cqs_[cq_[thread_idx]].get());
delete ctx;
} }
shutdown_mu->unlock();
},
&got_tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME)));
} }
std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_; std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;

@ -70,7 +70,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
ServerAsyncReaderWriter<ResponseType, RequestType> *, ServerAsyncReaderWriter<ResponseType, RequestType> *,
CompletionQueue *, ServerCompletionQueue *, void *)> CompletionQueue *, ServerCompletionQueue *, void *)>
request_streaming_both_ways_function, request_streaming_both_ways_function,
std::function<grpc::Status(const PayloadConfig &, const RequestType *, std::function<grpc::Status(const PayloadConfig &, RequestType *,
ResponseType *)> ResponseType *)>
process_rpc) process_rpc)
: Server(config) { : Server(config) {
@ -206,13 +206,12 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
return; return;
} }
ServerRpcContext *ctx; ServerRpcContext *ctx;
std::mutex *mu_ptr; std::mutex *mu_ptr = &shutdown_state_[thread_idx]->mutex;
do { do {
ctx = detag(got_tag); ctx = detag(got_tag);
// The tag is a pointer to an RPC context to invoke // The tag is a pointer to an RPC context to invoke
// Proceed while holding a lock to make sure that // Proceed while holding a lock to make sure that
// this thread isn't supposed to shut down // this thread isn't supposed to shut down
mu_ptr = &shutdown_state_[thread_idx]->mutex;
mu_ptr->lock(); mu_ptr->lock();
if (shutdown_state_[thread_idx]->shutdown) { if (shutdown_state_[thread_idx]->shutdown) {
mu_ptr->unlock(); mu_ptr->unlock();
@ -255,7 +254,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
grpc::ServerAsyncResponseWriter<ResponseType> *, grpc::ServerAsyncResponseWriter<ResponseType> *,
void *)> void *)>
request_method, request_method,
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)>
invoke_method) invoke_method)
: srv_ctx_(new ServerContextType), : srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextUnaryImpl::invoker), next_state_(&ServerRpcContextUnaryImpl::invoker),
@ -301,8 +300,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
std::function<void(ServerContextType *, RequestType *, std::function<void(ServerContextType *, RequestType *,
grpc::ServerAsyncResponseWriter<ResponseType> *, void *)> grpc::ServerAsyncResponseWriter<ResponseType> *, void *)>
request_method_; request_method_;
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)> invoke_method_;
invoke_method_;
grpc::ServerAsyncResponseWriter<ResponseType> response_writer_; grpc::ServerAsyncResponseWriter<ResponseType> response_writer_;
}; };
@ -313,7 +311,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
ServerContextType *, ServerContextType *,
grpc::ServerAsyncReaderWriter<ResponseType, RequestType> *, void *)> grpc::ServerAsyncReaderWriter<ResponseType, RequestType> *, void *)>
request_method, request_method,
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)>
invoke_method) invoke_method)
: srv_ctx_(new ServerContextType), : srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextStreamingImpl::request_done), next_state_(&ServerRpcContextStreamingImpl::request_done),
@ -381,8 +379,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
ServerContextType *, ServerContextType *,
grpc::ServerAsyncReaderWriter<ResponseType, RequestType> *, void *)> grpc::ServerAsyncReaderWriter<ResponseType, RequestType> *, void *)>
request_method_; request_method_;
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)> invoke_method_;
invoke_method_;
grpc::ServerAsyncReaderWriter<ResponseType, RequestType> stream_; grpc::ServerAsyncReaderWriter<ResponseType, RequestType> stream_;
}; };
@ -394,7 +391,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
grpc::ServerAsyncReader<ResponseType, RequestType> *, grpc::ServerAsyncReader<ResponseType, RequestType> *,
void *)> void *)>
request_method, request_method,
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)>
invoke_method) invoke_method)
: srv_ctx_(new ServerContextType), : srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextStreamingFromClientImpl::request_done), next_state_(&ServerRpcContextStreamingFromClientImpl::request_done),
@ -452,8 +449,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
grpc::ServerAsyncReader<ResponseType, RequestType> *, grpc::ServerAsyncReader<ResponseType, RequestType> *,
void *)> void *)>
request_method_; request_method_;
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)> invoke_method_;
invoke_method_;
grpc::ServerAsyncReader<ResponseType, RequestType> stream_; grpc::ServerAsyncReader<ResponseType, RequestType> stream_;
}; };
@ -464,7 +460,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
std::function<void(ServerContextType *, RequestType *, std::function<void(ServerContextType *, RequestType *,
grpc::ServerAsyncWriter<ResponseType> *, void *)> grpc::ServerAsyncWriter<ResponseType> *, void *)>
request_method, request_method,
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)>
invoke_method) invoke_method)
: srv_ctx_(new ServerContextType), : srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextStreamingFromServerImpl::request_done), next_state_(&ServerRpcContextStreamingFromServerImpl::request_done),
@ -521,8 +517,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
std::function<void(ServerContextType *, RequestType *, std::function<void(ServerContextType *, RequestType *,
grpc::ServerAsyncWriter<ResponseType> *, void *)> grpc::ServerAsyncWriter<ResponseType> *, void *)>
request_method_; request_method_;
std::function<grpc::Status(const RequestType *, ResponseType *)> std::function<grpc::Status(RequestType *, ResponseType *)> invoke_method_;
invoke_method_;
grpc::ServerAsyncWriter<ResponseType> stream_; grpc::ServerAsyncWriter<ResponseType> stream_;
}; };
@ -551,8 +546,7 @@ static void RegisterGenericService(ServerBuilder *builder,
builder->RegisterAsyncGenericService(service); builder->RegisterAsyncGenericService(service);
} }
static Status ProcessSimpleRPC(const PayloadConfig &, static Status ProcessSimpleRPC(const PayloadConfig &, SimpleRequest *request,
const SimpleRequest *request,
SimpleResponse *response) { SimpleResponse *response) {
if (request->response_size() > 0) { if (request->response_size() > 0) {
if (!Server::SetPayload(request->response_type(), request->response_size(), if (!Server::SetPayload(request->response_type(), request->response_size(),
@ -560,12 +554,17 @@ static Status ProcessSimpleRPC(const PayloadConfig &,
return Status(grpc::StatusCode::INTERNAL, "Error creating payload."); return Status(grpc::StatusCode::INTERNAL, "Error creating payload.");
} }
} }
// We are done using the request. Clear it to reduce working memory.
// This proves to reduce cache misses in large message size cases.
request->Clear();
return Status::OK; return Status::OK;
} }
static Status ProcessGenericRPC(const PayloadConfig &payload_config, static Status ProcessGenericRPC(const PayloadConfig &payload_config,
const ByteBuffer *request, ByteBuffer *request, ByteBuffer *response) {
ByteBuffer *response) { // We are done using the request. Clear it to reduce working memory.
// This proves to reduce cache misses in large message size cases.
request->Clear();
int resp_size = payload_config.bytebuf_params().resp_size(); int resp_size = payload_config.bytebuf_params().resp_size();
std::unique_ptr<char[]> buf(new char[resp_size]); std::unique_ptr<char[]> buf(new char[resp_size]);
Slice slice(buf.get(), resp_size); Slice slice(buf.get(), resp_size);

@ -1 +1 @@
Subproject commit 44c25c892a6229b20db7cd9dc05584ea865896de Subproject commit 5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8

@ -907,6 +907,8 @@ src/core/ext/census/trace_string.h \
src/core/ext/census/tracing.cc \ src/core/ext/census/tracing.cc \
src/core/ext/census/tracing.h \ src/core/ext/census/tracing.h \
src/core/ext/filters/client_channel/README.md \ src/core/ext/filters/client_channel/README.md \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/backup_poller.h \
src/core/ext/filters/client_channel/channel_connectivity.cc \ src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \ src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel.h \ src/core/ext/filters/client_channel/client_channel.h \
@ -935,6 +937,8 @@ src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balan
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \
src/core/ext/filters/client_channel/lb_policy_factory.cc \ src/core/ext/filters/client_channel/lb_policy_factory.cc \
src/core/ext/filters/client_channel/lb_policy_factory.h \ src/core/ext/filters/client_channel/lb_policy_factory.h \
src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/lb_policy_registry.cc \

@ -22,6 +22,9 @@ ulimit -n 32768
# Move docker's storage location to scratch disk so we don't run out of space. # Move docker's storage location to scratch disk so we don't run out of space.
echo 'DOCKER_OPTS="${DOCKER_OPTS} --graph=/tmpfs/docker"' | sudo tee --append /etc/default/docker echo 'DOCKER_OPTS="${DOCKER_OPTS} --graph=/tmpfs/docker"' | sudo tee --append /etc/default/docker
# Use container registry mirror for pulling docker images (should make downloads faster)
# See https://cloud.google.com/container-registry/docs/using-dockerhub-mirroring
echo 'DOCKER_OPTS="${DOCKER_OPTS} --registry-mirror=https://mirror.gcr.io"' | sudo tee --append /etc/default/docker
sudo service docker restart sudo service docker restart
# Populate xdg-cache-home to workaround https://github.com/grpc/grpc/issues/11968 # Populate xdg-cache-home to workaround https://github.com/grpc/grpc/issues/11968

@ -27,5 +27,5 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/buildgen/generate_projects.sh tools/buildgen/generate_projects.sh
git -c user.name='foo' -c user.email='foo@google.com' commit -a -m 'Update submodule' git -c user.name='foo' -c user.email='foo@google.com' commit -a -m 'Update submodule'
tools/run_tests/run_tests_matrix.py -f linux --internal_ci --build_only tools/run_tests/run_tests_matrix.py -f linux --inner_jobs 4 -j 4 --internal_ci --build_only

@ -26,5 +26,5 @@ action {
env_vars { env_vars {
key: "RUN_TESTS_FLAGS" key: "RUN_TESTS_FLAGS"
value: "-f portability linux --internal_ci --build_only" value: "-f portability linux --inner_jobs 4 -j 4 --internal_ci --build_only"
} }

@ -23,7 +23,7 @@ _AVAILABLE_BENCHMARK_TESTS = [
'bm_metadata', 'bm_fullstack_trickle' 'bm_metadata', 'bm_fullstack_trickle'
] ]
_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration', _INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
'allocs_per_iteration', 'writes_per_iteration', 'allocs_per_iteration', 'writes_per_iteration',
'atm_cas_per_iteration', 'atm_add_per_iteration', 'atm_cas_per_iteration', 'atm_add_per_iteration',
'nows_per_iteration', 'cli_transport_stalls_per_iteration', 'nows_per_iteration', 'cli_transport_stalls_per_iteration',

@ -36,13 +36,13 @@ DOCKER_IMAGE_NAME=$(basename $DOCKERFILE_DIR)_$(sha1sum $DOCKERFILE_DIR/Dockerfi
# Pull the base image to force an update # Pull the base image to force an update
if [ "$DOCKER_BASE_IMAGE" != "" ] if [ "$DOCKER_BASE_IMAGE" != "" ]
then then
docker pull $DOCKER_BASE_IMAGE time docker pull $DOCKER_BASE_IMAGE
fi fi
if [ "$DOCKERHUB_ORGANIZATION" != "" ] if [ "$DOCKERHUB_ORGANIZATION" != "" ]
then then
DOCKER_IMAGE_NAME=$DOCKERHUB_ORGANIZATION/$DOCKER_IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKERHUB_ORGANIZATION/$DOCKER_IMAGE_NAME
docker pull $DOCKER_IMAGE_NAME time docker pull $DOCKER_IMAGE_NAME
else else
# Make sure docker image has been built. Should be instantaneous if so. # Make sure docker image has been built. Should be instantaneous if so.
docker build -t $DOCKER_IMAGE_NAME $DOCKERFILE_DIR docker build -t $DOCKER_IMAGE_NAME $DOCKERFILE_DIR

@ -40,7 +40,7 @@ DOCKER_IMAGE_NAME=$(basename $DOCKERFILE_DIR)_$(sha1sum $DOCKERFILE_DIR/Dockerfi
if [ "$DOCKERHUB_ORGANIZATION" != "" ] if [ "$DOCKERHUB_ORGANIZATION" != "" ]
then then
DOCKER_IMAGE_NAME=$DOCKERHUB_ORGANIZATION/$DOCKER_IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKERHUB_ORGANIZATION/$DOCKER_IMAGE_NAME
docker pull $DOCKER_IMAGE_NAME time docker pull $DOCKER_IMAGE_NAME
else else
# Make sure docker image has been built. Should be instantaneous if so. # Make sure docker image has been built. Should be instantaneous if so.
docker build -t $DOCKER_IMAGE_NAME $DOCKERFILE_DIR docker build -t $DOCKER_IMAGE_NAME $DOCKERFILE_DIR

@ -78,7 +78,7 @@ fi
if [ "$DOCKERHUB_ORGANIZATION" != "" ] if [ "$DOCKERHUB_ORGANIZATION" != "" ]
then then
BASE_IMAGE=$DOCKERHUB_ORGANIZATION/$BASE_IMAGE BASE_IMAGE=$DOCKERHUB_ORGANIZATION/$BASE_IMAGE
docker pull $BASE_IMAGE time docker pull $BASE_IMAGE
else else
# Make sure docker image has been built. Should be instantaneous if so. # Make sure docker image has been built. Should be instantaneous if so.
docker build -t $BASE_IMAGE --force-rm=true tools/dockerfile/interoptest/$BASE_NAME || exit $? docker build -t $BASE_IMAGE --force-rm=true tools/dockerfile/interoptest/$BASE_NAME || exit $?

@ -7453,7 +7453,6 @@
"headers": [ "headers": [
"third_party/benchmark/include/benchmark/benchmark.h", "third_party/benchmark/include/benchmark/benchmark.h",
"third_party/benchmark/include/benchmark/benchmark_api.h", "third_party/benchmark/include/benchmark/benchmark_api.h",
"third_party/benchmark/include/benchmark/macros.h",
"third_party/benchmark/include/benchmark/reporter.h", "third_party/benchmark/include/benchmark/reporter.h",
"third_party/benchmark/src/arraysize.h", "third_party/benchmark/src/arraysize.h",
"third_party/benchmark/src/benchmark_api_internal.h", "third_party/benchmark/src/benchmark_api_internal.h",
@ -7461,6 +7460,7 @@
"third_party/benchmark/src/colorprint.h", "third_party/benchmark/src/colorprint.h",
"third_party/benchmark/src/commandlineflags.h", "third_party/benchmark/src/commandlineflags.h",
"third_party/benchmark/src/complexity.h", "third_party/benchmark/src/complexity.h",
"third_party/benchmark/src/counter.h",
"third_party/benchmark/src/cycleclock.h", "third_party/benchmark/src/cycleclock.h",
"third_party/benchmark/src/internal_macros.h", "third_party/benchmark/src/internal_macros.h",
"third_party/benchmark/src/log.h", "third_party/benchmark/src/log.h",
@ -8467,6 +8467,7 @@
"grpc_deadline_filter" "grpc_deadline_filter"
], ],
"headers": [ "headers": [
"src/core/ext/filters/client_channel/backup_poller.h",
"src/core/ext/filters/client_channel/client_channel.h", "src/core/ext/filters/client_channel/client_channel.h",
"src/core/ext/filters/client_channel/client_channel_factory.h", "src/core/ext/filters/client_channel/client_channel_factory.h",
"src/core/ext/filters/client_channel/connector.h", "src/core/ext/filters/client_channel/connector.h",
@ -8490,6 +8491,8 @@
"language": "c", "language": "c",
"name": "grpc_client_channel", "name": "grpc_client_channel",
"src": [ "src": [
"src/core/ext/filters/client_channel/backup_poller.cc",
"src/core/ext/filters/client_channel/backup_poller.h",
"src/core/ext/filters/client_channel/channel_connectivity.cc", "src/core/ext/filters/client_channel/channel_connectivity.cc",
"src/core/ext/filters/client_channel/client_channel.cc", "src/core/ext/filters/client_channel/client_channel.cc",
"src/core/ext/filters/client_channel/client_channel.h", "src/core/ext/filters/client_channel/client_channel.h",
@ -8684,7 +8687,8 @@
"deps": [ "deps": [
"gpr", "gpr",
"grpc_base", "grpc_base",
"grpc_client_channel" "grpc_client_channel",
"grpc_lb_subchannel_list"
], ],
"headers": [], "headers": [],
"is_filegroup": true, "is_filegroup": true,
@ -8700,7 +8704,8 @@
"deps": [ "deps": [
"gpr", "gpr",
"grpc_base", "grpc_base",
"grpc_client_channel" "grpc_client_channel",
"grpc_lb_subchannel_list"
], ],
"headers": [], "headers": [],
"is_filegroup": true, "is_filegroup": true,
@ -8712,6 +8717,25 @@
"third_party": false, "third_party": false,
"type": "filegroup" "type": "filegroup"
}, },
{
"deps": [
"gpr",
"grpc_base",
"grpc_client_channel"
],
"headers": [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
],
"is_filegroup": true,
"language": "c",
"name": "grpc_lb_subchannel_list",
"src": [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc",
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
],
"third_party": false,
"type": "filegroup"
},
{ {
"deps": [ "deps": [
"gpr", "gpr",

@ -26,7 +26,7 @@ want_submodules=`mktemp /tmp/submXXXXXX`
git submodule | awk '{ print $1 }' | sort > $submodules git submodule | awk '{ print $1 }' | sort > $submodules
cat << EOF | awk '{ print $1 }' | sort > $want_submodules cat << EOF | awk '{ print $1 }' | sort > $want_submodules
44c25c892a6229b20db7cd9dc05584ea865896de third_party/benchmark (v0.1.0-343-g44c25c8) 5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8 third_party/benchmark (v1.2.0)
be2ee342d3781ddb954f91f8a7e660c6f59e87e5 third_party/boringssl (heads/chromium-stable) be2ee342d3781ddb954f91f8a7e660c6f59e87e5 third_party/boringssl (heads/chromium-stable)
886e7d75368e3f4fab3f4d0d3584e4abfc557755 third_party/boringssl-with-bazel (version_for_cocoapods_7.0-857-g886e7d7) 886e7d75368e3f4fab3f4d0d3584e4abfc557755 third_party/boringssl-with-bazel (version_for_cocoapods_7.0-857-g886e7d7)
30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e third_party/gflags (v2.2.0) 30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e third_party/gflags (v2.2.0)

Loading…
Cancel
Save