Merge branch 'master' of git://github.com/grpc/grpc

pull/22638/head
Taras Galkovskyi 5 years ago
commit 3ff24dd63a
  1. 2
      .gitignore
  2. 105
      BUILD
  3. 8
      BUILD.gn
  4. 14
      CMakeLists.txt
  5. 16
      Makefile
  6. 18
      build_autogenerated.yaml
  7. 8
      config.m4
  8. 8
      config.w32
  9. 6
      doc/environment_variables.md
  10. 51
      examples/cpp/README.md
  11. 488
      examples/cpp/cpptutorial.md
  12. 266
      examples/cpp/helloworld/README.md
  13. 2
      examples/python/multiprocessing/BUILD
  14. 29
      examples/python/multiprocessing/README.md
  15. 4
      examples/python/multiprocessing/client.py
  16. 10
      examples/python/multiprocessing/server.py
  17. 2
      gRPC-C++.podspec
  18. 8
      gRPC-Core.podspec
  19. 7
      grpc.gemspec
  20. 14
      grpc.gyp
  21. 21
      include/grpc/impl/codegen/grpc_types.h
  22. 7
      package.xml
  23. 83
      src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
  24. 99
      src/core/ext/filters/client_channel/lb_policy/address_filtering.h
  25. 875
      src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
  26. 722
      src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
  27. 65
      src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
  28. 907
      src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
  29. 524
      src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
  30. 1754
      src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
  31. 3
      src/core/ext/filters/client_channel/lb_policy/xds/xds.h
  32. 2
      src/core/ext/filters/client_channel/lb_policy_registry.cc
  33. 7
      src/core/ext/filters/client_channel/xds/xds_api.cc
  34. 18
      src/core/ext/filters/client_channel/xds/xds_client_stats.h
  35. 479
      src/core/ext/filters/http/message_compress/message_compress_filter.cc
  36. 7
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  37. 1
      src/core/ext/transport/chttp2/transport/chttp2_transport.h
  38. 6
      src/core/ext/transport/chttp2/transport/flow_control.cc
  39. 13
      src/core/ext/transport/chttp2/transport/writing.cc
  40. 12
      src/core/ext/transport/cronet/transport/cronet_transport.cc
  41. 13
      src/core/lib/channel/channel_stack.h
  42. 2
      src/core/lib/iomgr/socket_utils_common_posix.cc
  43. 25
      src/core/lib/security/security_connector/alts/alts_security_connector.cc
  44. 1
      src/core/lib/surface/server.cc
  45. 20
      src/core/plugin_registry/grpc_plugin_registry.cc
  46. 20
      src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
  47. 9
      src/core/tsi/alts/handshaker/alts_handshaker_client.cc
  48. 12
      src/core/tsi/alts/handshaker/alts_handshaker_client.h
  49. 34
      src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc
  50. 10
      src/core/tsi/alts/handshaker/alts_tsi_handshaker.h
  51. 2
      src/php/README.md
  52. 13
      src/python/grpcio/grpc/experimental/aio/_base_call.py
  53. 28
      src/python/grpcio/grpc/experimental/aio/_call.py
  54. 7
      src/python/grpcio/grpc/experimental/aio/_interceptor.py
  55. 6
      src/python/grpcio/grpc_core_dependencies.py
  56. 1
      src/python/grpcio_tests/tests_aio/tests.json
  57. 59
      src/python/grpcio_tests/tests_aio/unit/call_test.py
  58. 159
      src/python/grpcio_tests/tests_aio/unit/wait_for_connection_test.py
  59. 5
      templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template
  60. 6
      templates/tools/dockerfile/test/python_stretch_default_x64/Dockerfile.template
  61. 8
      test/core/client_channel/service_config_test.cc
  62. 2
      test/core/end2end/README
  63. 34
      test/core/end2end/fixtures/h2_oauth2.cc
  64. 48
      test/core/end2end/fixtures/h2_ssl.cc
  65. 51
      test/core/end2end/fixtures/h2_ssl_cred_reload.cc
  66. 62
      test/core/end2end/fixtures/h2_ssl_proxy.cc
  67. 70
      test/core/end2end/fixtures/h2_tls.cc
  68. 402
      test/core/end2end/gen_build_yaml.py
  69. 13
      test/core/end2end/generate_tests.bzl
  70. 15
      test/core/security/BUILD
  71. 58
      test/core/security/grpc_tls_credentials_options_test.cc
  72. 33
      test/core/security/ssl_server_fuzzer.cc
  73. 33
      test/core/security/tls_security_connector_test.cc
  74. 5
      test/core/surface/BUILD
  75. 28
      test/core/surface/sequential_connectivity_test.cc
  76. 20
      test/core/tsi/alts/handshaker/alts_handshaker_client_test.cc
  77. 31
      test/core/tsi/alts/handshaker/alts_tsi_handshaker_test.cc
  78. 4
      test/core/util/grpc_fuzzer.bzl
  79. 6
      test/cpp/client/BUILD
  80. 302
      test/cpp/end2end/xds_end2end_test.cc
  81. 1
      test/cpp/microbenchmarks/BUILD
  82. 6
      test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc
  83. 3
      tools/bazel.rc
  84. 2
      tools/buildgen/generate_build_additions.sh
  85. 5
      tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile
  86. 23
      tools/dockerfile/test/python_stretch_default_x64/Dockerfile
  87. 7
      tools/doxygen/Doxyfile.c++.internal
  88. 7
      tools/doxygen/Doxyfile.core.internal
  89. 2
      tools/internal_ci/linux/grpc_xds_bazel_python_test_in_docker.sh
  90. 2
      tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh
  91. 42
      tools/run_tests/generated/tests.json
  92. 29
      tools/run_tests/helper_scripts/build_python.sh
  93. 10
      tools/run_tests/run_tests.py
  94. 317
      tools/run_tests/run_xds_tests.py

2
.gitignore vendored

@ -136,7 +136,7 @@ bm_diff_old/
bm_*.json
# cmake build files
/cmake/build
**/cmake/build/
# Visual Studio Code artifacts
.vscode/*

105
BUILD

@ -319,8 +319,9 @@ grpc_cc_library(
deps = [
"grpc_common",
"grpc_lb_policy_cds",
"grpc_lb_policy_eds",
"grpc_lb_policy_grpclb",
"grpc_lb_policy_xds",
"grpc_lb_policy_lrs",
"grpc_resolver_xds",
],
)
@ -337,8 +338,9 @@ grpc_cc_library(
deps = [
"grpc_common",
"grpc_lb_policy_cds_secure",
"grpc_lb_policy_eds_secure",
"grpc_lb_policy_grpclb_secure",
"grpc_lb_policy_xds_secure",
"grpc_lb_policy_lrs_secure",
"grpc_resolver_xds_secure",
"grpc_secure",
"grpc_transport_chttp2_client_secure",
@ -1023,7 +1025,9 @@ grpc_cc_library(
"grpc_deadline_filter",
"grpc_client_authority_filter",
"grpc_lb_policy_pick_first",
"grpc_lb_policy_priority",
"grpc_lb_policy_round_robin",
"grpc_lb_policy_weighted_target",
"grpc_client_idle_filter",
"grpc_max_age_filter",
"grpc_message_size_filter",
@ -1357,41 +1361,75 @@ grpc_cc_library(
)
grpc_cc_library(
name = "grpc_lb_policy_xds",
name = "grpc_lb_policy_cds",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_xds_client",
],
)
grpc_cc_library(
name = "grpc_lb_policy_cds_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_xds_client_secure",
],
)
grpc_cc_library(
name = "grpc_lb_policy_eds",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/eds.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
"grpc_xds_client",
],
)
grpc_cc_library(
name = "grpc_lb_policy_xds_secure",
name = "grpc_lb_policy_eds_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/eds.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
"grpc_xds_client_secure",
],
)
grpc_cc_library(
name = "grpc_lb_policy_cds",
name = "grpc_lb_policy_lrs",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc",
],
language = "c++",
deps = [
@ -1402,9 +1440,9 @@ grpc_cc_library(
)
grpc_cc_library(
name = "grpc_lb_policy_cds_secure",
name = "grpc_lb_policy_lrs_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc",
],
language = "c++",
deps = [
@ -1414,6 +1452,24 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_lb_address_filtering",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/address_filtering.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/address_filtering.h",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
],
)
grpc_cc_library(
name = "grpc_lb_subchannel_list",
hdrs = [
@ -1452,6 +1508,35 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_lb_policy_priority",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/priority/priority.cc",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
],
)
grpc_cc_library(
name = "grpc_lb_policy_weighted_target",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
],
)
grpc_cc_library(
name = "lb_server_load_reporting_filter",
srcs = [

@ -223,6 +223,8 @@ config("grpc_config") {
"src/core/ext/filters/client_channel/http_proxy.h",
"src/core/ext/filters/client_channel/lb_policy.cc",
"src/core/ext/filters/client_channel/lb_policy.h",
"src/core/ext/filters/client_channel/lb_policy/address_filtering.cc",
"src/core/ext/filters/client_channel/lb_policy/address_filtering.h",
"src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc",
"src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc",
@ -238,10 +240,13 @@ config("grpc_config") {
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc",
"src/core/ext/filters/client_channel/lb_policy/priority/priority.cc",
"src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc",
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h",
"src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/eds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
"src/core/ext/filters/client_channel/lb_policy_factory.h",
"src/core/ext/filters/client_channel/lb_policy_registry.cc",
@ -962,6 +967,7 @@ config("grpc_config") {
":address_sorting",
":upb",
":absl/types:optional",
":absl/strings:strings",
":absl/container:inlined_vector",
"//third_party/cares",
":address_sorting",

@ -1315,6 +1315,7 @@ add_library(grpc
src/core/ext/filters/client_channel/http_connect_handshaker.cc
src/core/ext/filters/client_channel/http_proxy.cc
src/core/ext/filters/client_channel/lb_policy.cc
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@ -1323,9 +1324,12 @@ add_library(grpc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
src/core/ext/filters/client_channel/lb_policy_registry.cc
src/core/ext/filters/client_channel/local_subchannel_pool.cc
src/core/ext/filters/client_channel/parse_address.cc
@ -1743,6 +1747,7 @@ target_link_libraries(grpc
address_sorting
upb
absl::optional
absl::strings
absl::inlined_vector
)
if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC)
@ -1969,6 +1974,7 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/http_connect_handshaker.cc
src/core/ext/filters/client_channel/http_proxy.cc
src/core/ext/filters/client_channel/lb_policy.cc
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@ -1977,9 +1983,12 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
src/core/ext/filters/client_channel/lb_policy_registry.cc
src/core/ext/filters/client_channel/local_subchannel_pool.cc
src/core/ext/filters/client_channel/parse_address.cc
@ -2321,6 +2330,7 @@ target_link_libraries(grpc_unsecure
address_sorting
upb
absl::optional
absl::strings
absl::inlined_vector
)
if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC)

@ -2176,6 +2176,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/bm_error || ( echo test bm_error failed ; exit 1 )
$(E) "[RUN] Testing bm_fullstack_streaming_ping_pong"
$(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong || ( echo test bm_fullstack_streaming_ping_pong failed ; exit 1 )
$(E) "[RUN] Testing bm_fullstack_streaming_pump"
$(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_pump || ( echo test bm_fullstack_streaming_pump failed ; exit 1 )
$(E) "[RUN] Testing bm_fullstack_unary_ping_pong"
$(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong || ( echo test bm_fullstack_unary_ping_pong failed ; exit 1 )
$(E) "[RUN] Testing bm_metadata"
@ -2204,8 +2206,6 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/cli_call_test || ( echo test cli_call_test failed ; exit 1 )
$(E) "[RUN] Testing client_callback_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/client_callback_end2end_test || ( echo test client_callback_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing client_channel_stress_test"
$(Q) $(BINDIR)/$(CONFIG)/client_channel_stress_test || ( echo test client_channel_stress_test failed ; exit 1 )
$(E) "[RUN] Testing client_interceptors_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/client_interceptors_end2end_test || ( echo test client_interceptors_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing codegen_test_full"
@ -3640,6 +3640,7 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/http_connect_handshaker.cc \
src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
@ -3648,9 +3649,12 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy_registry.cc \
src/core/ext/filters/client_channel/local_subchannel_pool.cc \
src/core/ext/filters/client_channel/parse_address.cc \
@ -4269,6 +4273,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/http_connect_handshaker.cc \
src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
@ -4277,9 +4282,12 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy_registry.cc \
src/core/ext/filters/client_channel/local_subchannel_pool.cc \
src/core/ext/filters/client_channel/parse_address.cc \

@ -382,6 +382,7 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.h
- src/core/ext/filters/client_channel/http_proxy.h
- src/core/ext/filters/client_channel/lb_policy.h
- src/core/ext/filters/client_channel/lb_policy/address_filtering.h
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
@ -740,6 +741,7 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.cc
- src/core/ext/filters/client_channel/http_proxy.cc
- src/core/ext/filters/client_channel/lb_policy.cc
- src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@ -748,9 +750,12 @@ libs:
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
- src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
- src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
- src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
- src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
- src/core/ext/filters/client_channel/lb_policy_registry.cc
- src/core/ext/filters/client_channel/local_subchannel_pool.cc
- src/core/ext/filters/client_channel/parse_address.cc
@ -1132,6 +1137,7 @@ libs:
- address_sorting
- upb
- absl/types:optional
- absl/strings:strings
- absl/container:inlined_vector
baselib: true
dll: true
@ -1278,6 +1284,7 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.h
- src/core/ext/filters/client_channel/http_proxy.h
- src/core/ext/filters/client_channel/lb_policy.h
- src/core/ext/filters/client_channel/lb_policy/address_filtering.h
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
@ -1571,6 +1578,7 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.cc
- src/core/ext/filters/client_channel/http_proxy.cc
- src/core/ext/filters/client_channel/lb_policy.cc
- src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@ -1579,9 +1587,12 @@ libs:
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
- src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
- src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
- src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
- src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
- src/core/ext/filters/client_channel/lb_policy_registry.cc
- src/core/ext/filters/client_channel/local_subchannel_pool.cc
- src/core/ext/filters/client_channel/parse_address.cc
@ -1888,6 +1899,7 @@ libs:
- address_sorting
- upb
- absl/types:optional
- absl/strings:strings
- absl/container:inlined_vector
baselib: true
dll: true
@ -5115,7 +5127,6 @@ targets:
- posix
- name: bm_fullstack_streaming_pump
build: test
run: false
language: c++
headers:
- test/cpp/microbenchmarks/fullstack_streaming_pump.h
@ -5499,6 +5510,7 @@ targets:
- name: client_channel_stress_test
gtest: true
build: test
run: false
language: c++
headers:
- test/cpp/end2end/test_service_impl.h

@ -50,6 +50,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/http_connect_handshaker.cc \
src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
@ -58,9 +59,12 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy_registry.cc \
src/core/ext/filters/client_channel/local_subchannel_pool.cc \
src/core/ext/filters/client_channel/parse_address.cc \
@ -821,7 +825,9 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/pick_first)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/priority)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/round_robin)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/weighted_target)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/xds)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/dns)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/dns/c_ares)

@ -19,6 +19,7 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\http_connect_handshaker.cc " +
"src\\core\\ext\\filters\\client_channel\\http_proxy.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\address_filtering.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\child_policy_handler.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\client_load_reporting_filter.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb.cc " +
@ -27,9 +28,12 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_client_stats.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\load_balancer_api.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first\\pick_first.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\priority\\priority.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\weighted_target\\weighted_target.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\cds.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\eds.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\lrs.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\local_subchannel_pool.cc " +
"src\\core\\ext\\filters\\client_channel\\parse_address.cc " +
@ -821,7 +825,9 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\priority");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\weighted_target");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\xds");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver\\dns");

@ -57,6 +57,7 @@ some configuration as environment variables that can be set.
- compression - traces compression operations
- connectivity_state - traces connectivity state changes to channels
- cronet - traces state in the cronet transport engine
- eds_lb - traces eds LB policy
- executor - traces grpc's internal thread pool ('the executor')
- glb - traces the grpclb load balancer
- handshaker - traces handshaking state
@ -65,13 +66,16 @@ some configuration as environment variables that can be set.
- http2_stream_state - traces all http2 stream state mutations.
- http1 - traces HTTP/1.x operations performed by gRPC
- inproc - traces the in-process transport
- http_keepalive - traces gRPC keepalive pings
- flowctl - traces http2 flow control
- lrs_lb - traces lrs LB policy
- op_failure - traces error information when failure is pushed onto a
completion queue
- pick_first - traces the pick first load balancing policy
- plugin_credentials - traces plugin credentials
- pollable_refcount - traces reference counting of 'pollable' objects (only
in DEBUG)
- priority_lb - traces priority LB policy
- resource_quota - trace resource quota objects internals
- round_robin - traces the round_robin load balancing policy
- queue_pluck
@ -84,8 +88,8 @@ some configuration as environment variables that can be set.
- transport_security - traces metadata about secure channel establishment
- tcp - traces bytes in and out of a channel
- tsi - traces tsi transport security
- weighted_target_lb - traces weighted_target LB policy
- xds_client - traces xds client
- xds_lb - traces xds LB policy
- xds_resolver - traces xds resolver
The following tracers will only run in binaries built in DEBUG mode. This is

@ -1,44 +1,13 @@
# gRPC in 3 minutes (C++)
# gRPC C++ Examples
## Installation
- **[Hello World][]!** Eager to run your first gRPC example? You'll find
instructions for building gRPC and running a simple "Hello World" app in [Quick Start][].
- **[Route Guide][].** For a basic tutorial on gRPC see [gRPC Basics][].
To install gRPC on your system, follow the instructions to build from source
[here](../../BUILDING.md). This also installs the protocol buffer compiler
`protoc` (if you don't have it already), and the C++ gRPC plugin for `protoc`.
For information about the other examples in this directory, see their respective
README files.
## Hello C++ gRPC!
Here's how to build and run the C++ implementation of the [Hello
World](../protos/helloworld.proto) example used in [Getting started](..).
### Client and server implementations
The client implementation is at [greeter_client.cc](helloworld/greeter_client.cc).
The server implementation is at [greeter_server.cc](helloworld/greeter_server.cc).
### Try it!
Build client and server:
```sh
$ make
```
Run the server, which will listen on port 50051:
```sh
$ ./greeter_server
```
Run the client (in a different terminal):
```sh
$ ./greeter_client
```
If things go smoothly, you will see the "Greeter received: Hello world" in the
client side output.
## Tutorial
You can find a more detailed tutorial in [gRPC Basics: C++](cpptutorial.md)
[gRPC Basics]: https://grpc.io/docs/tutorials/basic/cpp
[Hello World]: helloworld
[Quick Start]: https://grpc.io/docs/quickstart/cpp
[Route Guide]: route_guide

@ -1,488 +0,0 @@
# gRPC Basics: C++
This tutorial provides a basic C++ programmer's introduction to working with
gRPC. By walking through this example you'll learn how to:
- Define a service in a `.proto` file.
- Generate server and client code using the protocol buffer compiler.
- Use the C++ gRPC API to write a simple client and server for your service.
It assumes that you are familiar with
[protocol buffers](https://developers.google.com/protocol-buffers/docs/overview).
Note that the example in this tutorial uses the proto3 version of the protocol
buffers language, which is currently in alpha release: you can find out more in
the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3)
and see the [release notes](https://github.com/google/protobuf/releases) for the
new version in the protocol buffers Github repository.
## Why use gRPC?
Our example is a simple route mapping application that lets clients get
information about features on their route, create a summary of their route, and
exchange route information such as traffic updates with the server and other
clients.
With gRPC we can define our service once in a `.proto` file and implement clients
and servers in any of gRPC's supported languages, which in turn can be run in
environments ranging from servers inside Google to your own tablet - all the
complexity of communication between different languages and environments is
handled for you by gRPC. We also get all the advantages of working with protocol
buffers, including efficient serialization, a simple IDL, and easy interface
updating.
## Example code and setup
The example code for our tutorial is in [examples/cpp/route_guide](route_guide).
You also should have the relevant tools installed to generate the server and
client interface code - if you don't already, follow the setup instructions in
[BUILDING.md](../../BUILDING.md).
## Defining the service
Our first step is to define the gRPC *service* and the method *request* and
*response* types using
[protocol buffers](https://developers.google.com/protocol-buffers/docs/overview).
You can see the complete `.proto` file in
[`examples/protos/route_guide.proto`](../protos/route_guide.proto).
To define a service, you specify a named `service` in your `.proto` file:
```protobuf
service RouteGuide {
...
}
```
Then you define `rpc` methods inside your service definition, specifying their
request and response types. gRPC lets you define four kinds of service method,
all of which are used in the `RouteGuide` service:
- A *simple RPC* where the client sends a request to the server using the stub
and waits for a response to come back, just like a normal function call.
```protobuf
// Obtains the feature at a given position.
rpc GetFeature(Point) returns (Feature) {}
```
- A *server-side streaming RPC* where the client sends a request to the server
and gets a stream to read a sequence of messages back. The client reads from
the returned stream until there are no more messages. As you can see in our
example, you specify a server-side streaming method by placing the `stream`
keyword before the *response* type.
```protobuf
// Obtains the Features available within the given Rectangle. Results are
// streamed rather than returned at once (e.g. in a response message with a
// repeated field), as the rectangle may cover a large area and contain a
// huge number of features.
rpc ListFeatures(Rectangle) returns (stream Feature) {}
```
- A *client-side streaming RPC* where the client writes a sequence of messages
and sends them to the server, again using a provided stream. Once the client
has finished writing the messages, it waits for the server to read them all
and return its response. You specify a client-side streaming method by placing
the `stream` keyword before the *request* type.
```protobuf
// Accepts a stream of Points on a route being traversed, returning a
// RouteSummary when traversal is completed.
rpc RecordRoute(stream Point) returns (RouteSummary) {}
```
- A *bidirectional streaming RPC* where both sides send a sequence of messages
using a read-write stream. The two streams operate independently, so clients
and servers can read and write in whatever order they like: for example, the
server could wait to receive all the client messages before writing its
responses, or it could alternately read a message then write a message, or
some other combination of reads and writes. The order of messages in each
stream is preserved. You specify this type of method by placing the `stream`
keyword before both the request and the response.
```protobuf
// Accepts a stream of RouteNotes sent while a route is being traversed,
// while receiving other RouteNotes (e.g. from other users).
rpc RouteChat(stream RouteNote) returns (stream RouteNote) {}
```
Our `.proto` file also contains protocol buffer message type definitions for all
the request and response types used in our service methods - for example, here's
the `Point` message type:
```protobuf
// Points are represented as latitude-longitude pairs in the E7 representation
// (degrees multiplied by 10**7 and rounded to the nearest integer).
// Latitudes should be in the range +/- 90 degrees and longitude should be in
// the range +/- 180 degrees (inclusive).
message Point {
int32 latitude = 1;
int32 longitude = 2;
}
```
## Generating client and server code
Next we need to generate the gRPC client and server interfaces from our `.proto`
service definition. We do this using the protocol buffer compiler `protoc` with
a special gRPC C++ plugin.
For simplicity, we've provided a [Makefile](route_guide/Makefile) that runs
`protoc` for you with the appropriate plugin, input, and output (if you want to
run this yourself, make sure you've installed protoc and followed the gRPC code
[installation instructions](../../BUILDING.md) first):
```shell
$ make route_guide.grpc.pb.cc route_guide.pb.cc
```
which actually runs:
```shell
$ protoc -I ../../protos --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_cpp_plugin` ../../protos/route_guide.proto
$ protoc -I ../../protos --cpp_out=. ../../protos/route_guide.proto
```
Running this command generates the following files in your current directory:
- `route_guide.pb.h`, the header which declares your generated message classes
- `route_guide.pb.cc`, which contains the implementation of your message classes
- `route_guide.grpc.pb.h`, the header which declares your generated service
classes
- `route_guide.grpc.pb.cc`, which contains the implementation of your service
classes
These contain:
- All the protocol buffer code to populate, serialize, and retrieve our request
and response message types
- A class called `RouteGuide` that contains
- a remote interface type (or *stub*) for clients to call with the methods
defined in the `RouteGuide` service.
- two abstract interfaces for servers to implement, also with the methods
defined in the `RouteGuide` service.
<a name="server"></a>
## Creating the server
First let's look at how we create a `RouteGuide` server. If you're only
interested in creating gRPC clients, you can skip this section and go straight
to [Creating the client](#client) (though you might find it interesting
anyway!).
There are two parts to making our `RouteGuide` service do its job:
- Implementing the service interface generated from our service definition:
doing the actual "work" of our service.
- Running a gRPC server to listen for requests from clients and return the
service responses.
You can find our example `RouteGuide` server in
[route_guide/route_guide_server.cc](route_guide/route_guide_server.cc). Let's
take a closer look at how it works.
### Implementing RouteGuide
As you can see, our server has a `RouteGuideImpl` class that implements the
generated `RouteGuide::Service` interface:
```cpp
class RouteGuideImpl final : public RouteGuide::Service {
...
}
```
In this case we're implementing the *synchronous* version of `RouteGuide`, which
provides our default gRPC server behaviour. It's also possible to implement an
asynchronous interface, `RouteGuide::AsyncService`, which allows you to further
customize your server's threading behaviour, though we won't look at this in
this tutorial.
`RouteGuideImpl` implements all our service methods. Let's look at the simplest
type first, `GetFeature`, which just gets a `Point` from the client and returns
the corresponding feature information from its database in a `Feature`.
```cpp
Status GetFeature(ServerContext* context, const Point* point,
Feature* feature) override {
feature->set_name(GetFeatureName(*point, feature_list_));
feature->mutable_location()->CopyFrom(*point);
return Status::OK;
}
```
The method is passed a context object for the RPC, the client's `Point` protocol
buffer request, and a `Feature` protocol buffer to fill in with the response
information. In the method we populate the `Feature` with the appropriate
information, and then `return` with an `OK` status to tell gRPC that we've
finished dealing with the RPC and that the `Feature` can be returned to the
client.
Now let's look at something a bit more complicated - a streaming RPC.
`ListFeatures` is a server-side streaming RPC, so we need to send back multiple
`Feature`s to our client.
```cpp
Status ListFeatures(ServerContext* context, const Rectangle* rectangle,
ServerWriter<Feature>* writer) override {
auto lo = rectangle->lo();
auto hi = rectangle->hi();
long left = std::min(lo.longitude(), hi.longitude());
long right = std::max(lo.longitude(), hi.longitude());
long top = std::max(lo.latitude(), hi.latitude());
long bottom = std::min(lo.latitude(), hi.latitude());
for (const Feature& f : feature_list_) {
if (f.location().longitude() >= left &&
f.location().longitude() <= right &&
f.location().latitude() >= bottom &&
f.location().latitude() <= top) {
writer->Write(f);
}
}
return Status::OK;
}
```
As you can see, instead of getting simple request and response objects in our
method parameters, this time we get a request object (the `Rectangle` in which
our client wants to find `Feature`s) and a special `ServerWriter` object. In the
method, we populate as many `Feature` objects as we need to return, writing them
to the `ServerWriter` using its `Write()` method. Finally, as in our simple RPC,
we `return Status::OK` to tell gRPC that we've finished writing responses.
If you look at the client-side streaming method `RecordRoute` you'll see it's
quite similar, except this time we get a `ServerReader` instead of a request
object and a single response. We use the `ServerReader`s `Read()` method to
repeatedly read in our client's requests to a request object (in this case a
`Point`) until there are no more messages: the server needs to check the return
value of `Read()` after each call. If `true`, the stream is still good and it
can continue reading; if `false` the message stream has ended.
```cpp
while (stream->Read(&point)) {
...//process client input
}
```
Finally, let's look at our bidirectional streaming RPC `RouteChat()`.
```cpp
Status RouteChat(ServerContext* context,
ServerReaderWriter<RouteNote, RouteNote>* stream) override {
std::vector<RouteNote> received_notes;
RouteNote note;
while (stream->Read(&note)) {
for (const RouteNote& n : received_notes) {
if (n.location().latitude() == note.location().latitude() &&
n.location().longitude() == note.location().longitude()) {
stream->Write(n);
}
}
received_notes.push_back(note);
}
return Status::OK;
}
```
This time we get a `ServerReaderWriter` that can be used to read *and* write
messages. The syntax for reading and writing here is exactly the same as for our
client-streaming and server-streaming methods. Although each side will always
get the other's messages in the order they were written, both the client and
server can read and write in any order — the streams operate completely
independently.
### Starting the server
Once we've implemented all our methods, we also need to start up a gRPC server
so that clients can actually use our service. The following snippet shows how we
do this for our `RouteGuide` service:
```cpp
void RunServer(const std::string& db_path) {
std::string server_address("0.0.0.0:50051");
RouteGuideImpl service(db_path);
ServerBuilder builder;
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
builder.RegisterService(&service);
std::unique_ptr<Server> server(builder.BuildAndStart());
std::cout << "Server listening on " << server_address << std::endl;
server->Wait();
}
```
As you can see, we build and start our server using a `ServerBuilder`. To do this, we:
1. Create an instance of our service implementation class `RouteGuideImpl`.
1. Create an instance of the factory `ServerBuilder` class.
1. Specify the address and port we want to use to listen for client requests
using the builder's `AddListeningPort()` method.
1. Register our service implementation with the builder.
1. Call `BuildAndStart()` on the builder to create and start an RPC server for
our service.
1. Call `Wait()` on the server to do a blocking wait until process is killed or
`Shutdown()` is called.
<a name="client"></a>
## Creating the client
In this section, we'll look at creating a C++ client for our `RouteGuide`
service. You can see our complete example client code in
[route_guide/route_guide_client.cc](route_guide/route_guide_client.cc).
### Creating a stub
To call service methods, we first need to create a *stub*.
First we need to create a gRPC *channel* for our stub, specifying the server
address and port we want to connect to without SSL:
```cpp
grpc::CreateChannel("localhost:50051", grpc::InsecureChannelCredentials());
```
Now we can use the channel to create our stub using the `NewStub` method
provided in the `RouteGuide` class we generated from our `.proto`.
```cpp
public:
RouteGuideClient(std::shared_ptr<Channel> channel, const std::string& db)
: stub_(RouteGuide::NewStub(channel)) {
...
}
```
### Calling service methods
Now let's look at how we call our service methods. Note that in this tutorial
we're calling the *blocking/synchronous* versions of each method: this means
that the RPC call waits for the server to respond, and will either return a
response or raise an exception.
#### Simple RPC
Calling the simple RPC `GetFeature` is nearly as straightforward as calling a
local method.
```cpp
Point point;
Feature feature;
point = MakePoint(409146138, -746188906);
GetOneFeature(point, &feature);
...
bool GetOneFeature(const Point& point, Feature* feature) {
ClientContext context;
Status status = stub_->GetFeature(&context, point, feature);
...
}
```
As you can see, we create and populate a request protocol buffer object (in our
case `Point`), and create a response protocol buffer object for the server to
fill in. We also create a `ClientContext` object for our call - you can
optionally set RPC configuration values on this object, such as deadlines,
though for now we'll use the default settings. Note that you cannot reuse this
object between calls. Finally, we call the method on the stub, passing it the
context, request, and response. If the method returns `OK`, then we can read the
response information from the server from our response object.
```cpp
std::cout << "Found feature called " << feature->name() << " at "
<< feature->location().latitude()/kCoordFactor_ << ", "
<< feature->location().longitude()/kCoordFactor_ << std::endl;
```
#### Streaming RPCs
Now let's look at our streaming methods. If you've already read [Creating the
server](#server) some of this may look very familiar - streaming RPCs are
implemented in a similar way on both sides. Here's where we call the server-side
streaming method `ListFeatures`, which returns a stream of geographical
`Feature`s:
```cpp
std::unique_ptr<ClientReader<Feature> > reader(
stub_->ListFeatures(&context, rect));
while (reader->Read(&feature)) {
std::cout << "Found feature called "
<< feature.name() << " at "
<< feature.location().latitude()/kCoordFactor_ << ", "
<< feature.location().longitude()/kCoordFactor_ << std::endl;
}
Status status = reader->Finish();
```
Instead of passing the method a context, request, and response, we pass it a
context and request and get a `ClientReader` object back. The client can use the
`ClientReader` to read the server's responses. We use the `ClientReader`s
`Read()` method to repeatedly read in the server's responses to a response
protocol buffer object (in this case a `Feature`) until there are no more
messages: the client needs to check the return value of `Read()` after each
call. If `true`, the stream is still good and it can continue reading; if
`false` the message stream has ended. Finally, we call `Finish()` on the stream
to complete the call and get our RPC status.
The client-side streaming method `RecordRoute` is similar, except there we pass
the method a context and response object and get back a `ClientWriter`.
```cpp
std::unique_ptr<ClientWriter<Point> > writer(
stub_->RecordRoute(&context, &stats));
for (int i = 0; i < kPoints; i++) {
const Feature& f = feature_list_[feature_distribution(generator)];
std::cout << "Visiting point "
<< f.location().latitude()/kCoordFactor_ << ", "
<< f.location().longitude()/kCoordFactor_ << std::endl;
if (!writer->Write(f.location())) {
// Broken stream.
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(
delay_distribution(generator)));
}
writer->WritesDone();
Status status = writer->Finish();
if (status.ok()) {
std::cout << "Finished trip with " << stats.point_count() << " points\n"
<< "Passed " << stats.feature_count() << " features\n"
<< "Travelled " << stats.distance() << " meters\n"
<< "It took " << stats.elapsed_time() << " seconds"
<< std::endl;
} else {
std::cout << "RecordRoute rpc failed." << std::endl;
}
```
Once we've finished writing our client's requests to the stream using `Write()`,
we need to call `WritesDone()` on the stream to let gRPC know that we've
finished writing, then `Finish()` to complete the call and get our RPC status.
If the status is `OK`, our response object that we initially passed to
`RecordRoute()` will be populated with the server's response.
Finally, let's look at our bidirectional streaming RPC `RouteChat()`. In this
case, we just pass a context to the method and get back a `ClientReaderWriter`,
which we can use to both write and read messages.
```cpp
std::shared_ptr<ClientReaderWriter<RouteNote, RouteNote> > stream(
stub_->RouteChat(&context));
```
The syntax for reading and writing here is exactly the same as for our
client-streaming and server-streaming methods. Although each side will always
get the other's messages in the order they were written, both the client and
server can read and write in any order — the streams operate completely
independently.
## Try it out!
Build client and server:
```shell
$ make
```
Run the server, which will listen on port 50051:
```shell
$ ./route_guide_server
```
Run the client (in a different terminal):
```shell
$ ./route_guide_client
```

@ -1,264 +1,6 @@
# gRPC C++ Hello World Tutorial
# gRPC C++ Hello World Example
### Install gRPC
Make sure you have installed gRPC on your system. Follow the
[BUILDING.md](../../../BUILDING.md) instructions.
You can find a complete set of instructions for building gRPC and running the
Hello World app in the [C++ Quick Start][].
### Get the tutorial source code
The example code for this and our other examples lives in the `examples`
directory. Clone this repository at the [latest stable release tag](https://github.com/grpc/grpc/releases)
to your local machine by running the following command:
```sh
$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc
```
Change your current directory to examples/cpp/helloworld
```sh
$ cd examples/cpp/helloworld/
```
### Defining a service
The first step in creating our example is to define a *service*: an RPC
service specifies the methods that can be called remotely with their parameters
and return types. As you saw in the
[overview](#protocolbuffers) above, gRPC does this using [protocol
buffers](https://developers.google.com/protocol-buffers/docs/overview). We
use the protocol buffers interface definition language (IDL) to define our
service methods, and define the parameters and return
types as protocol buffer message types. Both the client and the
server use interface code generated from the service definition.
Here's our example service definition, defined using protocol buffers IDL in
[helloworld.proto](../../protos/helloworld.proto). The `Greeter`
service has one method, `SayHello`, that lets the server receive a single
`HelloRequest`
message from the remote client containing the user's name, then send back
a greeting in a single `HelloReply`. This is the simplest type of RPC you
can specify in gRPC - we'll look at some other types later in this document.
```protobuf
syntax = "proto3";
option java_package = "ex.grpc";
package helloworld;
// The greeting service definition.
service Greeter {
// Sends a greeting
rpc SayHello (HelloRequest) returns (HelloReply) {}
}
// The request message containing the user's name.
message HelloRequest {
string name = 1;
}
// The response message containing the greetings
message HelloReply {
string message = 1;
}
```
<a name="generating"></a>
### Generating gRPC code
Once we've defined our service, we use the protocol buffer compiler
`protoc` to generate the special client and server code we need to create
our application. The generated code contains both stub code for clients to
use and an abstract interface for servers to implement, both with the method
defined in our `Greeter` service.
To generate the client and server side interfaces:
```sh
$ make helloworld.grpc.pb.cc helloworld.pb.cc
```
Which internally invokes the proto-compiler as:
```sh
$ protoc -I ../../protos/ --grpc_out=. --plugin=protoc-gen-grpc=grpc_cpp_plugin ../../protos/helloworld.proto
$ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto
```
### Writing a client
- Create a channel. A channel is a logical connection to an endpoint. A gRPC
channel can be created with the target address, credentials to use and
arguments as follows
```cpp
auto channel = CreateChannel("localhost:50051", InsecureChannelCredentials());
```
- Create a stub. A stub implements the rpc methods of a service and in the
generated code, a method is provided to create a stub with a channel:
```cpp
auto stub = helloworld::Greeter::NewStub(channel);
```
- Make a unary rpc, with `ClientContext` and request/response proto messages.
```cpp
ClientContext context;
HelloRequest request;
request.set_name("hello");
HelloReply reply;
Status status = stub->SayHello(&context, request, &reply);
```
- Check returned status and response.
```cpp
if (status.ok()) {
// check reply.message()
} else {
// rpc failed.
}
```
For a working example, refer to [greeter_client.cc](greeter_client.cc).
### Writing a server
- Implement the service interface
```cpp
class GreeterServiceImpl final : public Greeter::Service {
Status SayHello(ServerContext* context, const HelloRequest* request,
HelloReply* reply) override {
std::string prefix("Hello ");
reply->set_message(prefix + request->name());
return Status::OK;
}
};
```
- Build a server exporting the service
```cpp
GreeterServiceImpl service;
ServerBuilder builder;
builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
builder.RegisterService(&service);
std::unique_ptr<Server> server(builder.BuildAndStart());
```
For a working example, refer to [greeter_server.cc](greeter_server.cc).
### Writing asynchronous client and server
gRPC uses `CompletionQueue` API for asynchronous operations. The basic work flow
is
- bind a `CompletionQueue` to a rpc call
- do something like a read or write, present with a unique `void*` tag
- call `CompletionQueue::Next` to wait for operations to complete. If a tag
appears, it indicates that the corresponding operation is complete.
#### Async client
The channel and stub creation code is the same as the sync client.
- Initiate the rpc and create a handle for the rpc. Bind the rpc to a
`CompletionQueue`.
```cpp
CompletionQueue cq;
auto rpc = stub->AsyncSayHello(&context, request, &cq);
```
- Ask for reply and final status, with a unique tag
```cpp
Status status;
rpc->Finish(&reply, &status, (void*)1);
```
- Wait for the completion queue to return the next tag. The reply and status are
ready once the tag passed into the corresponding `Finish()` call is returned.
```cpp
void* got_tag;
bool ok = false;
cq.Next(&got_tag, &ok);
if (ok && got_tag == (void*)1) {
// check reply and status
}
```
For a working example, refer to [greeter_async_client.cc](greeter_async_client.cc).
#### Async server
The server implementation requests an rpc call with a tag and then waits for
the completion queue to return the tag. The basic flow is
- Build a server exporting the async service
```cpp
helloworld::Greeter::AsyncService service;
ServerBuilder builder;
builder.AddListeningPort("0.0.0.0:50051", InsecureServerCredentials());
builder.RegisterService(&service);
auto cq = builder.AddCompletionQueue();
auto server = builder.BuildAndStart();
```
- Request one rpc
```cpp
ServerContext context;
HelloRequest request;
ServerAsyncResponseWriter<HelloReply> responder;
service.RequestSayHello(&context, &request, &responder, &cq, &cq, (void*)1);
```
- Wait for the completion queue to return the tag. The context, request and
responder are ready once the tag is retrieved.
```cpp
HelloReply reply;
Status status;
void* got_tag;
bool ok = false;
cq.Next(&got_tag, &ok);
if (ok && got_tag == (void*)1) {
// set reply and status
responder.Finish(reply, status, (void*)2);
}
```
- Wait for the completion queue to return the tag. The rpc is finished when the
tag is back.
```cpp
void* got_tag;
bool ok = false;
cq.Next(&got_tag, &ok);
if (ok && got_tag == (void*)2) {
// clean up
}
```
To handle multiple rpcs, the async server creates an object `CallData` to
maintain the state of each rpc and use the address of it as the unique tag. For
simplicity the server only uses one completion queue for all events, and runs a
main loop in `HandleRpcs` to query the queue.
For a working example, refer to [greeter_async_server.cc](greeter_async_server.cc).
#### Flags for the client
```sh
./greeter_client --target="a target string used to create a GRPC client channel"
```
The default value of `--target` is `"localhost:50051"`.
[C++ Quick Start]: https://grpc.io/docs/quickstart/cpp

@ -37,6 +37,7 @@ py_binary(
name = "client",
testonly = 1,
srcs = ["client.py"],
imports = ["."],
python_version = "PY3",
srcs_version = "PY3",
deps = [
@ -50,6 +51,7 @@ py_binary(
name = "server",
testonly = 1,
srcs = ["server.py"],
imports = ["."],
python_version = "PY3",
srcs_version = "PY3",
deps = [

@ -1,28 +1,27 @@
## Multiprocessing with gRPC Python
Multiprocessing allows application developers to sidestep the Python global
interpreter lock and achieve true concurrency on multicore systems.
interpreter lock and achieve true parallelism on multicore systems.
Unfortunately, using multiprocessing and gRPC Python is not yet as simple as
instantiating your server with a `futures.ProcessPoolExecutor`.
The library is implemented as a C extension, maintaining much of the state that
drives the system in native code. As such, upon calling
[`fork`](http://man7.org/linux/man-pages/man2/fork.2.html), much of the
state copied into the child process is invalid, leading to hangs and crashes.
However, calling `fork` without `exec` in your python process is supported
*before* any gRPC servers have been instantiated. Application developers can
[`fork`](http://man7.org/linux/man-pages/man2/fork.2.html), any threads in a
critical section may leave the state of the gRPC library invalid in the child
process. See this [excellent research
paper](https://www.microsoft.com/en-us/research/uploads/prod/2019/04/fork-hotos19.pdf)
for a thorough discussion of the topic.
Calling `fork` without `exec` in your process *is* supported
before any gRPC servers have been instantiated. Application developers can
take advantage of this to parallelize their CPU-intensive operations.
## Calculating Prime Numbers with Multiple Processes
This example calculates the first 10,000 prime numbers as an RPC. We instantiate
one server per subprocess, balancing requests between the servers using the
[`SO_REUSEPORT`](https://lwn.net/Articles/542629/) socket option. Note that this
option is not available in `manylinux1` distributions, which are, as of the time
of writing, the only gRPC Python wheels available on PyPI. To take advantage of this
feature, you'll need to build from source, either using bazel (as we do for
these examples) or via pip, using `pip install grpcio --no-binary grpcio`.
[`SO_REUSEPORT`](https://lwn.net/Articles/542629/) socket option.
```python
_PROCESS_COUNT = multiprocessing.cpu_count()
@ -65,3 +64,11 @@ For example,
```
bazel run //examples/python/multiprocessing:client -- [::]:33915
```
Alternatively, generate code using the following and then run the client and server
directly:
```sh
cd examples/python/multiprocessing
python -m grpc_tools.protoc -I . prime.proto --python_out=. --grpc_python_out=.
```

@ -26,8 +26,8 @@ import sys
import grpc
from examples.python.multiprocessing import prime_pb2
from examples.python.multiprocessing import prime_pb2_grpc
import prime_pb2
import prime_pb2_grpc
_PROCESS_COUNT = 8
_MAXIMUM_CANDIDATE = 10000

@ -29,8 +29,8 @@ import sys
import grpc
from examples.python.multiprocessing import prime_pb2
from examples.python.multiprocessing import prime_pb2_grpc
import prime_pb2
import prime_pb2_grpc
_LOGGER = logging.getLogger(__name__)
@ -67,12 +67,6 @@ def _run_server(bind_address):
_LOGGER.info('Starting new server.')
options = (('grpc.so_reuseport', 1),)
# WARNING: This example takes advantage of SO_REUSEPORT. Due to the
# limitations of manylinux1, none of our precompiled Linux wheels currently
# support this option. (https://github.com/grpc/grpc/issues/18210). To take
# advantage of this feature, install from source with
# `pip install grpcio --no-binary grpcio`.
server = grpc.server(futures.ThreadPoolExecutor(
max_workers=_THREAD_CONCURRENCY,),
options=options)

@ -233,6 +233,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_connect_handshaker.h',
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
@ -683,6 +684,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_connect_handshaker.h',
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',

@ -206,6 +206,8 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
@ -221,10 +223,13 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.h',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.h',
'src/core/ext/filters/client_channel/lb_policy_factory.h',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
@ -1032,6 +1037,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_connect_handshaker.h',
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',

@ -128,6 +128,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/http_proxy.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/address_filtering.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/address_filtering.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc )
@ -143,10 +145,13 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/priority/priority.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/cds.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/eds.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy_factory.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy_registry.cc )

@ -426,6 +426,7 @@
'address_sorting',
'upb',
'absl/types:optional',
'absl/strings:strings',
'absl/container:inlined_vector',
],
'sources': [
@ -442,6 +443,7 @@
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
@ -450,9 +452,12 @@
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',
@ -916,6 +921,7 @@
'address_sorting',
'upb',
'absl/types:optional',
'absl/strings:strings',
'absl/container:inlined_vector',
],
'sources': [
@ -932,6 +938,7 @@
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
@ -940,9 +947,12 @@
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',

@ -344,22 +344,11 @@ typedef struct {
balancer before using fallback backend addresses from the resolver.
If 0, enter fallback mode immediately. Default value is 10000. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the xDS load
balancer before using fallback backend addresses from the resolver.
If 0, enter fallback mode immediately. Default value is 10000. */
#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms"
/* Time in milliseconds to wait before a locality is deleted after it's removed
from the received EDS update. If 0, delete the locality immediately. Default
value is 15 minutes. */
#define GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS \
"grpc.xds_locality_retention_interval_ms"
/* Timeout in milliseconds to wait for the localities of a specific priority to
complete their initial connection attempt before xDS fails over to the next
priority. Specifically, the connection attempt of a priority is considered
completed when any locality of that priority is ready or all the localities
of that priority fail to connect. If 0, failover happens immediately. Default
value is 10 seconds. */
#define GRPC_ARG_XDS_FAILOVER_TIMEOUT_MS "grpc.xds_failover_timeout_ms"
/* Timeout in milliseconds to wait for the child of a specific priority to
complete its initial connection attempt before the priority LB policy fails
over to the next priority. Default value is 10 seconds. */
#define GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS \
"grpc.priority_failover_timeout_ms"
/* Timeout in milliseconds to wait for a resource to be returned from
* the xds server before assuming that it does not exist.
* The default is 15 seconds. */

@ -108,6 +108,8 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/http_proxy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/address_filtering.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/address_filtering.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc" role="src" />
@ -123,10 +125,13 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/priority/priority.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/subchannel_list.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/cds.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/xds.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/eds.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/xds.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy_registry.cc" role="src" />

@ -0,0 +1,83 @@
//
// Copyright 2020 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/lib/channel/channel_args.h"
#define GRPC_ARG_HIERARCHICAL_PATH "grpc.internal.address.hierarchical_path"
namespace grpc_core {
namespace {
// Channel-arg vtable callback: deep-copies the hierarchical path stored
// in the arg so that each grpc_channel_args instance owns its own vector.
void* HierarchicalPathCopy(void* p) {
  auto* original = static_cast<std::vector<std::string>*>(p);
  auto* duplicate = new std::vector<std::string>(*original);
  return duplicate;
}
// Channel-arg vtable callback: frees a path vector previously produced
// by HierarchicalPathCopy (or handed to MakeHierarchicalPathArg).
void HierarchicalPathDestroy(void* p) {
  delete static_cast<std::vector<std::string>*>(p);
}
// Channel-arg vtable callback: lexicographic comparison of two path
// vectors.  Element mismatches return the raw std::string::compare()
// result; otherwise the shorter (prefix) path orders first.
int HierarchicalPathCompare(void* p1, void* p2) {
  auto& lhs = *static_cast<std::vector<std::string>*>(p1);
  auto& rhs = *static_cast<std::vector<std::string>*>(p2);
  size_t i = 0;
  while (i < lhs.size() && i < rhs.size()) {
    int cmp = lhs[i].compare(rhs[i]);
    if (cmp != 0) return cmp;
    ++i;
  }
  if (lhs.size() == rhs.size()) return 0;
  return lhs.size() > rhs.size() ? 1 : -1;
}
const grpc_arg_pointer_vtable hierarchical_path_arg_vtable = {
HierarchicalPathCopy, HierarchicalPathDestroy, HierarchicalPathCompare};
} // namespace
// Builds a channel arg carrying |path| under GRPC_ARG_HIERARCHICAL_PATH.
// Note: the returned arg points at the caller's vector, not a copy; an
// owned copy is made via the vtable's Copy() callback when the arg is
// added to a grpc_channel_args instance, so |path| only needs to stay
// alive until then.  The const_casts are required by the C channel-arg
// API, which takes non-const pointers.
grpc_arg MakeHierarchicalPathArg(const std::vector<std::string>& path) {
  return grpc_channel_arg_pointer_create(
      const_cast<char*>(GRPC_ARG_HIERARCHICAL_PATH),
      const_cast<std::vector<std::string>*>(&path),
      &hierarchical_path_arg_vtable);
}
// Splits |addresses| into one ServerAddressList per first-level path
// element.  Addresses with no hierarchical-path arg (or an empty path)
// are dropped.  Each emitted address gets a fresh channel-args instance
// in which the path arg has been replaced with the remaining (tail)
// path, so the next level of the LB hierarchy can repeat the process.
HierarchicalAddressMap MakeHierarchicalAddressMap(
    const ServerAddressList& addresses) {
  HierarchicalAddressMap result;
  for (const ServerAddress& address : addresses) {
    auto* path = grpc_channel_args_find_pointer<std::vector<std::string>>(
        address.args(), GRPC_ARG_HIERARCHICAL_PATH);
    // Skip addresses that carry no routing information.
    if (path == nullptr || path->empty()) continue;
    auto it = path->begin();
    // The first path element selects which child's list this address
    // belongs to; operator[] creates the list on first use.
    ServerAddressList& target_list = result[*it];
    ++it;
    // Re-attach the rest of the path for the next level down.
    std::vector<std::string> remaining_path(it, path->end());
    const char* name_to_remove = GRPC_ARG_HIERARCHICAL_PATH;
    grpc_arg new_arg = MakeHierarchicalPathArg(remaining_path);
    // The copy-and-add call copies the pointer arg via the arg vtable,
    // so it is safe for remaining_path to be a local here.
    grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
        address.args(), &name_to_remove, 1, &new_arg, 1);
    target_list.emplace_back(address.address(), new_args);
  }
  return result;
}
} // namespace grpc_core

@ -0,0 +1,99 @@
//
// Copyright 2020 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H
#include <grpc/support/port_platform.h>
#include <map>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "src/core/ext/filters/client_channel/server_address.h"
// The resolver returns a flat list of addresses. When a hierarchy of
// LB policies is in use, each leaf of the hierarchy will need a
// different subset of those addresses. This library provides a
// mechanism for determining which address is passed to which leaf
// policy.
//
// Each address will have an associated path that indicates which child
// it should be sent to at each level of the hierarchy to wind up at the
// right leaf policy. Each LB policy will look at the first element of
// the path of each address to determine which child to send the address
// to. It will then remove that first element when passing the address
// down to its child.
//
// For example, consider the following LB policy hierarchy:
//
// - priority
// - child0 (weighted_target)
// - localityA (round_robin)
// - localityB (round_robin)
// - child1 (weighted_target)
// - localityC (round_robin)
// - localityD (round_robin)
//
// Now consider the following addresses:
// - 10.0.0.1:80 path=["child0", "localityA"]
// - 10.0.0.2:80 path=["child0", "localityB"]
// - 10.0.0.3:80 path=["child1", "localityC"]
// - 10.0.0.4:80 path=["child1", "localityD"]
//
// The priority policy will split this up into two lists, one for each
// of its children:
// - child0:
// - 10.0.0.1:80 path=["localityA"]
// - 10.0.0.2:80 path=["localityB"]
// - child1:
// - 10.0.0.3:80 path=["localityC"]
// - 10.0.0.4:80 path=["localityD"]
//
// The weighted_target policy for child0 will split its list up into two
// lists, one for each of its children:
// - localityA:
// - 10.0.0.1:80 path=[]
// - localityB:
// - 10.0.0.2:80 path=[]
//
// Similarly, the weighted_target policy for child1 will split its list
// up into two lists, one for each of its children:
// - localityC:
// - 10.0.0.3:80 path=[]
// - localityD:
// - 10.0.0.4:80 path=[]
namespace grpc_core {
// Constructs a channel arg containing the hierarchical path
// to be associated with an address.
grpc_arg MakeHierarchicalPathArg(const std::vector<std::string>& path);
// A map from the next path element to the addresses that fall under
// that path element.
using HierarchicalAddressMap = std::map<std::string, ServerAddressList>;
// Splits up the addresses into a separate list for each child.
HierarchicalAddressMap MakeHierarchicalAddressMap(
const ServerAddressList& addresses);
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H \
*/

@ -0,0 +1,875 @@
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <inttypes.h>
#include <limits.h>
#include "absl/strings/str_cat.h"
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
namespace grpc_core {
TraceFlag grpc_lb_priority_trace(false, "priority_lb");
namespace {
constexpr char kPriority[] = "priority_experimental";
// How long we keep a child around for after it is no longer being used
// (either because it has been removed from the config or because we
// have switched to a higher-priority child).
constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000;
// Default for how long we wait for a newly created child to get connected
// before starting to attempt the next priority. Overridable via channel arg.
constexpr int kDefaultChildFailoverTimeoutMs = 10000;
// Config for priority LB policy.
class PriorityLbConfig : public LoadBalancingPolicy::Config {
 public:
  // |children|: per-child LB config, keyed by child name.
  // |priorities|: child names ordered from highest to lowest priority.
  PriorityLbConfig(
      std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>>
          children,
      std::vector<std::string> priorities)
      : children_(std::move(children)), priorities_(std::move(priorities)) {}

  const char* name() const override { return kPriority; }

  // Map of child name to that child's policy config.
  const std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>>&
  children() const {
    return children_;
  }

  // Indexes into this vector are the "priority" values used throughout
  // the policy (0 is the highest priority).
  const std::vector<std::string>& priorities() const { return priorities_; }

 private:
  const std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>>
      children_;
  const std::vector<std::string> priorities_;
};
// priority LB policy.
class PriorityLb : public LoadBalancingPolicy {
 public:
  explicit PriorityLb(Args args);

  const char* name() const override { return kPriority; }

  void UpdateLocked(UpdateArgs args) override;
  void ExitIdleLocked() override;
  void ResetBackoffLocked() override;

 private:
  // Each ChildPriority holds a ref to the PriorityLb.
  class ChildPriority : public InternallyRefCounted<ChildPriority> {
   public:
    ChildPriority(RefCountedPtr<PriorityLb> priority_policy, std::string name);

    ~ChildPriority() {
      priority_policy_.reset(DEBUG_LOCATION, "ChildPriority");
    }

    const std::string& name() const { return name_; }

    // Applies a new config to the child policy.
    void UpdateLocked(RefCountedPtr<LoadBalancingPolicy::Config> config);
    void ExitIdleLocked();
    void ResetBackoffLocked();
    // Starts the delayed-removal timer so the child can be retained for
    // a while in case it is needed again after a config change.
    void DeactivateLocked();
    // Cancels a pending deactivation, if any.
    void MaybeReactivateLocked();
    void MaybeCancelFailoverTimerLocked();

    void Orphan() override;

    // Returns a picker that shares ownership of the child's most recent
    // picker via picker_wrapper_.
    std::unique_ptr<SubchannelPicker> GetPicker() {
      return absl::make_unique<RefCountedPickerWrapper>(picker_wrapper_);
    }

    grpc_connectivity_state connectivity_state() const {
      return connectivity_state_;
    }

    // True while the child is still within its initial connection-attempt
    // window (failover to the next priority should wait for it).
    bool failover_timer_callback_pending() const {
      return failover_timer_callback_pending_;
    }

   private:
    // A simple wrapper for ref-counting a picker from the child policy.
    class RefCountedPicker : public RefCounted<RefCountedPicker> {
     public:
      explicit RefCountedPicker(std::unique_ptr<SubchannelPicker> picker)
          : picker_(std::move(picker)) {}
      PickResult Pick(PickArgs args) { return picker_->Pick(args); }

     private:
      std::unique_ptr<SubchannelPicker> picker_;
    };

    // A non-ref-counted wrapper for RefCountedPicker.
    class RefCountedPickerWrapper : public SubchannelPicker {
     public:
      explicit RefCountedPickerWrapper(RefCountedPtr<RefCountedPicker> picker)
          : picker_(std::move(picker)) {}
      PickResult Pick(PickArgs args) override { return picker_->Pick(args); }

     private:
      RefCountedPtr<RefCountedPicker> picker_;
    };

    // Delegated helper handed to the child policy; forwards subchannel
    // creation and state updates through the owning ChildPriority.
    class Helper : public ChannelControlHelper {
     public:
      explicit Helper(RefCountedPtr<ChildPriority> priority)
          : priority_(std::move(priority)) {}

      ~Helper() { priority_.reset(DEBUG_LOCATION, "Helper"); }

      RefCountedPtr<SubchannelInterface> CreateSubchannel(
          const grpc_channel_args& args) override;
      void UpdateState(grpc_connectivity_state state,
                       std::unique_ptr<SubchannelPicker> picker) override;
      void RequestReresolution() override;
      void AddTraceEvent(TraceSeverity severity, StringView message) override;

     private:
      RefCountedPtr<ChildPriority> priority_;
    };

    // Methods for dealing with the child policy.
    OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
        const grpc_channel_args* args);

    void OnConnectivityStateUpdateLocked(
        grpc_connectivity_state state,
        std::unique_ptr<SubchannelPicker> picker);

    void StartFailoverTimerLocked();

    // Timer callbacks come in exec-ctx/combiner pairs.
    static void OnFailoverTimer(void* arg, grpc_error* error);
    static void OnFailoverTimerLocked(void* arg, grpc_error* error);
    static void OnDeactivationTimer(void* arg, grpc_error* error);
    static void OnDeactivationTimerLocked(void* arg, grpc_error* error);

    RefCountedPtr<PriorityLb> priority_policy_;
    const std::string name_;

    OrphanablePtr<LoadBalancingPolicy> child_policy_;

    grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_CONNECTING;
    RefCountedPtr<RefCountedPicker> picker_wrapper_;

    // States for delayed removal.
    grpc_timer deactivation_timer_;
    grpc_closure on_deactivation_timer_;
    bool deactivation_timer_callback_pending_ = false;

    // States of failover.
    grpc_timer failover_timer_;
    grpc_closure on_failover_timer_;
    grpc_closure on_failover_timer_locked_;
    bool failover_timer_callback_pending_ = false;
  };

  ~PriorityLb();

  void ShutdownLocked() override;

  // Returns UINT32_MAX if child is not in current priority list.
  uint32_t GetChildPriorityLocked(const std::string& child_name) const;

  // Called when a child's connectivity state changes; decides whether to
  // switch priorities and what state/picker to report to our parent.
  void HandleChildConnectivityStateChangeLocked(ChildPriority* child);
  // Removes the child from children_ (deactivation-timer path).
  void DeleteChild(ChildPriority* child);

  // Walks the priority list from highest to lowest until a usable child
  // is found; reports TRANSIENT_FAILURE if none is.
  void TryNextPriorityLocked(bool report_connecting);
  // Makes |priority| current and deactivates all lower priorities.
  void SelectPriorityLocked(uint32_t priority);

  const int child_failover_timeout_ms_;

  // Current channel args and config from the resolver.
  const grpc_channel_args* args_ = nullptr;
  RefCountedPtr<PriorityLbConfig> config_;
  HierarchicalAddressMap addresses_;

  // Internal state.
  bool shutting_down_ = false;

  std::map<std::string, OrphanablePtr<ChildPriority>> children_;
  // The priority that is being used.
  uint32_t current_priority_ = UINT32_MAX;
  // Points to the current child from before the most recent update.
  // We will continue to use this child until we decide which of the new
  // children to use.
  ChildPriority* current_child_from_before_update_ = nullptr;
};
//
// PriorityLb
//
// Reads the failover timeout from the channel args, defaulting to
// kDefaultChildFailoverTimeoutMs and clamped to [0, INT_MAX].
// NOTE: args is moved into the base class first, but reading args.args
// afterwards is safe because that field is a raw pointer (moving a
// struct copies its pointer members).
PriorityLb::PriorityLb(Args args)
    : LoadBalancingPolicy(std::move(args)),
      child_failover_timeout_ms_(grpc_channel_args_find_integer(
          args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS,
          {kDefaultChildFailoverTimeoutMs, 0, INT_MAX})) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] created", this);
  }
}
PriorityLb::~PriorityLb() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] destroying priority LB policy", this);
  }
  // args_ was taken over from the most recent resolver update in
  // UpdateLocked(); release it here.
  grpc_channel_args_destroy(args_);
}
void PriorityLb::ShutdownLocked() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] shutting down", this);
  }
  shutting_down_ = true;
  // Orphans every child, which tears down the child policies and
  // cancels any pending timers.
  children_.clear();
}
// Forwards ExitIdle to the currently selected child, if there is one;
// a no-op when no priority has been selected yet.
void PriorityLb::ExitIdleLocked() {
  if (current_priority_ != UINT32_MAX) {
    const std::string& child_name = config_->priorities()[current_priority_];
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
      gpr_log(GPR_INFO,
              "[priority_lb %p] exiting IDLE for current priority %d child %s",
              this, current_priority_, child_name.c_str());
    }
    children_[child_name]->ExitIdleLocked();
  }
}
// Forwards the backoff reset to every child, whether currently selected,
// deactivated, or still connecting.
void PriorityLb::ResetBackoffLocked() {
  for (auto it = children_.begin(); it != children_.end(); ++it) {
    it->second->ResetBackoffLocked();
  }
}
// Applies a new resolver result: takes ownership of args.args, splits the
// addresses by hierarchical path, reconciles existing children against the
// new config, and then re-runs priority selection.
void PriorityLb::UpdateLocked(UpdateArgs args) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] received update", this);
  }
  // Save current child.
  if (current_priority_ != UINT32_MAX) {
    const std::string& child_name = config_->priorities()[current_priority_];
    current_child_from_before_update_ = children_[child_name].get();
    // Unset current_priority_, since it was an index into the old
    // config's priority list and may no longer be valid. It will be
    // reset later by TryNextPriorityLocked(), but we unset it here in
    // case updating any of our children triggers a state update.
    current_priority_ = UINT32_MAX;
  }
  // Update config.
  config_ = std::move(args.config);
  // Update args: free the old ones and steal the new ones from |args|
  // (nulled out so the caller's cleanup doesn't free them).
  grpc_channel_args_destroy(args_);
  args_ = args.args;
  args.args = nullptr;
  // Update addresses.
  addresses_ = MakeHierarchicalAddressMap(args.addresses);
  // Check all existing children against the new config.
  for (const auto& p : children_) {
    const std::string& child_name = p.first;
    auto& child = p.second;
    auto config_it = config_->children().find(child_name);
    if (config_it == config_->children().end()) {
      // Existing child not found in new config. Deactivate it.
      child->DeactivateLocked();
    } else {
      // Existing child found in new config. Update it.
      child->UpdateLocked(config_it->second);
    }
  }
  // Try to get connected.  Only report CONNECTING when we have no
  // children yet, to avoid disturbing an already-working picker.
  TryNextPriorityLocked(/*report_connecting=*/children_.empty());
}
// Maps a child name to its index in the current priority list.
// Returns UINT32_MAX when the child does not appear in the list.
uint32_t PriorityLb::GetChildPriorityLocked(
    const std::string& child_name) const {
  const std::vector<std::string>& priorities = config_->priorities();
  for (uint32_t index = 0; index < priorities.size(); ++index) {
    if (priorities[index] == child_name) return index;
  }
  return UINT32_MAX;
}
// Reacts to a connectivity-state change reported by |child|.  Depending on
// which child changed and its new state, this may keep the pre-update
// child, fail over to another priority, switch to a higher priority, or
// simply pass the child's new picker up to our parent.
void PriorityLb::HandleChildConnectivityStateChangeLocked(
    ChildPriority* child) {
  // Special case for the child that was the current child before the
  // most recent update.
  if (child == current_child_from_before_update_) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
      gpr_log(GPR_INFO,
              "[priority_lb %p] state update for current child from before "
              "config update",
              this);
    }
    if (child->connectivity_state() == GRPC_CHANNEL_READY ||
        child->connectivity_state() == GRPC_CHANNEL_IDLE) {
      // If it's still READY or IDLE, we stick with this child, so pass
      // the new picker up to our parent.
      channel_control_helper()->UpdateState(child->connectivity_state(),
                                            child->GetPicker());
    } else {
      // If it's no longer READY or IDLE, we should stop using it.
      // We already started trying other priorities as a result of the
      // update, but calling TryNextPriorityLocked() ensures that we will
      // properly select between CONNECTING and TRANSIENT_FAILURE as the
      // new state to report to our parent.
      current_child_from_before_update_ = nullptr;
      TryNextPriorityLocked(/*report_connecting=*/true);
    }
    return;
  }
  // Otherwise, find the child's priority.
  uint32_t child_priority = GetChildPriorityLocked(child->name());
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] state update for priority %d, child %s",
            this, child_priority, child->name().c_str());
  }
  // Ignore priorities not in the current config.
  if (child_priority == UINT32_MAX) return;
  // Ignore lower-than-current priorities.
  if (child_priority > current_priority_) return;
  // If a child reports TRANSIENT_FAILURE, start trying the next priority.
  // Note that even if this is for a higher-than-current priority, we
  // may still need to create some children between this priority and
  // the current one (e.g., if we got an update that inserted new
  // priorities ahead of the current one).
  if (child->connectivity_state() == GRPC_CHANNEL_TRANSIENT_FAILURE) {
    TryNextPriorityLocked(
        /*report_connecting=*/child_priority == current_priority_);
    return;
  }
  // The update is for a higher-than-current priority (or for any
  // priority if we don't have any current priority).
  if (child_priority < current_priority_) {
    // If the child reports READY or IDLE, switch to that priority.
    // Otherwise, ignore the update.
    if (child->connectivity_state() == GRPC_CHANNEL_READY ||
        child->connectivity_state() == GRPC_CHANNEL_IDLE) {
      SelectPriorityLocked(child_priority);
    }
    return;
  }
  // The current priority has returned a new picker, so pass it up to
  // our parent.
  channel_control_helper()->UpdateState(child->connectivity_state(),
                                        child->GetPicker());
}
// Permanently removes |child| from children_ (invoked when its
// deactivation timer fires).
void PriorityLb::DeleteChild(ChildPriority* child) {
  // If this was the current child from before the most recent update,
  // stop using it. We already started trying other priorities as a
  // result of the update, but calling TryNextPriorityLocked() ensures that
  // we will properly select between CONNECTING and TRANSIENT_FAILURE as the
  // new state to report to our parent.
  if (current_child_from_before_update_ == child) {
    current_child_from_before_update_ = nullptr;
    TryNextPriorityLocked(/*report_connecting=*/true);
  }
  // NOTE: erasing destroys the OrphanablePtr, so |child| is invalid
  // after this line.
  children_.erase(child->name());
}
// Walks the priority list from highest (index 0) to lowest, looking for a
// usable child.  Creates missing children on the way (returning early to
// wait for their failover timers), switches to the first READY/IDLE child,
// and reports TRANSIENT_FAILURE if every priority has failed.
// |report_connecting| controls whether intermediate CONNECTING states are
// pushed up to our parent.
void PriorityLb::TryNextPriorityLocked(bool report_connecting) {
  for (uint32_t priority = 0; priority < config_->priorities().size();
       ++priority) {
    // If the child for the priority does not exist yet, create it.
    const std::string& child_name = config_->priorities()[priority];
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
      gpr_log(GPR_INFO, "[priority_lb %p] trying priority %d, child %s", this,
              priority, child_name.c_str());
    }
    auto& child = children_[child_name];
    if (child == nullptr) {
      if (report_connecting) {
        channel_control_helper()->UpdateState(
            GRPC_CHANNEL_CONNECTING,
            absl::make_unique<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker")));
      }
      child = MakeOrphanable<ChildPriority>(
          Ref(DEBUG_LOCATION, "ChildPriority"), child_name);
      child->UpdateLocked(config_->children().find(child_name)->second);
      // Return and wait for the new child's state updates / failover timer.
      return;
    }
    // The child already exists.
    child->MaybeReactivateLocked();
    // If the child is in state READY or IDLE, switch to it.
    if (child->connectivity_state() == GRPC_CHANNEL_READY ||
        child->connectivity_state() == GRPC_CHANNEL_IDLE) {
      SelectPriorityLocked(priority);
      return;
    }
    // Child is not READY or IDLE.
    // If its failover timer is still pending, give it time to fire.
    if (child->failover_timer_callback_pending()) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
        gpr_log(GPR_INFO,
                "[priority_lb %p] priority %d, child %s: child still "
                "attempting to connect, will wait",
                this, priority, child_name.c_str());
      }
      if (report_connecting) {
        channel_control_helper()->UpdateState(
            GRPC_CHANNEL_CONNECTING,
            absl::make_unique<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker")));
      }
      return;
    }
    // Child has been failing for a while. Move on to the next priority.
  }
  // If there are no more priorities to try, report TRANSIENT_FAILURE.
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO,
            "[priority_lb %p] no priority reachable, putting channel in "
            "TRANSIENT_FAILURE",
            this);
  }
  current_priority_ = UINT32_MAX;
  current_child_from_before_update_ = nullptr;
  grpc_error* error = grpc_error_set_int(
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ready priority"),
      GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
  channel_control_helper()->UpdateState(
      GRPC_CHANNEL_TRANSIENT_FAILURE,
      absl::make_unique<TransientFailurePicker>(error));
}
// Makes |priority| the active priority: records the selection, deactivates
// every numerically-higher (i.e., lower-preference) priority, and publishes
// the selected child's connectivity state and picker to the channel.
void PriorityLb::SelectPriorityLocked(uint32_t priority) {
  const std::string& selected_name = config_->priorities()[priority];
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] selected priority %d, child %s", this,
            priority, selected_name.c_str());
  }
  current_priority_ = priority;
  current_child_from_before_update_ = nullptr;
  // Deactivate lower priorities.
  const size_t num_priorities = config_->priorities().size();
  for (uint32_t lower = priority + 1; lower < num_priorities; ++lower) {
    auto child_it = children_.find(config_->priorities()[lower]);
    if (child_it == children_.end()) continue;
    child_it->second->DeactivateLocked();
  }
  // Update picker.
  auto& selected_child = children_[selected_name];
  channel_control_helper()->UpdateState(selected_child->connectivity_state(),
                                        selected_child->GetPicker());
}
//
// PriorityLb::ChildPriority
//
// Constructs a child for the given priority name.  Takes a ref to the parent
// policy and immediately starts the failover timer, which bounds how long we
// wait for this child to become connected before moving on.
PriorityLb::ChildPriority::ChildPriority(
    RefCountedPtr<PriorityLb> priority_policy, std::string name)
    : priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] creating child %s (%p)",
            priority_policy_.get(), name_.c_str(), this);
  }
  // Two closures: the first runs on the timer thread (exec_ctx) and merely
  // bounces the event into the combiner, where the second does the real work.
  GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimer, this,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&on_failover_timer_locked_, OnFailoverTimerLocked, this,
                    nullptr);
  // Start the failover timer.
  StartFailoverTimerLocked();
}
// Shuts down this child: cancels pending timers, tears down the child policy,
// drops the cached picker, and releases the ref taken at construction.
//
// Fix: the original body cancelled the deactivation timer twice (once before
// tearing down the child policy and again after resetting the picker).  Since
// deactivation_timer_callback_pending_ is never cleared in between, the second
// block was an exact duplicate; a single cancellation suffices.
void PriorityLb::ChildPriority::Orphan() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): orphaned",
            priority_policy_.get(), name_.c_str(), this);
  }
  MaybeCancelFailoverTimerLocked();
  if (deactivation_timer_callback_pending_) {
    grpc_timer_cancel(&deactivation_timer_);
  }
  // Remove the child policy's interested_parties pollset_set from the
  // xDS policy.
  grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
                                   priority_policy_->interested_parties());
  child_policy_.reset();
  // Drop our ref to the child's picker, in case it's holding a ref to
  // the child.
  picker_wrapper_.reset();
  Unref(DEBUG_LOCATION, "ChildPriority+Orphan");
}
// Applies a new config to this child: lazily creates the child policy handler
// on first use, then forwards the config, this child's address list, and a
// copy of the channel args to it.  No-op if the parent is shutting down.
void PriorityLb::ChildPriority::UpdateLocked(
    RefCountedPtr<LoadBalancingPolicy::Config> config) {
  if (priority_policy_->shutting_down_) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): start update",
            priority_policy_.get(), name_.c_str(), this);
  }
  // Create policy if needed.
  if (child_policy_ == nullptr) {
    child_policy_ = CreateChildPolicyLocked(priority_policy_->args_);
  }
  // Construct update args.
  UpdateArgs update_args;
  update_args.config = std::move(config);
  // Addresses for this child come from the parent's per-priority map.
  update_args.addresses = priority_policy_->addresses_[name_];
  // The child takes ownership of this copy of the args.
  update_args.args = grpc_channel_args_copy(priority_policy_->args_);
  // Update the policy.
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO,
            "[priority_lb %p] child %s (%p): updating child policy handler %p",
            priority_policy_.get(), name_.c_str(), this, child_policy_.get());
  }
  child_policy_->UpdateLocked(std::move(update_args));
}
// Creates the ChildPolicyHandler that wraps this child's LB policy, wiring it
// to the parent's combiner and to a Helper that reports events back here.
OrphanablePtr<LoadBalancingPolicy>
PriorityLb::ChildPriority::CreateChildPolicyLocked(
    const grpc_channel_args* args) {
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = priority_policy_->combiner();
  lb_policy_args.args = args;
  lb_policy_args.channel_control_helper =
      absl::make_unique<Helper>(this->Ref(DEBUG_LOCATION, "Helper"));
  OrphanablePtr<LoadBalancingPolicy> lb_policy =
      MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
                                         &grpc_lb_priority_trace);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO,
            "[priority_lb %p] child %s (%p): created new child policy "
            "handler %p",
            priority_policy_.get(), name_.c_str(), this, lb_policy.get());
  }
  // Add the parent's interested_parties pollset_set to that of the newly
  // created child policy. This will make the child policy progress upon
  // activity on the parent LB, which in turn is tied to the application's call.
  grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
                                   priority_policy_->interested_parties());
  return lb_policy;
}
// Asks the child policy to leave IDLE.  If this child currently reports IDLE
// and no failover timer is running, restart the timer so the new connection
// attempt is still time-bounded.
void PriorityLb::ChildPriority::ExitIdleLocked() {
  const bool needs_failover_timer = connectivity_state_ == GRPC_CHANNEL_IDLE &&
                                    !failover_timer_callback_pending_;
  if (needs_failover_timer) StartFailoverTimerLocked();
  child_policy_->ExitIdleLocked();
}
// Forwards a connection-backoff reset to the wrapped child policy.
void PriorityLb::ChildPriority::ResetBackoffLocked() {
  child_policy_->ResetBackoffLocked();
}
// Records a connectivity state + picker reported by the child policy, stops
// the failover timer when the attempt has definitively succeeded or failed,
// and lets the parent re-evaluate which priority should be active.
void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked(
    grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO,
            "[priority_lb %p] child %s (%p): state update: %s, picker %p",
            priority_policy_.get(), name_.c_str(), this,
            ConnectivityStateName(state), picker.get());
  }
  // Store the state and picker.
  connectivity_state_ = state;
  picker_wrapper_ = MakeRefCounted<RefCountedPicker>(std::move(picker));
  // If READY or TRANSIENT_FAILURE, cancel failover timer.
  if (state == GRPC_CHANNEL_READY || state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
    MaybeCancelFailoverTimerLocked();
  }
  // Notify the parent policy.
  priority_policy_->HandleChildConnectivityStateChangeLocked(this);
}
// Arms the failover timer for child_failover_timeout_ms_.  A ref is taken and
// deliberately leaked here; it is released by OnFailoverTimerLocked() when the
// timer fires or is cancelled, keeping this object alive for the callback.
void PriorityLb::ChildPriority::StartFailoverTimerLocked() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO,
            "[priority_lb %p] child %s (%p): starting failover timer for %d ms",
            priority_policy_.get(), name_.c_str(), this,
            priority_policy_->child_failover_timeout_ms_);
  }
  Ref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked").release();
  grpc_timer_init(
      &failover_timer_,
      ExecCtx::Get()->Now() + priority_policy_->child_failover_timeout_ms_,
      &on_failover_timer_);
  failover_timer_callback_pending_ = true;
}
// Cancels the failover timer if one is pending; otherwise does nothing.
void PriorityLb::ChildPriority::MaybeCancelFailoverTimerLocked() {
  if (!failover_timer_callback_pending_) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO,
            "[priority_lb %p] child %s (%p): cancelling failover timer",
            priority_policy_.get(), name_.c_str(), this);
  }
  grpc_timer_cancel(&failover_timer_);
  failover_timer_callback_pending_ = false;
}
void PriorityLb::ChildPriority::OnFailoverTimer(void* arg, grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
self->priority_policy_->combiner()->Run(&self->on_failover_timer_locked_,
GRPC_ERROR_REF(error));
}
// Combiner-side failover-timer callback.  If the timer genuinely fired (not
// cancelled) and the policy is still alive, report TRANSIENT_FAILURE for this
// child so the parent can fail over to the next priority.  Always releases
// the ref taken in StartFailoverTimerLocked().
void PriorityLb::ChildPriority::OnFailoverTimerLocked(void* arg,
                                                      grpc_error* error) {
  ChildPriority* self = static_cast<ChildPriority*>(arg);
  if (error == GRPC_ERROR_NONE && self->failover_timer_callback_pending_ &&
      !self->priority_policy_->shutting_down_) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
      gpr_log(GPR_INFO,
              "[priority_lb %p] child %s (%p): failover timer fired, "
              "reporting TRANSIENT_FAILURE",
              self->priority_policy_.get(), self->name_.c_str(), self);
    }
    self->failover_timer_callback_pending_ = false;
    // Null picker: the parent only consumes the state here.
    self->OnConnectivityStateUpdateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
                                          nullptr);
  }
  self->Unref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked");
}
// Marks this child as no longer selected.  Rather than deleting it right
// away, starts a retention timer (kChildRetentionIntervalMs) so the child can
// be cheaply reactivated if the config flips back.  A ref is leaked for the
// timer callback and released in OnDeactivationTimerLocked().
void PriorityLb::ChildPriority::DeactivateLocked() {
  // If already deactivated, don't do it again.
  if (deactivation_timer_callback_pending_) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO,
            "[priority_lb %p] child %s (%p): deactivating -- will remove in %d "
            "ms.",
            priority_policy_.get(), name_.c_str(), this,
            kChildRetentionIntervalMs);
  }
  MaybeCancelFailoverTimerLocked();
  // Start a timer to delete the child.
  Ref(DEBUG_LOCATION, "ChildPriority+timer").release();
  GRPC_CLOSURE_INIT(&on_deactivation_timer_, OnDeactivationTimer, this,
                    grpc_schedule_on_exec_ctx);
  grpc_timer_init(&deactivation_timer_,
                  ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
                  &on_deactivation_timer_);
  deactivation_timer_callback_pending_ = true;
}
// Cancels a pending deactivation so the child stays alive; no-op if the
// child was never deactivated.
void PriorityLb::ChildPriority::MaybeReactivateLocked() {
  if (!deactivation_timer_callback_pending_) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
    gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): reactivating",
            priority_policy_.get(), name_.c_str(), this);
  }
  deactivation_timer_callback_pending_ = false;
  grpc_timer_cancel(&deactivation_timer_);
}
// Raw deactivation-timer callback (outside the combiner).  Re-initializes the
// same closure for combiner execution — safe because the exec_ctx-scheduled
// instance has already run by the time we get here.
void PriorityLb::ChildPriority::OnDeactivationTimer(void* arg,
                                                    grpc_error* error) {
  ChildPriority* self = static_cast<ChildPriority*>(arg);
  self->priority_policy_->combiner()->Run(
      GRPC_CLOSURE_INIT(&self->on_deactivation_timer_,
                        OnDeactivationTimerLocked, self, nullptr),
      GRPC_ERROR_REF(error));
}
// Combiner-side deactivation-timer callback.  If the retention period really
// elapsed (timer not cancelled, policy alive), asks the parent to delete this
// child.  Always releases the ref taken in DeactivateLocked().
void PriorityLb::ChildPriority::OnDeactivationTimerLocked(void* arg,
                                                          grpc_error* error) {
  ChildPriority* self = static_cast<ChildPriority*>(arg);
  if (error == GRPC_ERROR_NONE && self->deactivation_timer_callback_pending_ &&
      !self->priority_policy_->shutting_down_) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
      gpr_log(GPR_INFO,
              "[priority_lb %p] child %s (%p): deactivation timer fired, "
              "deleting child",
              self->priority_policy_.get(), self->name_.c_str(), self);
    }
    self->deactivation_timer_callback_pending_ = false;
    // DeleteChild() destroys |self|; nothing may touch members after this
    // except the Unref below, which uses only the pointer value... NOTE(review):
    // verify that Unref on the orphaned object is the intended last release.
    self->priority_policy_->DeleteChild(self);
  }
  self->Unref(DEBUG_LOCATION, "ChildPriority+timer");
}
//
// PriorityLb::ChildPriority::Helper
//
void PriorityLb::ChildPriority::Helper::RequestReresolution() {
if (priority_->priority_policy_->shutting_down_) return;
priority_->priority_policy_->channel_control_helper()->RequestReresolution();
}
// Delegates subchannel creation to the parent channel's helper; returns null
// once the parent policy has started shutting down.
RefCountedPtr<SubchannelInterface>
PriorityLb::ChildPriority::Helper::CreateSubchannel(
    const grpc_channel_args& args) {
  PriorityLb* parent = priority_->priority_policy_.get();
  if (parent->shutting_down_) return nullptr;
  return parent->channel_control_helper()->CreateSubchannel(args);
}
// Receives state updates from the child policy and hands them to the owning
// ChildPriority (not directly to the channel); the parent decides what the
// channel sees.
void PriorityLb::ChildPriority::Helper::UpdateState(
    grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
  if (priority_->priority_policy_->shutting_down_) return;
  // Notify the priority.
  priority_->OnConnectivityStateUpdateLocked(state, std::move(picker));
}
// Forwards channel trace events from the child policy to the parent
// channel's helper, unless the parent policy is shutting down.
void PriorityLb::ChildPriority::Helper::AddTraceEvent(TraceSeverity severity,
                                                      StringView message) {
  PriorityLb* parent = priority_->priority_policy_.get();
  if (parent->shutting_down_) return;
  parent->channel_control_helper()->AddTraceEvent(severity, message);
}
//
// factory
//
// Factory for the priority_experimental LB policy: instantiates PriorityLb
// and parses its JSON service-config stanza ({"children": {...},
// "priorities": [...]}) into a PriorityLbConfig.
class PriorityLbFactory : public LoadBalancingPolicyFactory {
 public:
  OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
      LoadBalancingPolicy::Args args) const override {
    return MakeOrphanable<PriorityLb>(std::move(args));
  }
  const char* name() const override { return kPriority; }
  // Validates the config JSON; accumulates all problems in error_list and
  // reports them as a single combined error, so users see every issue at once.
  RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
      const Json& json, grpc_error** error) const override {
    GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
    if (json.type() == Json::Type::JSON_NULL) {
      // priority was mentioned as a policy in the deprecated
      // loadBalancingPolicy field or in the client API.
      *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:loadBalancingPolicy error:priority policy requires "
          "configuration. Please use loadBalancingConfig field of service "
          "config instead.");
      return nullptr;
    }
    std::vector<grpc_error*> error_list;
    // Children: a map of child name -> {"config": <child LB config>}.
    std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>> children;
    auto it = json.object_value().find("children");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:children error:required field missing"));
    } else if (it->second.type() != Json::Type::OBJECT) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:children error:type should be object"));
    } else {
      const Json::Object& object = it->second.object_value();
      for (const auto& p : object) {
        const std::string& child_name = p.first;
        const Json& element = p.second;
        if (element.type() != Json::Type::OBJECT) {
          error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
              absl::StrCat("field:children key:", child_name,
                           " error:should be type object")
                  .c_str()));
        } else {
          auto it2 = element.object_value().find("config");
          if (it2 == element.object_value().end()) {
            error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
                absl::StrCat("field:children key:", child_name,
                             " error:missing 'config' field")
                    .c_str()));
          } else {
            grpc_error* parse_error = GRPC_ERROR_NONE;
            // Recursively parse the child's own LB policy config.
            auto config = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
                it2->second, &parse_error);
            if (config == nullptr) {
              GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
              error_list.push_back(
                  GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(
                      absl::StrCat("field:children key:", child_name).c_str(),
                      &parse_error, 1));
              GRPC_ERROR_UNREF(parse_error);
            }
            // NOTE: the (possibly null) config is stored even on parse error,
            // so the priorities size check below still sees the key.
            children[child_name] = std::move(config);
          }
        }
      }
    }
    // Priorities: an ordered array of child names; each must exist in
    // |children| and every child must appear exactly once.
    std::vector<std::string> priorities;
    it = json.object_value().find("priorities");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:priorities error:required field missing"));
    } else if (it->second.type() != Json::Type::ARRAY) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:priorities error:type should be array"));
    } else {
      const Json::Array& array = it->second.array_value();
      for (size_t i = 0; i < array.size(); ++i) {
        const Json& element = array[i];
        if (element.type() != Json::Type::STRING) {
          error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
              absl::StrCat("field:priorities element:", i,
                           " error:should be type string")
                  .c_str()));
        } else if (children.find(element.string_value()) == children.end()) {
          error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
              absl::StrCat("field:priorities element:", i,
                           " error:unknown child '", element.string_value(),
                           "'")
                  .c_str()));
        } else {
          priorities.emplace_back(element.string_value());
        }
      }
      if (priorities.size() != children.size()) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
            absl::StrCat("field:priorities error:priorities size (",
                         priorities.size(), ") != children size (",
                         children.size(), ")")
                .c_str()));
      }
    }
    if (error_list.empty()) {
      return MakeRefCounted<PriorityLbConfig>(std::move(children),
                                              std::move(priorities));
    } else {
      *error = GRPC_ERROR_CREATE_FROM_VECTOR(
          "priority_experimental LB policy config", &error_list);
      return nullptr;
    }
  }
};
} // namespace
} // namespace grpc_core
//
// Plugin registration
//
// Registers the priority_experimental LB policy factory with the global LB
// policy registry.  Called once during plugin initialization.
void grpc_lb_policy_priority_init() {
  auto factory = absl::make_unique<grpc_core::PriorityLbFactory>();
  grpc_core::LoadBalancingPolicyRegistry::Builder::
      RegisterLoadBalancingPolicyFactory(std::move(factory));
}
// Intentionally empty: the registry retains the factory for process lifetime.
void grpc_lb_policy_priority_shutdown() {}

// ==== src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc (722 lines) ====
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include "absl/strings/str_cat.h"
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
namespace grpc_core {
// Trace flag for this policy; enabled via GRPC_TRACE=weighted_target_lb.
TraceFlag grpc_lb_weighted_target_trace(false, "weighted_target_lb");
namespace {
// Policy name as referenced in service config / the LB policy registry.
constexpr char kWeightedTarget[] = "weighted_target_experimental";
// How long we keep a child around for after it has been removed from
// the config.
constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000;
// Config for weighted_target LB policy.
class WeightedTargetLbConfig : public LoadBalancingPolicy::Config {
public:
struct ChildConfig {
uint32_t weight;
RefCountedPtr<LoadBalancingPolicy::Config> config;
};
using TargetMap = std::map<std::string, ChildConfig>;
explicit WeightedTargetLbConfig(TargetMap target_map)
: target_map_(std::move(target_map)) {}
const char* name() const override { return kWeightedTarget; }
const TargetMap& target_map() const { return target_map_; }
private:
TargetMap target_map_;
};
// weighted_target LB policy.
class WeightedTargetLb : public LoadBalancingPolicy {
public:
explicit WeightedTargetLb(Args args);
const char* name() const override { return kWeightedTarget; }
void UpdateLocked(UpdateArgs args) override;
void ResetBackoffLocked() override;
private:
// A simple wrapper for ref-counting a picker from the child policy.
class ChildPickerWrapper : public RefCounted<ChildPickerWrapper> {
public:
explicit ChildPickerWrapper(std::unique_ptr<SubchannelPicker> picker)
: picker_(std::move(picker)) {}
PickResult Pick(PickArgs args) { return picker_->Pick(args); }
private:
std::unique_ptr<SubchannelPicker> picker_;
};
// Picks a child using stateless WRR and then delegates to that
// child's picker.
class WeightedPicker : public SubchannelPicker {
public:
// Maintains a weighted list of pickers from each child that is in
// ready state. The first element in the pair represents the end of a
// range proportional to the child's weight. The start of the range
// is the previous value in the vector and is 0 for the first element.
using PickerList =
InlinedVector<std::pair<uint32_t, RefCountedPtr<ChildPickerWrapper>>,
1>;
explicit WeightedPicker(PickerList pickers)
: pickers_(std::move(pickers)) {}
PickResult Pick(PickArgs args) override;
private:
PickerList pickers_;
};
// Each WeightedChild holds a ref to its parent WeightedTargetLb.
class WeightedChild : public InternallyRefCounted<WeightedChild> {
public:
WeightedChild(RefCountedPtr<WeightedTargetLb> weighted_target_policy,
const std::string& name);
~WeightedChild();
void Orphan() override;
void UpdateLocked(const WeightedTargetLbConfig::ChildConfig& config,
ServerAddressList addresses,
const grpc_channel_args* args);
void ResetBackoffLocked();
void DeactivateLocked();
uint32_t weight() const { return weight_; }
grpc_connectivity_state connectivity_state() const {
return connectivity_state_;
}
RefCountedPtr<ChildPickerWrapper> picker_wrapper() const {
return picker_wrapper_;
}
private:
class Helper : public ChannelControlHelper {
public:
explicit Helper(RefCountedPtr<WeightedChild> weighted_child)
: weighted_child_(std::move(weighted_child)) {}
~Helper() { weighted_child_.reset(DEBUG_LOCATION, "Helper"); }
RefCountedPtr<SubchannelInterface> CreateSubchannel(
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker) override;
void RequestReresolution() override;
void AddTraceEvent(TraceSeverity severity, StringView message) override;
private:
RefCountedPtr<WeightedChild> weighted_child_;
};
// Methods for dealing with the child policy.
OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
const grpc_channel_args* args);
void OnConnectivityStateUpdateLocked(
grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker);
static void OnDelayedRemovalTimer(void* arg, grpc_error* error);
static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error);
// The owning LB policy.
RefCountedPtr<WeightedTargetLb> weighted_target_policy_;
const std::string& name_;
uint32_t weight_;
OrphanablePtr<LoadBalancingPolicy> child_policy_;
RefCountedPtr<ChildPickerWrapper> picker_wrapper_;
grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_CONNECTING;
bool seen_failure_since_ready_ = false;
// States for delayed removal.
grpc_timer delayed_removal_timer_;
grpc_closure on_delayed_removal_timer_;
bool delayed_removal_timer_callback_pending_ = false;
bool shutdown_ = false;
};
~WeightedTargetLb();
void ShutdownLocked() override;
void UpdateStateLocked();
// Current config from the resolver.
RefCountedPtr<WeightedTargetLbConfig> config_;
// Internal state.
bool shutting_down_ = false;
// Children.
std::map<std::string, OrphanablePtr<WeightedChild>> targets_;
};
//
// WeightedTargetLb::WeightedPicker
//
// Weighted-random pick: draws a key uniformly in [0, total weight) and
// binary-searches pickers_ (whose .first values are cumulative weight range
// ends) for the first entry whose range end exceeds the key, then delegates
// to that child's picker.
// NOTE(review): uses rand(), so pick distribution is per-process global and
// not cryptographic — presumably acceptable for load balancing; confirm.
WeightedTargetLb::PickResult WeightedTargetLb::WeightedPicker::Pick(
    PickArgs args) {
  // Generate a random number in [0, total weight).
  const uint32_t key = rand() % pickers_[pickers_.size() - 1].first;
  // Find the index in pickers_ corresponding to key.
  size_t mid = 0;
  size_t start_index = 0;
  size_t end_index = pickers_.size() - 1;
  size_t index = 0;
  while (end_index > start_index) {
    mid = (start_index + end_index) / 2;
    if (pickers_[mid].first > key) {
      end_index = mid;
    } else if (pickers_[mid].first < key) {
      start_index = mid + 1;
    } else {
      // Exact boundary hit: key belongs to the *next* range, since .first
      // is an exclusive range end.
      index = mid + 1;
      break;
    }
  }
  // If the loop converged without an exact match, start_index is the answer.
  if (index == 0) index = start_index;
  GPR_ASSERT(pickers_[index].first > key);
  // Delegate to the child picker.
  return pickers_[index].second->Pick(args);
}
//
// WeightedTargetLb
//
// Constructor: nothing to do beyond base-class init; children are created
// lazily on the first UpdateLocked().
WeightedTargetLb::WeightedTargetLb(Args args)
    : LoadBalancingPolicy(std::move(args)) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO, "[weighted_target_lb %p] created", this);
  }
}
// Destructor: children were already torn down in ShutdownLocked().
WeightedTargetLb::~WeightedTargetLb() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] destroying weighted_target LB policy",
            this);
  }
}
// Shuts the policy down: marks the shutting-down flag (checked by Helper
// callbacks) and orphans every child by clearing the map.
void WeightedTargetLb::ShutdownLocked() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO, "[weighted_target_lb %p] shutting down", this);
  }
  shutting_down_ = true;
  targets_.clear();
}
// Propagates a connection-backoff reset to every child policy.
void WeightedTargetLb::ResetBackoffLocked() {
  for (auto it = targets_.begin(); it != targets_.end(); ++it) {
    it->second->ResetBackoffLocked();
  }
}
// Applies a resolver update: deactivates children absent from the new config,
// then creates/updates a child for each configured target, splitting the
// hierarchical address list by child name.
void WeightedTargetLb::UpdateLocked(UpdateArgs args) {
  if (shutting_down_) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO, "[weighted_target_lb %p] Received update", this);
  }
  // Update config.
  config_ = std::move(args.config);
  // Deactivate the targets not in the new config.
  for (const auto& p : targets_) {
    const std::string& name = p.first;
    WeightedChild* child = p.second.get();
    if (config_->target_map().find(name) == config_->target_map().end()) {
      child->DeactivateLocked();
    }
  }
  // Add or update the targets in the new config.
  HierarchicalAddressMap address_map =
      MakeHierarchicalAddressMap(args.addresses);
  for (const auto& p : config_->target_map()) {
    const std::string& name = p.first;
    const WeightedTargetLbConfig::ChildConfig& config = p.second;
    auto it = targets_.find(name);
    if (it == targets_.end()) {
      // Insert the key first, then construct the child against the stored
      // map key (it->first), which stays stable for the child's lifetime.
      it = targets_.emplace(std::make_pair(name, nullptr)).first;
      it->second = MakeOrphanable<WeightedChild>(
          Ref(DEBUG_LOCATION, "WeightedChild"), it->first);
    }
    it->second->UpdateLocked(config, std::move(address_map[name]), args.args);
  }
}
// Recomputes the aggregate connectivity state from all configured children
// and pushes a matching picker to the channel: WRR picker if any child is
// READY, queueing picker while CONNECTING/IDLE, failing picker otherwise.
void WeightedTargetLb::UpdateStateLocked() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] scanning children to determine "
            "connectivity state",
            this);
  }
  // Construct a new picker which maintains a map of all child pickers
  // that are ready. Each child is represented by a portion of the range
  // proportional to its weight, such that the total range is the sum of the
  // weights of all children.
  WeightedPicker::PickerList picker_list;
  uint32_t end = 0;
  // Also count the number of children in each state, to determine the
  // overall state.
  size_t num_connecting = 0;
  size_t num_idle = 0;
  size_t num_transient_failures = 0;
  for (const auto& p : targets_) {
    const std::string& child_name = p.first;
    const WeightedChild* child = p.second.get();
    // Skip the targets that are not in the latest update.
    if (config_->target_map().find(child_name) == config_->target_map().end()) {
      continue;
    }
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
      gpr_log(GPR_INFO,
              "[weighted_target_lb %p] child=%s state=%s weight=%d picker=%p",
              this, child_name.c_str(),
              ConnectivityStateName(child->connectivity_state()),
              child->weight(), child->picker_wrapper().get());
    }
    switch (child->connectivity_state()) {
      case GRPC_CHANNEL_READY: {
        // Range end is cumulative, so each child owns (prev end, end].
        end += child->weight();
        picker_list.push_back(std::make_pair(end, child->picker_wrapper()));
        break;
      }
      case GRPC_CHANNEL_CONNECTING: {
        ++num_connecting;
        break;
      }
      case GRPC_CHANNEL_IDLE: {
        ++num_idle;
        break;
      }
      case GRPC_CHANNEL_TRANSIENT_FAILURE: {
        ++num_transient_failures;
        break;
      }
      default:
        GPR_UNREACHABLE_CODE(return );
    }
  }
  // Determine aggregated connectivity state.
  // Precedence: READY > CONNECTING > IDLE > TRANSIENT_FAILURE.
  grpc_connectivity_state connectivity_state;
  if (!picker_list.empty()) {
    connectivity_state = GRPC_CHANNEL_READY;
  } else if (num_connecting > 0) {
    connectivity_state = GRPC_CHANNEL_CONNECTING;
  } else if (num_idle > 0) {
    connectivity_state = GRPC_CHANNEL_IDLE;
  } else {
    connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  }
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO, "[weighted_target_lb %p] connectivity changed to %s",
            this, ConnectivityStateName(connectivity_state));
  }
  std::unique_ptr<SubchannelPicker> picker;
  switch (connectivity_state) {
    case GRPC_CHANNEL_READY:
      picker = absl::make_unique<WeightedPicker>(std::move(picker_list));
      break;
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_IDLE:
      picker =
          absl::make_unique<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker"));
      break;
    default:
      picker = absl::make_unique<TransientFailurePicker>(
          GRPC_ERROR_CREATE_FROM_STATIC_STRING(
              "weighted_target: all children report state TRANSIENT_FAILURE"));
  }
  channel_control_helper()->UpdateState(connectivity_state, std::move(picker));
}
//
// WeightedTargetLb::WeightedChild
//
// Constructs a child for the given target name.  |name| is stored by
// reference (see the class declaration), so the caller must pass a string
// that outlives this child — in practice the targets_ map key.
WeightedTargetLb::WeightedChild::WeightedChild(
    RefCountedPtr<WeightedTargetLb> weighted_target_policy,
    const std::string& name)
    : weighted_target_policy_(std::move(weighted_target_policy)), name_(name) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s",
            weighted_target_policy_.get(), this, name_.c_str());
  }
}
// Destructor: releases the ref on the parent policy taken at construction.
WeightedTargetLb::WeightedChild::~WeightedChild() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] WeightedChild %p %s: destroying child",
            weighted_target_policy_.get(), this, name_.c_str());
  }
  weighted_target_policy_.reset(DEBUG_LOCATION, "WeightedChild");
}
// Shuts down this child: detaches and destroys the child policy, drops the
// cached picker, cancels any pending delayed-removal timer, and releases the
// internal ref.  shutdown_ guards the delayed-removal callback.
void WeightedTargetLb::WeightedChild::Orphan() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] WeightedChild %p %s: shutting down child",
            weighted_target_policy_.get(), this, name_.c_str());
  }
  // Remove the child policy's interested_parties pollset_set from the
  // xDS policy.
  grpc_pollset_set_del_pollset_set(
      child_policy_->interested_parties(),
      weighted_target_policy_->interested_parties());
  child_policy_.reset();
  // Drop our ref to the child's picker, in case it's holding a ref to
  // the child.
  picker_wrapper_.reset();
  if (delayed_removal_timer_callback_pending_) {
    delayed_removal_timer_callback_pending_ = false;
    grpc_timer_cancel(&delayed_removal_timer_);
  }
  shutdown_ = true;
  Unref();
}
// Creates the ChildPolicyHandler wrapping this target's LB policy, sharing
// the parent's combiner and reporting events through a Helper bound to this
// WeightedChild.
OrphanablePtr<LoadBalancingPolicy>
WeightedTargetLb::WeightedChild::CreateChildPolicyLocked(
    const grpc_channel_args* args) {
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = weighted_target_policy_->combiner();
  lb_policy_args.args = args;
  lb_policy_args.channel_control_helper =
      absl::make_unique<Helper>(this->Ref(DEBUG_LOCATION, "Helper"));
  OrphanablePtr<LoadBalancingPolicy> lb_policy =
      MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
                                         &grpc_lb_weighted_target_trace);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] WeightedChild %p %s: Created new child "
            "policy handler %p",
            weighted_target_policy_.get(), this, name_.c_str(),
            lb_policy.get());
  }
  // Add the xDS's interested_parties pollset_set to that of the newly created
  // child policy. This will make the child policy progress upon activity on
  // xDS LB, which in turn is tied to the application's call.
  grpc_pollset_set_add_pollset_set(
      lb_policy->interested_parties(),
      weighted_target_policy_->interested_parties());
  return lb_policy;
}
// Applies a new per-target config: updates the weight, cancels a pending
// removal (reactivation), lazily creates the child policy, and forwards the
// config/addresses/args to it.  No-op if the parent is shutting down.
void WeightedTargetLb::WeightedChild::UpdateLocked(
    const WeightedTargetLbConfig::ChildConfig& config,
    ServerAddressList addresses, const grpc_channel_args* args) {
  if (weighted_target_policy_->shutting_down_) return;
  // Update child weight.
  weight_ = config.weight;
  // Reactivate if needed.
  if (delayed_removal_timer_callback_pending_) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
      gpr_log(GPR_INFO,
              "[weighted_target_lb %p] WeightedChild %p %s: reactivating",
              weighted_target_policy_.get(), this, name_.c_str());
    }
    delayed_removal_timer_callback_pending_ = false;
    grpc_timer_cancel(&delayed_removal_timer_);
  }
  // Create child policy if needed.
  if (child_policy_ == nullptr) {
    child_policy_ = CreateChildPolicyLocked(args);
  }
  // Construct update args.
  UpdateArgs update_args;
  update_args.config = config.config;
  update_args.addresses = std::move(addresses);
  // The child takes ownership of this copy of the args.
  update_args.args = grpc_channel_args_copy(args);
  // Update the policy.
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] WeightedChild %p %s: Updating child "
            "policy handler %p",
            weighted_target_policy_.get(), this, name_.c_str(),
            child_policy_.get());
  }
  child_policy_->UpdateLocked(std::move(update_args));
}
// Forwards a connection-backoff reset to the wrapped child policy.
void WeightedTargetLb::WeightedChild::ResetBackoffLocked() {
  child_policy_->ResetBackoffLocked();
}
// Handles a connectivity update from the child policy: caches the picker,
// nudges the child out of IDLE, applies TRANSIENT_FAILURE latching (ignore
// non-READY states after a failure until READY is seen again), and asks the
// parent to recompute its aggregate state.
void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
    grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
  // Cache the picker in the WeightedChild.
  picker_wrapper_ = MakeRefCounted<ChildPickerWrapper>(std::move(picker));
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] WeightedChild %p %s: connectivity "
            "state update: state=%s picker_wrapper=%p",
            weighted_target_policy_.get(), this, name_.c_str(),
            ConnectivityStateName(state), picker_wrapper_.get());
  }
  // If the child reports IDLE, immediately tell it to exit idle.
  if (state == GRPC_CHANNEL_IDLE) child_policy_->ExitIdleLocked();
  // Decide what state to report for aggregation purposes.
  // If we haven't seen a failure since the last time we were in state
  // READY, then we report the state change as-is. However, once we do see
  // a failure, we report TRANSIENT_FAILURE and ignore any subsequent state
  // changes until we go back into state READY.
  if (!seen_failure_since_ready_) {
    if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
      seen_failure_since_ready_ = true;
    }
  } else {
    if (state != GRPC_CHANNEL_READY) return;
    seen_failure_since_ready_ = false;
  }
  connectivity_state_ = state;
  // Notify the LB policy.
  weighted_target_policy_->UpdateStateLocked();
}
// Removes this child from the picking rotation (weight 0) and arms the
// delayed-removal timer; the leaked ref is released in
// OnDelayedRemovalTimerLocked().  Weight 0 doubles as the "already
// deactivated" marker.
void WeightedTargetLb::WeightedChild::DeactivateLocked() {
  // If already deactivated, don't do that again.
  if (weight_ == 0) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
    gpr_log(GPR_INFO,
            "[weighted_target_lb %p] WeightedChild %p %s: deactivating",
            weighted_target_policy_.get(), this, name_.c_str());
  }
  // Set the child weight to 0 so that future picker won't contain this child.
  weight_ = 0;
  // Start a timer to delete the child.
  Ref(DEBUG_LOCATION, "WeightedChild+timer").release();
  GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
                    grpc_schedule_on_exec_ctx);
  delayed_removal_timer_callback_pending_ = true;
  grpc_timer_init(&delayed_removal_timer_,
                  ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
                  &on_delayed_removal_timer_);
}
void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimer(void* arg,
grpc_error* error) {
WeightedChild* self = static_cast<WeightedChild*>(arg);
self->weighted_target_policy_->combiner()->Run(
GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_,
OnDelayedRemovalTimerLocked, self, nullptr),
GRPC_ERROR_REF(error));
}
// Runs in the combiner.  If the timer fired cleanly (error == GRPC_ERROR_NONE,
// i.e. it was not cancelled), the callback is still expected
// (delayed_removal_timer_callback_pending_), the policy is not shutting down,
// and the child is still deactivated (weight_ == 0), erases this child from
// the parent's target map -- which destroys the WeightedChild once all refs
// are released.  Always drops the ref taken in DeactivateLocked().
// NOTE(review): delayed_removal_timer_callback_pending_ is presumably cleared
// wherever the timer is cancelled (not visible in this chunk), making a stale
// callback a no-op -- confirm against the reactivation path.
void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimerLocked(
    void* arg, grpc_error* error) {
  WeightedChild* self = static_cast<WeightedChild*>(arg);
  if (error == GRPC_ERROR_NONE &&
      self->delayed_removal_timer_callback_pending_ && !self->shutdown_ &&
      self->weight_ == 0) {
    self->delayed_removal_timer_callback_pending_ = false;
    self->weighted_target_policy_->targets_.erase(self->name_);
  }
  self->Unref(DEBUG_LOCATION, "WeightedChild+timer");
}
//
// WeightedTargetLb::WeightedChild::Helper
//
// Delegates subchannel creation to the parent policy's helper; refuses to
// create new subchannels once the parent has started shutting down.
RefCountedPtr<SubchannelInterface>
WeightedTargetLb::WeightedChild::Helper::CreateSubchannel(
    const grpc_channel_args& args) {
  WeightedTargetLb* parent = weighted_child_->weighted_target_policy_.get();
  if (parent->shutting_down_) return nullptr;
  return parent->channel_control_helper()->CreateSubchannel(args);
}
// Forwards a connectivity-state/picker update from the wrapped child policy
// to the owning WeightedChild, unless the parent policy is shutting down.
void WeightedTargetLb::WeightedChild::Helper::UpdateState(
    grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
  WeightedTargetLb* parent = weighted_child_->weighted_target_policy_.get();
  if (parent->shutting_down_) return;
  weighted_child_->OnConnectivityStateUpdateLocked(state, std::move(picker));
}
void WeightedTargetLb::WeightedChild::Helper::RequestReresolution() {
if (weighted_child_->weighted_target_policy_->shutting_down_) return;
weighted_child_->weighted_target_policy_->channel_control_helper()
->RequestReresolution();
}
// Forwards a channel trace event from the child policy to the parent
// policy's helper, unless the parent is shutting down.
void WeightedTargetLb::WeightedChild::Helper::AddTraceEvent(
    TraceSeverity severity, StringView message) {
  WeightedTargetLb* parent = weighted_child_->weighted_target_policy_.get();
  if (parent->shutting_down_) return;
  parent->channel_control_helper()->AddTraceEvent(severity, message);
}
//
// factory
//
// Factory for the weighted_target LB policy: instantiates WeightedTargetLb
// and parses its service-config JSON into a WeightedTargetLbConfig.
class WeightedTargetLbFactory : public LoadBalancingPolicyFactory {
 public:
  OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
      LoadBalancingPolicy::Args args) const override {
    return MakeOrphanable<WeightedTargetLb>(std::move(args));
  }
  const char* name() const override { return kWeightedTarget; }
  // Parses {"targets": {<name>: {"weight": N, "childPolicy": [...]}, ...}}.
  // On failure, sets *error (aggregating all per-target errors) and returns
  // null; on success, returns the parsed config.
  RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
      const Json& json, grpc_error** error) const override {
    GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
    if (json.type() == Json::Type::JSON_NULL) {
      // weighted_target was mentioned as a policy in the deprecated
      // loadBalancingPolicy field or in the client API.
      *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:loadBalancingPolicy error:weighted_target policy requires "
          "configuration. Please use loadBalancingConfig field of service "
          "config instead.");
      return nullptr;
    }
    std::vector<grpc_error*> error_list;
    // Weight map.
    WeightedTargetLbConfig::TargetMap target_map;
    auto it = json.object_value().find("targets");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:targets error:required field not present"));
    } else if (it->second.type() != Json::Type::OBJECT) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:targets error:type should be object"));
    } else {
      // Parse each target entry; keep going past bad entries so that all
      // errors are reported at once.
      for (const auto& p : it->second.object_value()) {
        WeightedTargetLbConfig::ChildConfig child_config;
        std::vector<grpc_error*> child_errors =
            ParseChildConfig(p.second, &child_config);
        if (!child_errors.empty()) {
          // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error
          // string is not static in this case.
          grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(
              absl::StrCat("field:targets key:", p.first).c_str());
          for (grpc_error* child_error : child_errors) {
            error = grpc_error_add_child(error, child_error);
          }
          error_list.push_back(error);
        } else {
          target_map[p.first] = std::move(child_config);
        }
      }
    }
    if (!error_list.empty()) {
      *error = GRPC_ERROR_CREATE_FROM_VECTOR(
          "weighted_target_experimental LB policy config", &error_list);
      return nullptr;
    }
    return MakeRefCounted<WeightedTargetLbConfig>(std::move(target_map));
  }
 private:
  // Parses one target entry ({"weight": N, "childPolicy": [...]}) into
  // *child_config.  Returns the list of errors found (empty on success).
  static std::vector<grpc_error*> ParseChildConfig(
      const Json& json, WeightedTargetLbConfig::ChildConfig* child_config) {
    std::vector<grpc_error*> error_list;
    if (json.type() != Json::Type::OBJECT) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "value should be of type object"));
      return error_list;
    }
    // Weight.
    auto it = json.object_value().find("weight");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "required field \"weight\" not specified"));
    } else if (it->second.type() != Json::Type::NUMBER) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:weight error:must be of type number"));
    } else {
      // NOTE: string_value() is used even after the NUMBER type check;
      // this Json type appears to store numeric values as strings --
      // confirm against the Json class definition.
      child_config->weight =
          gpr_parse_nonnegative_int(it->second.string_value().c_str());
      if (child_config->weight == -1) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "field:weight error:unparseable value"));
      } else if (child_config->weight == 0) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "field:weight error:value must be greater than zero"));
      }
    }
    // Child policy.  Optional: absence is not an error.
    it = json.object_value().find("childPolicy");
    if (it != json.object_value().end()) {
      grpc_error* parse_error = GRPC_ERROR_NONE;
      child_config->config =
          LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(it->second,
                                                                &parse_error);
      if (child_config->config == nullptr) {
        GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
        std::vector<grpc_error*> child_errors;
        child_errors.push_back(parse_error);
        error_list.push_back(
            GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors));
      }
    }
    return error_list;
  }
};
} // namespace
} // namespace grpc_core
//
// Plugin registration
//
// Plugin entry point: registers the weighted_target LB policy factory with
// the global LoadBalancingPolicyRegistry.
void grpc_lb_policy_weighted_target_init() {
  grpc_core::LoadBalancingPolicyRegistry::Builder::
      RegisterLoadBalancingPolicyFactory(
          absl::make_unique<grpc_core::WeightedTargetLbFactory>());
}
void grpc_lb_policy_weighted_target_shutdown() {}

@ -37,9 +37,9 @@ namespace {
constexpr char kCds[] = "cds_experimental";
// Config for this LB policy.
class CdsConfig : public LoadBalancingPolicy::Config {
class CdsLbConfig : public LoadBalancingPolicy::Config {
public:
explicit CdsConfig(std::string cluster) : cluster_(std::move(cluster)) {}
explicit CdsLbConfig(std::string cluster) : cluster_(std::move(cluster)) {}
const std::string& cluster() const { return cluster_; }
const char* name() const override { return kCds; }
@ -50,7 +50,7 @@ class CdsConfig : public LoadBalancingPolicy::Config {
// CDS LB policy.
class CdsLb : public LoadBalancingPolicy {
public:
explicit CdsLb(Args args);
CdsLb(RefCountedPtr<XdsClient> xds_client, Args args);
const char* name() const override { return kCds; }
@ -89,7 +89,7 @@ class CdsLb : public LoadBalancingPolicy {
void ShutdownLocked() override;
RefCountedPtr<CdsConfig> config_;
RefCountedPtr<CdsLbConfig> config_;
// Current channel args from the resolver.
const grpc_channel_args* args_ = nullptr;
@ -124,21 +124,37 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) {
}
// Construct config for child policy.
Json::Object child_config = {
{"edsServiceName",
(cluster_data.eds_service_name.empty() ? parent_->config_->cluster()
: cluster_data.eds_service_name)},
{"clusterName", parent_->config_->cluster()},
{"localityPickingPolicy",
Json::Array{
Json::Object{
{"weighted_target_experimental",
Json::Object{
{"targets", Json::Object()},
}},
},
}},
{"endpointPickingPolicy",
Json::Array{
Json::Object{
{"round_robin", Json::Object()},
},
}},
};
if (!cluster_data.eds_service_name.empty()) {
child_config["edsServiceName"] = cluster_data.eds_service_name;
}
if (cluster_data.lrs_load_reporting_server_name.has_value()) {
child_config["lrsLoadReportingServerName"] =
cluster_data.lrs_load_reporting_server_name.value();
}
Json json = Json::Array{
Json::Object{
{"xds_experimental", std::move(child_config)},
{"eds_experimental", std::move(child_config)},
},
};
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
std::string json_str = json.Dump();
std::string json_str = json.Dump(/*indent=*/1);
gpr_log(GPR_INFO, "[cdslb %p] generated config for child policy: %s",
parent_.get(), json_str.c_str());
}
@ -156,19 +172,19 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) {
args.args = parent_->args_;
args.channel_control_helper = absl::make_unique<Helper>(parent_->Ref());
parent_->child_policy_ =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
"xds_experimental", std::move(args));
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(config->name(),
std::move(args));
if (parent_->child_policy_ == nullptr) {
OnError(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"failed to create xds_experimental child policy"));
"failed to create child policy"));
return;
}
grpc_pollset_set_add_pollset_set(
parent_->child_policy_->interested_parties(),
parent_->interested_parties());
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] created child policy xds_experimental (%p)",
parent_.get(), parent_->child_policy_.get());
gpr_log(GPR_INFO, "[cdslb %p] created child policy %s (%p)",
parent_.get(), config->name(), parent_->child_policy_.get());
}
}
// Update child policy.
@ -232,9 +248,8 @@ void CdsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) {
// CdsLb
//
CdsLb::CdsLb(Args args)
: LoadBalancingPolicy(std::move(args)),
xds_client_(XdsClient::GetFromChannelArgs(*args.args)) {
CdsLb::CdsLb(RefCountedPtr<XdsClient> xds_client, Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] created -- using xds client %p from channel",
this, xds_client_.get());
@ -313,11 +328,19 @@ void CdsLb::UpdateLocked(UpdateArgs args) {
// factory
//
class CdsFactory : public LoadBalancingPolicyFactory {
class CdsLbFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
return MakeOrphanable<CdsLb>(std::move(args));
RefCountedPtr<XdsClient> xds_client =
XdsClient::GetFromChannelArgs(*args.args);
if (xds_client == nullptr) {
gpr_log(GPR_ERROR,
"XdsClient not present in channel args -- cannot instantiate "
"cds LB policy");
return nullptr;
}
return MakeOrphanable<CdsLb>(std::move(xds_client), std::move(args));
}
const char* name() const override { return kCds; }
@ -349,7 +372,7 @@ class CdsFactory : public LoadBalancingPolicyFactory {
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Cds Parser", &error_list);
return nullptr;
}
return MakeRefCounted<CdsConfig>(std::move(cluster));
return MakeRefCounted<CdsLbConfig>(std::move(cluster));
}
};
@ -364,7 +387,7 @@ class CdsFactory : public LoadBalancingPolicyFactory {
void grpc_lb_policy_cds_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory(
absl::make_unique<grpc_core::CdsFactory>());
absl::make_unique<grpc_core::CdsLbFactory>());
}
void grpc_lb_policy_cds_shutdown() {}

@ -0,0 +1,907 @@
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <inttypes.h>
#include <limits.h>
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/xds/xds_client.h"
#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/uri/uri_parser.h"
#define GRPC_EDS_DEFAULT_FALLBACK_TIMEOUT 10000
namespace grpc_core {
// Trace flag gating the "[edslb ...]" log statements in this file
// (trace flags are typically enabled via the GRPC_TRACE environment
// variable -- here under the name "eds_lb").
TraceFlag grpc_lb_eds_trace(false, "eds_lb");
namespace {
// Policy name under which this LB policy is registered and configured.
constexpr char kEds[] = "eds_experimental";
// Config for EDS LB policy.
// Parsed service-config for the EDS LB policy.  Immutable value object:
// everything is set in the ctor and exposed through const accessors.
class EdsLbConfig : public LoadBalancingPolicy::Config {
 public:
  // All parameters are taken by value and moved into place.
  // lrs_load_reporting_server_name is nullopt when load reporting is
  // disabled; locality/endpoint_picking_policy are JSON LB-config lists.
  EdsLbConfig(std::string cluster_name, std::string eds_service_name,
              absl::optional<std::string> lrs_load_reporting_server_name,
              Json locality_picking_policy, Json endpoint_picking_policy)
      : cluster_name_(std::move(cluster_name)),
        eds_service_name_(std::move(eds_service_name)),
        lrs_load_reporting_server_name_(
            std::move(lrs_load_reporting_server_name)),
        locality_picking_policy_(std::move(locality_picking_policy)),
        endpoint_picking_policy_(std::move(endpoint_picking_policy)) {}
  const char* name() const override { return kEds; }
  const std::string& cluster_name() const { return cluster_name_; }
  const std::string& eds_service_name() const { return eds_service_name_; }
  const absl::optional<std::string>& lrs_load_reporting_server_name() const {
    return lrs_load_reporting_server_name_;
  }  // (fixed: removed stray ';' after the function body, -Wextra-semi)
  const Json& locality_picking_policy() const {
    return locality_picking_policy_;
  }
  const Json& endpoint_picking_policy() const {
    return endpoint_picking_policy_;
  }
 private:
  std::string cluster_name_;
  std::string eds_service_name_;
  absl::optional<std::string> lrs_load_reporting_server_name_;
  Json locality_picking_policy_;
  Json endpoint_picking_policy_;
};
// EDS LB policy.
// EDS LB policy.  Watches EDS data via an XdsClient (either borrowed from
// the channel or created locally), translates the priority/locality update
// into addresses plus a generated child-policy config, and wraps the child's
// picker in a DropPicker that applies the EDS drop config.
class EdsLb : public LoadBalancingPolicy {
 public:
  explicit EdsLb(Args args);
  const char* name() const override { return kEds; }
  void UpdateLocked(UpdateArgs args) override;
  void ResetBackoffLocked() override;
 private:
  class EndpointWatcher;
  // A simple wrapper for ref-counting a picker from the child policy.
  class ChildPickerWrapper : public RefCounted<ChildPickerWrapper> {
   public:
    explicit ChildPickerWrapper(std::unique_ptr<SubchannelPicker> picker)
        : picker_(std::move(picker)) {}
    PickResult Pick(PickArgs args) { return picker_->Pick(args); }
   private:
    std::unique_ptr<SubchannelPicker> picker_;
  };
  // A picker that handles drops.
  // Snapshots drop_config_/drop_stats_/child_picker_ at construction time.
  class DropPicker : public SubchannelPicker {
   public:
    explicit DropPicker(EdsLb* eds_policy);
    PickResult Pick(PickArgs args) override;
   private:
    RefCountedPtr<XdsApi::DropConfig> drop_config_;
    RefCountedPtr<XdsClusterDropStats> drop_stats_;
    RefCountedPtr<ChildPickerWrapper> child_picker_;
  };
  // Helper handed to the child policy; forwards everything to this policy
  // while guarding against use after shutdown.
  class Helper : public ChannelControlHelper {
   public:
    explicit Helper(RefCountedPtr<EdsLb> eds_policy)
        : eds_policy_(std::move(eds_policy)) {}
    ~Helper() { eds_policy_.reset(DEBUG_LOCATION, "Helper"); }
    RefCountedPtr<SubchannelInterface> CreateSubchannel(
        const grpc_channel_args& args) override;
    void UpdateState(grpc_connectivity_state state,
                     std::unique_ptr<SubchannelPicker> picker) override;
    // This is a no-op, because we get the addresses from the xds
    // client, which is a watch-based API.
    void RequestReresolution() override {}
    void AddTraceEvent(TraceSeverity severity, StringView message) override;
   private:
    RefCountedPtr<EdsLb> eds_policy_;
  };
  ~EdsLb();
  void ShutdownLocked() override;
  void UpdatePriorityList(XdsApi::PriorityListUpdate priority_list_update);
  void UpdateChildPolicyLocked();
  OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
      const grpc_channel_args* args);
  ServerAddressList CreateChildPolicyAddressesLocked();
  RefCountedPtr<Config> CreateChildPolicyConfigLocked();
  grpc_channel_args* CreateChildPolicyArgsLocked(
      const grpc_channel_args* args_in);
  void MaybeUpdateDropPickerLocked();
  // Returns the EDS resource name to watch: the server name when we own the
  // XdsClient ourselves, otherwise the configured eds_service_name (falling
  // back to the cluster name).
  // Caller must ensure that config_ is set before calling.
  const StringView GetEdsResourceName() const {
    if (xds_client_from_channel_ == nullptr) return server_name_;
    if (!config_->eds_service_name().empty()) {
      return config_->eds_service_name();
    }
    return config_->cluster_name();
  }
  // Returns a pair containing the cluster and eds_service_name to use
  // for LRS load reporting.
  // Caller must ensure that config_ is set before calling.
  std::pair<StringView, StringView> GetLrsClusterKey() const {
    if (xds_client_from_channel_ == nullptr) return {server_name_, nullptr};
    return {config_->cluster_name(), config_->eds_service_name()};
  }
  // The channel-provided client takes precedence over the locally-owned one.
  XdsClient* xds_client() const {
    return xds_client_from_channel_ != nullptr ? xds_client_from_channel_.get()
                                               : xds_client_.get();
  }
  // Server name from target URI.
  std::string server_name_;
  // Current channel args and config from the resolver.
  const grpc_channel_args* args_ = nullptr;
  RefCountedPtr<EdsLbConfig> config_;
  // Internal state.
  bool shutting_down_ = false;
  // The xds client and endpoint watcher.
  // If we get the XdsClient from the channel, we store it in
  // xds_client_from_channel_; if we create it ourselves, we store it in
  // xds_client_.
  RefCountedPtr<XdsClient> xds_client_from_channel_;
  OrphanablePtr<XdsClient> xds_client_;
  // A pointer to the endpoint watcher, to be used when cancelling the watch.
  // Note that this is not owned, so this pointer must never be derefernced.
  EndpointWatcher* endpoint_watcher_ = nullptr;
  // The latest data from the endpoint watcher.
  XdsApi::PriorityListUpdate priority_list_update_;
  // State used to retain child policy names for priority policy.
  std::vector<size_t /*child_number*/> priority_child_numbers_;
  RefCountedPtr<XdsApi::DropConfig> drop_config_;
  RefCountedPtr<XdsClusterDropStats> drop_stats_;
  OrphanablePtr<LoadBalancingPolicy> child_policy_;
  // The latest state and picker returned from the child policy.
  // NOTE(review): child_state_ has no initializer; presumably it is only
  // read after Helper::UpdateState() has set it -- confirm.
  grpc_connectivity_state child_state_;
  RefCountedPtr<ChildPickerWrapper> child_picker_;
};
//
// EdsLb::DropPicker
//
// Snapshots the policy's current drop config, drop stats, and child picker
// so this picker stays consistent even if the policy updates them later.
EdsLb::DropPicker::DropPicker(EdsLb* eds_policy)
    : drop_config_(eds_policy->drop_config_),
      drop_stats_(eds_policy->drop_stats_),
      child_picker_(eds_policy->child_picker_) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p] constructed new drop picker %p", eds_policy,
            this);
  }
}
// Applies the EDS drop config before delegating to the child's picker.
// A dropped call is reported as PICK_COMPLETE with no subchannel, so the
// call fails without ever being routed.
EdsLb::PickResult EdsLb::DropPicker::Pick(PickArgs args) {
  // First, see whether the drop config says to drop this call.
  const std::string* drop_category;
  if (drop_config_->ShouldDrop(&drop_category)) {
    // Record the drop for load reporting, if enabled.
    if (drop_stats_ != nullptr) drop_stats_->AddCallDropped(*drop_category);
    PickResult dropped;
    dropped.type = PickResult::PICK_COMPLETE;
    return dropped;
  }
  // Not dropping: delegate to the child's picker if we have one.
  if (child_picker_ != nullptr) return child_picker_->Pick(args);
  // No child picker.  Should never happen when not dropping all calls.
  PickResult failure;
  failure.type = PickResult::PICK_FAILED;
  failure.error =
      grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                             "eds drop picker not given any child picker"),
                         GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL);
  return failure;
}
//
// EdsLb::Helper
//
// Delegates subchannel creation to the parent channel's helper; returns
// null once the policy has started shutting down.
RefCountedPtr<SubchannelInterface> EdsLb::Helper::CreateSubchannel(
    const grpc_channel_args& args) {
  EdsLb* parent = eds_policy_.get();
  if (parent->shutting_down_) return nullptr;
  return parent->channel_control_helper()->CreateSubchannel(args);
}
// Receives state/picker updates from the child policy.  Caches both on the
// EdsLb policy and then surfaces a DropPicker (which wraps the cached child
// picker) to the channel.
void EdsLb::Helper::UpdateState(grpc_connectivity_state state,
                                std::unique_ptr<SubchannelPicker> picker) {
  EdsLb* parent = eds_policy_.get();
  if (parent->shutting_down_) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p] child policy updated state=%s picker=%p",
            parent, ConnectivityStateName(state), picker.get());
  }
  // Cache the child's state and wrap its picker for ref-counted sharing.
  parent->child_state_ = state;
  parent->child_picker_ = MakeRefCounted<ChildPickerWrapper>(std::move(picker));
  // Wrap the picker in a DropPicker and pass it up.
  parent->MaybeUpdateDropPickerLocked();
}
// Forwards a channel trace event to the parent channel's helper unless the
// policy is shutting down.
void EdsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) {
  EdsLb* parent = eds_policy_.get();
  if (parent->shutting_down_) return;
  parent->channel_control_helper()->AddTraceEvent(severity, message);
}
//
// EdsLb::EndpointWatcher
//
// Watcher registered with the XdsClient for EDS data.  Holds a ref to the
// EdsLb policy (released in the dtor) and pushes endpoint/drop updates into
// it, updating only the pieces that actually changed.
class EdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface {
 public:
  explicit EndpointWatcher(RefCountedPtr<EdsLb> eds_policy)
      : eds_policy_(std::move(eds_policy)) {}
  ~EndpointWatcher() { eds_policy_.reset(DEBUG_LOCATION, "EndpointWatcher"); }
  void OnEndpointChanged(XdsApi::EdsUpdate update) override {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
      gpr_log(GPR_INFO, "[edslb %p] Received EDS update from xds client",
              eds_policy_.get());
    }
    // Update the drop config.
    // Compare by value; a null stored config always counts as changed.
    const bool drop_config_changed =
        eds_policy_->drop_config_ == nullptr ||
        *eds_policy_->drop_config_ != *update.drop_config;
    if (drop_config_changed) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
        gpr_log(GPR_INFO, "[edslb %p] Updating drop config", eds_policy_.get());
      }
      eds_policy_->drop_config_ = std::move(update.drop_config);
      eds_policy_->MaybeUpdateDropPickerLocked();
    } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
      gpr_log(GPR_INFO, "[edslb %p] Drop config unchanged, ignoring",
              eds_policy_.get());
    }
    // Update priority and locality info.
    // Always update when there is no child policy yet, so the first EDS
    // response triggers child creation.
    if (eds_policy_->child_policy_ == nullptr ||
        eds_policy_->priority_list_update_ != update.priority_list_update) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
        gpr_log(GPR_INFO, "[edslb %p] Updating priority list",
                eds_policy_.get());
      }
      eds_policy_->UpdatePriorityList(std::move(update.priority_list_update));
    } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
      gpr_log(GPR_INFO, "[edslb %p] Priority list unchanged, ignoring",
              eds_policy_.get());
    }
  }
  // Takes ownership of error: it is either consumed by the
  // TransientFailurePicker or unreffed here.
  void OnError(grpc_error* error) override {
    gpr_log(GPR_ERROR, "[edslb %p] xds watcher reported error: %s",
            eds_policy_.get(), grpc_error_string(error));
    // Go into TRANSIENT_FAILURE if we have not yet created the child
    // policy (i.e., we have not yet received data from xds). Otherwise,
    // we keep running with the data we had previously.
    if (eds_policy_->child_policy_ == nullptr) {
      eds_policy_->channel_control_helper()->UpdateState(
          GRPC_CHANNEL_TRANSIENT_FAILURE,
          absl::make_unique<TransientFailurePicker>(error));
    } else {
      GRPC_ERROR_UNREF(error);
    }
  }
 private:
  RefCountedPtr<EdsLb> eds_policy_;
};
//
// EdsLb public methods
//
// Ctor.  Borrows the XdsClient from the channel args if present, and records
// the server name parsed out of the GRPC_ARG_SERVER_URI channel arg.
// NOTE(review): *args.args is read after std::move(args); this appears safe
// because Args holds a raw channel-args pointer that moving does not null
// out -- matches usage elsewhere in this file.
EdsLb::EdsLb(Args args)
    : LoadBalancingPolicy(std::move(args)),
      xds_client_from_channel_(XdsClient::GetFromChannelArgs(*args.args)) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p] created -- xds client from channel: %p", this,
            xds_client_from_channel_.get());
  }
  // Record server name.
  const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
  const char* server_uri = grpc_channel_arg_get_string(arg);
  GPR_ASSERT(server_uri != nullptr);
  grpc_uri* uri = grpc_uri_parse(server_uri, true);
  // Fail loudly on an unparseable URI instead of segfaulting on the
  // dereference below.
  GPR_ASSERT(uri != nullptr);
  GPR_ASSERT(uri->path[0] != '\0');
  // Strip any leading '/' from the URI path to get the server name.
  server_name_ = uri->path[0] == '/' ? uri->path + 1 : uri->path;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p] server name from channel: %s", this,
            server_name_.c_str());
  }
  grpc_uri_destroy(uri);
}
// Dtor.  Only releases the channel args; all other teardown (child policy,
// pickers, watches) happens earlier in ShutdownLocked().
EdsLb::~EdsLb() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p] destroying xds LB policy", this);
  }
  grpc_channel_args_destroy(args_);
}
// Tears down the policy: drops the child picker and child policy, stops
// drop stats, and cancels the endpoint watch.  Ordering matters -- the
// picker is dropped before the child policy so it cannot keep the child
// alive, and the watch is cancelled here (not in the dtor) to break the
// watcher->policy ref cycle when the XdsClient outlives us.
void EdsLb::ShutdownLocked() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p] shutting down", this);
  }
  shutting_down_ = true;
  // Drop our ref to the child's picker, in case it's holding a ref to
  // the child.
  child_picker_.reset();
  if (child_policy_ != nullptr) {
    grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
                                     interested_parties());
    child_policy_.reset();
  }
  drop_stats_.reset();
  // Cancel the endpoint watch here instead of in our dtor if we are using the
  // xds resolver, because the watcher holds a ref to us and we might not be
  // destroying the XdsClient, leading to a situation where this LB policy is
  // never destroyed.
  if (xds_client_from_channel_ != nullptr) {
    // A watch exists only if UpdateLocked() ran at least once (config_ set).
    if (config_ != nullptr) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
        gpr_log(GPR_INFO, "[edslb %p] cancelling xds watch for %s", this,
                std::string(GetEdsResourceName()).c_str());
      }
      xds_client()->CancelEndpointDataWatch(GetEdsResourceName(),
                                            endpoint_watcher_);
    }
    xds_client_from_channel_.reset();
  }
  xds_client_.reset();
}
// Handles a config/args update from the resolver.  On the first update this
// also creates the XdsClient (when not provided by the channel) and starts
// the endpoint watch; on every update it refreshes drop stats and pushes the
// new config down to the child policy if one exists.
void EdsLb::UpdateLocked(UpdateArgs args) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p] Received update", this);
  }
  // args_ is only null before the first update.
  const bool is_initial_update = args_ == nullptr;
  // Update config.
  auto old_config = std::move(config_);
  config_ = std::move(args.config);
  // Update args.
  grpc_channel_args_destroy(args_);
  args_ = args.args;
  args.args = nullptr;
  if (is_initial_update) {
    // Initialize XdsClient.
    if (xds_client_from_channel_ == nullptr) {
      grpc_error* error = GRPC_ERROR_NONE;
      xds_client_ = MakeOrphanable<XdsClient>(
          combiner(), interested_parties(), GetEdsResourceName(),
          nullptr /* service config watcher */, *args_, &error);
      // TODO(roth): If we decide that we care about EDS-only mode, add
      // proper error handling here.
      GPR_ASSERT(error == GRPC_ERROR_NONE);
      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
        gpr_log(GPR_INFO, "[edslb %p] Created xds client %p", this,
                xds_client_.get());
      }
    }
  }
  // Update drop stats for load reporting if needed.
  // (old_config is null on the initial update, hence the short-circuit.)
  if (is_initial_update || config_->lrs_load_reporting_server_name() !=
                               old_config->lrs_load_reporting_server_name()) {
    drop_stats_.reset();
    if (config_->lrs_load_reporting_server_name().has_value()) {
      const auto key = GetLrsClusterKey();
      drop_stats_ = xds_client()->AddClusterDropStats(
          config_->lrs_load_reporting_server_name().value(),
          key.first /*cluster_name*/, key.second /*eds_service_name*/);
    }
    MaybeUpdateDropPickerLocked();
  }
  // Update child policy if needed.
  // Note that this comes after updating drop_stats_, since we want that
  // to be used by any new picker we create here.
  if (child_policy_ != nullptr) UpdateChildPolicyLocked();
  // Create endpoint watcher if needed.
  if (is_initial_update) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
      gpr_log(GPR_INFO, "[edslb %p] starting xds watch for %s", this,
              std::string(GetEdsResourceName()).c_str());
    }
    // The watcher takes a ref to this policy; the raw pointer is retained
    // only for cancelling the watch later (see endpoint_watcher_ comment).
    auto watcher = absl::make_unique<EndpointWatcher>(
        Ref(DEBUG_LOCATION, "EndpointWatcher"));
    endpoint_watcher_ = watcher.get();
    xds_client()->WatchEndpointData(GetEdsResourceName(), std::move(watcher));
  }
}
// Resets connection backoff on the locally-owned XdsClient (if any) and on
// the child policy.  A channel-provided XdsClient is reset via the resolver
// instead, so xds_client_from_channel_ is deliberately not touched here.
void EdsLb::ResetBackoffLocked() {
  if (xds_client_ != nullptr) xds_client_->ResetBackoff();
  if (child_policy_ != nullptr) child_policy_->ResetBackoffLocked();
}
//
// child policy-related methods
//
// Installs a new priority list from EDS and recomputes the stable "childN"
// numbers for each priority.  A priority keeps its old child number if any
// of its localities belonged to that number before, so the generated
// priority-policy child names (and hence their subchannels) are preserved
// across EDS updates even when priorities are reordered.
void EdsLb::UpdatePriorityList(
    XdsApi::PriorityListUpdate priority_list_update) {
  // Build some maps from locality to child number and the reverse from
  // the old data in priority_list_update_ and priority_child_numbers_.
  std::map<XdsLocalityName*, size_t /*child_number*/, XdsLocalityName::Less>
      locality_child_map;
  std::map<size_t, std::set<XdsLocalityName*>> child_locality_map;
  for (uint32_t priority = 0; priority < priority_list_update_.size();
       ++priority) {
    auto* locality_map = priority_list_update_.Find(priority);
    GPR_ASSERT(locality_map != nullptr);
    size_t child_number = priority_child_numbers_[priority];
    for (const auto& p : locality_map->localities) {
      XdsLocalityName* locality_name = p.first.get();
      locality_child_map[locality_name] = child_number;
      child_locality_map[child_number].insert(locality_name);
    }
  }
  // Construct new list of children.
  std::vector<size_t> priority_child_numbers;
  for (uint32_t priority = 0; priority < priority_list_update.size();
       ++priority) {
    auto* locality_map = priority_list_update.Find(priority);
    GPR_ASSERT(locality_map != nullptr);
    absl::optional<size_t> child_number;
    // If one of the localities in this priority already existed, reuse its
    // child number.
    for (const auto& p : locality_map->localities) {
      XdsLocalityName* locality_name = p.first.get();
      if (!child_number.has_value()) {
        auto it = locality_child_map.find(locality_name);
        if (it != locality_child_map.end()) {
          child_number = it->second;
          locality_child_map.erase(it);
          // Remove localities that *used* to be in this child number, so
          // that we don't incorrectly reuse this child number for a
          // subsequent priority.
          for (XdsLocalityName* old_locality :
               child_locality_map[*child_number]) {
            locality_child_map.erase(old_locality);
          }
        }
      } else {
        // Remove all localities that are now in this child number, so
        // that we don't accidentally reuse this child number for a
        // subsequent priority.
        locality_child_map.erase(locality_name);
      }
    }
    // If we didn't find an existing child number, assign a new one.
    // (Linear scan for the smallest unused number.)
    if (!child_number.has_value()) {
      for (child_number = 0;
           child_locality_map.find(*child_number) != child_locality_map.end();
           ++(*child_number))
        ;
      // Add entry so we know that the child number is in use.
      // (Don't need to add the list of localities, since we won't use them.)
      child_locality_map[*child_number];
    }
    priority_child_numbers.push_back(*child_number);
  }
  // Save update.
  priority_list_update_ = std::move(priority_list_update);
  priority_child_numbers_ = std::move(priority_child_numbers);
  // Update child policy.
  UpdateChildPolicyLocked();
}
// Flattens the current priority list into a single address list.  Each
// address is tagged with its hierarchical path ("childN" / locality name)
// via a channel arg, which the priority and weighted_target policies use to
// route the address to the correct child.
ServerAddressList EdsLb::CreateChildPolicyAddressesLocked() {
  ServerAddressList addresses;
  for (uint32_t priority = 0; priority < priority_list_update_.size();
       ++priority) {
    const auto* locality_map = priority_list_update_.Find(priority);
    GPR_ASSERT(locality_map != nullptr);
    std::string priority_child_name =
        absl::StrCat("child", priority_child_numbers_[priority]);
    for (const auto& p : locality_map->localities) {
      std::vector<std::string> hierarchical_path = {
          priority_child_name, p.first->AsHumanReadableString()};
      for (const ServerAddress& address : p.second.serverlist) {
        // Copy the address's channel args, adding the path arg; the new
        // ServerAddress takes ownership of the copied args.
        grpc_arg path_arg = MakeHierarchicalPathArg(hierarchical_path);
        grpc_channel_args* args =
            grpc_channel_args_copy_and_add(address.args(), &path_arg, 1);
        addresses.emplace_back(address.address(), args);
      }
    }
  }
  return addresses;
}
// Generates the child policy's config from the current priority list:
// a priority_experimental policy whose children are the configured
// locality-picking policy, each populated with a weighted_target map of the
// priority's localities (optionally wrapped in lrs_experimental for load
// reporting).  Returns null after reporting TRANSIENT_FAILURE if the
// generated JSON unexpectedly fails to parse.
RefCountedPtr<LoadBalancingPolicy::Config>
EdsLb::CreateChildPolicyConfigLocked() {
  Json::Object priority_children;
  Json::Array priority_priorities;
  for (uint32_t priority = 0; priority < priority_list_update_.size();
       ++priority) {
    const auto* locality_map = priority_list_update_.Find(priority);
    GPR_ASSERT(locality_map != nullptr);
    Json::Object weighted_targets;
    for (const auto& p : locality_map->localities) {
      XdsLocalityName* locality_name = p.first.get();
      const auto& locality = p.second;
      // Construct JSON object containing locality name.
      // Empty parts are omitted rather than serialized as "".
      Json::Object locality_name_json;
      if (!locality_name->region().empty()) {
        locality_name_json["region"] = locality_name->region();
      }
      if (!locality_name->zone().empty()) {
        locality_name_json["zone"] = locality_name->zone();
      }
      if (!locality_name->sub_zone().empty()) {
        locality_name_json["subzone"] = locality_name->sub_zone();
      }
      // Construct endpoint-picking policy.
      // Wrap it in the LRS policy if load reporting is enabled.
      Json endpoint_picking_policy;
      if (config_->lrs_load_reporting_server_name().has_value()) {
        const auto key = GetLrsClusterKey();
        Json::Object lrs_config = {
            {"clusterName", std::string(key.first)},
            {"locality", std::move(locality_name_json)},
            {"lrsLoadReportingServerName",
             config_->lrs_load_reporting_server_name().value()},
            {"childPolicy", config_->endpoint_picking_policy()},
        };
        if (!key.second.empty()) {
          lrs_config["edsServiceName"] = std::string(key.second);
        }
        endpoint_picking_policy = Json::Array{Json::Object{
            {"lrs_experimental", std::move(lrs_config)},
        }};
      } else {
        endpoint_picking_policy = config_->endpoint_picking_policy();
      }
      // Add weighted target entry.
      weighted_targets[locality_name->AsHumanReadableString()] = Json::Object{
          {"weight", locality.lb_weight},
          {"childPolicy", std::move(endpoint_picking_policy)},
      };
    }
    // Add priority entry.
    const size_t child_number = priority_child_numbers_[priority];
    std::string child_name = absl::StrCat("child", child_number);
    priority_priorities.emplace_back(child_name);
    // Copy the configured locality-picking policy and splice the
    // weighted_targets map into its first (and assumed only) entry's
    // "targets" field, mutating the copy in place.
    Json locality_picking_config = config_->locality_picking_policy();
    Json::Object& config =
        *(*locality_picking_config.mutable_array())[0].mutable_object();
    auto it = config.begin();
    GPR_ASSERT(it != config.end());
    (*it->second.mutable_object())["targets"] = std::move(weighted_targets);
    priority_children[child_name] = Json::Object{
        {"config", std::move(locality_picking_config)},
    };
  }
  Json json = Json::Array{Json::Object{
      {"priority_experimental",
       Json::Object{
           {"children", std::move(priority_children)},
           {"priorities", std::move(priority_priorities)},
       }},
  }};
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    std::string json_str = json.Dump(/*indent=*/1);
    gpr_log(GPR_INFO, "[edslb %p] generated config for child policy: %s", this,
            json_str.c_str());
  }
  grpc_error* error = GRPC_ERROR_NONE;
  RefCountedPtr<LoadBalancingPolicy::Config> config =
      LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(json, &error);
  if (error != GRPC_ERROR_NONE) {
    // This should never happen, but if it does, we basically have no
    // way to fix it, so we put the channel in TRANSIENT_FAILURE.
    gpr_log(GPR_ERROR,
            "[edslb %p] error parsing generated child policy config -- "
            "will put channel in TRANSIENT_FAILURE: %s",
            this, grpc_error_string(error));
    error = grpc_error_set_int(
        grpc_error_add_child(
            GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                "eds LB policy: error parsing generated child policy config"),
            error),
        GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL);
    channel_control_helper()->UpdateState(
        GRPC_CHANNEL_TRANSIENT_FAILURE,
        absl::make_unique<TransientFailurePicker>(error));
    return nullptr;
  }
  return config;
}
void EdsLb::UpdateChildPolicyLocked() {
if (shutting_down_) return;
UpdateArgs update_args;
update_args.config = CreateChildPolicyConfigLocked();
if (update_args.config == nullptr) return;
update_args.addresses = CreateChildPolicyAddressesLocked();
update_args.args = CreateChildPolicyArgsLocked(args_);
if (child_policy_ == nullptr) {
child_policy_ = CreateChildPolicyLocked(update_args.args);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
gpr_log(GPR_INFO, "[edslb %p] Updating child policy %p", this,
child_policy_.get());
}
child_policy_->UpdateLocked(std::move(update_args));
}
// Builds the channel args handed to the child policy: a copy of the
// incoming args plus the EDS-specific ones appended below.
// Caller owns the returned args.
grpc_channel_args* EdsLb::CreateChildPolicyArgsLocked(
    const grpc_channel_args* args) {
  absl::InlinedVector<grpc_arg, 3> new_args = {
      // Mark the addresses as backends coming from an xds load balancer.
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER),
          1),
      // Inhibit client-side health checking, since the balancer does
      // this for us.
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1),
  };
  // If the XdsClient didn't come from the channel, propagate ours as a
  // channel arg so descendant policies can retrieve it.
  if (xds_client_from_channel_ == nullptr) {
    new_args.emplace_back(xds_client_->MakeChannelArg());
  }
  return grpc_channel_args_copy_and_add(args, new_args.data(),
                                        new_args.size());
}
// Instantiates the child policy (always priority_experimental) and
// links its pollset_set to ours.  Returns nullptr on failure.
OrphanablePtr<LoadBalancingPolicy> EdsLb::CreateChildPolicyLocked(
    const grpc_channel_args* args) {
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = combiner();
  lb_policy_args.args = args;
  lb_policy_args.channel_control_helper =
      absl::make_unique<Helper>(Ref(DEBUG_LOCATION, "Helper"));
  auto lb_policy = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
      "priority_experimental", std::move(lb_policy_args));
  if (GPR_UNLIKELY(lb_policy == nullptr)) {
    gpr_log(GPR_ERROR, "[edslb %p] failure creating child policy", this);
    return nullptr;
  }
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
    gpr_log(GPR_INFO, "[edslb %p]: Created new child policy %p", this,
            lb_policy.get());
  }
  // Add our interested_parties pollset_set to that of the newly created
  // child policy, so the child makes progress upon activity on this
  // policy, which in turn is tied to the application's call.
  grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
                                   interested_parties());
  return lb_policy;
}
// Re-wraps the current picker in a DropPicker and reports it to the
// channel, choosing the connectivity state to report.
void EdsLb::MaybeUpdateDropPickerLocked() {
  const bool dropping_everything =
      drop_config_ != nullptr && drop_config_->drop_all();
  if (dropping_everything) {
    // When all calls are being dropped, report READY regardless of what
    // (or whether) the child has reported.
    channel_control_helper()->UpdateState(GRPC_CHANNEL_READY,
                                          absl::make_unique<DropPicker>(this));
  } else if (child_picker_ != nullptr) {
    // Otherwise, only update if the child has given us a picker, and
    // report the child's state.
    channel_control_helper()->UpdateState(child_state_,
                                          absl::make_unique<DropPicker>(this));
  }
}
//
// factory
//
// Factory for the eds_experimental LB policy: creates policy instances
// (wrapped in EdsChildHandler) and parses/validates the service-config
// JSON into an EdsLbConfig.
class EdsLbFactory : public LoadBalancingPolicyFactory {
 public:
  OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
      LoadBalancingPolicy::Args args) const override {
    // Wrap in EdsChildHandler so that config changes that require a new
    // EdsLb instance (see ConfigChangeRequiresNewPolicyInstance below)
    // are handled by swapping instances.
    return MakeOrphanable<EdsChildHandler>(std::move(args), &grpc_lb_eds_trace);
  }

  const char* name() const override { return kEds; }

  // Parses the eds_experimental config.  Collects all field errors into
  // one combined error; on failure sets *error and returns nullptr.
  RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
      const Json& json, grpc_error** error) const override {
    GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
    if (json.type() == Json::Type::JSON_NULL) {
      // eds was mentioned as a policy in the deprecated loadBalancingPolicy
      // field or in the client API.
      *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:loadBalancingPolicy error:eds policy requires configuration. "
          "Please use loadBalancingConfig field of service config instead.");
      return nullptr;
    }
    std::vector<grpc_error*> error_list;
    // EDS service name (optional string).
    std::string eds_service_name;
    auto it = json.object_value().find("edsServiceName");
    if (it != json.object_value().end()) {
      if (it->second.type() != Json::Type::STRING) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "field:edsServiceName error:type should be string"));
      } else {
        eds_service_name = it->second.string_value();
      }
    }
    // Cluster name (required string).
    std::string cluster_name;
    it = json.object_value().find("clusterName");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:clusterName error:required field missing"));
    } else if (it->second.type() != Json::Type::STRING) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:clusterName error:type should be string"));
    } else {
      cluster_name = it->second.string_value();
    }
    // LRS load reporting server name (optional string; absence means
    // load reporting is disabled).
    absl::optional<std::string> lrs_load_reporting_server_name;
    it = json.object_value().find("lrsLoadReportingServerName");
    if (it != json.object_value().end()) {
      if (it->second.type() != Json::Type::STRING) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "field:lrsLoadReportingServerName error:type should be string"));
      } else {
        lrs_load_reporting_server_name.emplace(it->second.string_value());
      }
    }
    // Locality-picking policy.  Defaults to weighted_target_experimental
    // with an empty targets map if not specified.
    Json locality_picking_policy;
    it = json.object_value().find("localityPickingPolicy");
    if (it == json.object_value().end()) {
      locality_picking_policy = Json::Array{
          Json::Object{
              {"weighted_target_experimental",
               Json::Object{
                   {"targets", Json::Object()},
               }},
          },
      };
    } else {
      locality_picking_policy = it->second;
    }
    // Validate that the policy parses; only the JSON is stored, since
    // the actual child config is regenerated on each EDS update.
    grpc_error* parse_error = GRPC_ERROR_NONE;
    if (LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
            locality_picking_policy, &parse_error) == nullptr) {
      GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
      error_list.push_back(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
          "localityPickingPolicy", &parse_error, 1));
      GRPC_ERROR_UNREF(parse_error);
    }
    // Endpoint-picking policy.  Called "childPolicy" for xds policy.
    // Defaults to round_robin if not specified.
    Json endpoint_picking_policy;
    it = json.object_value().find("endpointPickingPolicy");
    if (it == json.object_value().end()) {
      endpoint_picking_policy = Json::Array{
          Json::Object{
              {"round_robin", Json::Object()},
          },
      };
    } else {
      endpoint_picking_policy = it->second;
    }
    // Same validate-then-store-JSON treatment as above.
    parse_error = GRPC_ERROR_NONE;
    if (LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
            endpoint_picking_policy, &parse_error) == nullptr) {
      GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
      error_list.push_back(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
          "endpointPickingPolicy", &parse_error, 1));
      GRPC_ERROR_UNREF(parse_error);
    }
    // Construct config, or report all accumulated errors.
    if (error_list.empty()) {
      return MakeRefCounted<EdsLbConfig>(
          std::move(cluster_name), std::move(eds_service_name),
          std::move(lrs_load_reporting_server_name),
          std::move(locality_picking_policy),
          std::move(endpoint_picking_policy));
    } else {
      *error = GRPC_ERROR_CREATE_FROM_VECTOR(
          "eds_experimental LB policy config", &error_list);
      return nullptr;
    }
  }

 private:
  // Wraps EdsLb instances so that a config change that alters the
  // identity of the watched resource replaces the instance.
  class EdsChildHandler : public ChildPolicyHandler {
   public:
    EdsChildHandler(Args args, TraceFlag* tracer)
        : ChildPolicyHandler(std::move(args), tracer) {}

    // A new EdsLb instance is required when the cluster name or EDS
    // service name changes.
    bool ConfigChangeRequiresNewPolicyInstance(
        LoadBalancingPolicy::Config* old_config,
        LoadBalancingPolicy::Config* new_config) const override {
      GPR_ASSERT(old_config->name() == kEds);
      GPR_ASSERT(new_config->name() == kEds);
      EdsLbConfig* old_eds_config = static_cast<EdsLbConfig*>(old_config);
      EdsLbConfig* new_eds_config = static_cast<EdsLbConfig*>(new_config);
      return old_eds_config->cluster_name() != new_eds_config->cluster_name() ||
             old_eds_config->eds_service_name() !=
                 new_eds_config->eds_service_name();
    }

    OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
        const char* name, LoadBalancingPolicy::Args args) const override {
      return MakeOrphanable<EdsLb>(std::move(args));
    }
  };
};
} // namespace
} // namespace grpc_core
//
// Plugin registration
//
// Registers the eds_experimental LB policy factory with the global
// LB policy registry.  Ownership of the factory passes to the registry.
void grpc_lb_policy_eds_init() {
  grpc_core::LoadBalancingPolicyRegistry::Builder::
      RegisterLoadBalancingPolicyFactory(
          absl::make_unique<grpc_core::EdsLbFactory>());
}
// No-op: the registry owns the factory, so there is nothing to clean up.
void grpc_lb_policy_eds_shutdown() {}

@ -0,0 +1,524 @@
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/xds/xds_client.h"
#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
namespace grpc_core {
TraceFlag grpc_lb_lrs_trace(false, "lrs_lb");
namespace {
constexpr char kLrs[] = "lrs_experimental";
// Config for LRS LB policy.  Holds the validated fields parsed by
// LrsLbFactory::ParseLoadBalancingConfig().
class LrsLbConfig : public LoadBalancingPolicy::Config {
 public:
  LrsLbConfig(RefCountedPtr<LoadBalancingPolicy::Config> child_policy,
              std::string cluster_name, std::string eds_service_name,
              std::string lrs_load_reporting_server_name,
              RefCountedPtr<XdsLocalityName> locality_name)
      : child_policy_(std::move(child_policy)),
        cluster_name_(std::move(cluster_name)),
        eds_service_name_(std::move(eds_service_name)),
        lrs_load_reporting_server_name_(
            std::move(lrs_load_reporting_server_name)),
        locality_name_(std::move(locality_name)) {}

  const char* name() const override { return kLrs; }

  // Config for the wrapped child policy.
  RefCountedPtr<LoadBalancingPolicy::Config> child_policy() const {
    return child_policy_;
  }
  // Cluster whose load is reported.
  const std::string& cluster_name() const { return cluster_name_; }
  // EDS service name; may be empty.
  const std::string& eds_service_name() const { return eds_service_name_; }
  // Server that load reports are sent to.
  // NOTE: fixed a stray semicolon that previously followed this
  // member-function body.
  const std::string& lrs_load_reporting_server_name() const {
    return lrs_load_reporting_server_name_;
  }
  // Locality whose load is reported.
  RefCountedPtr<XdsLocalityName> locality_name() const {
    return locality_name_;
  }

 private:
  RefCountedPtr<LoadBalancingPolicy::Config> child_policy_;
  std::string cluster_name_;
  std::string eds_service_name_;
  std::string lrs_load_reporting_server_name_;
  RefCountedPtr<XdsLocalityName> locality_name_;
};
// LRS LB policy.
// Delegates picking to a single child policy and wraps the child's
// picker so that per-call stats for one locality are recorded via the
// XdsClient's XdsClusterLocalityStats.
class LrsLb : public LoadBalancingPolicy {
 public:
  LrsLb(RefCountedPtr<XdsClient> xds_client, Args args);

  const char* name() const override { return kLrs; }

  void UpdateLocked(UpdateArgs args) override;
  void ExitIdleLocked() override;
  void ResetBackoffLocked() override;

 private:
  // A simple wrapper for ref-counting a picker from the child policy.
  class RefCountedPicker : public RefCounted<RefCountedPicker> {
   public:
    explicit RefCountedPicker(std::unique_ptr<SubchannelPicker> picker)
        : picker_(std::move(picker)) {}
    PickResult Pick(PickArgs args) { return picker_->Pick(args); }

   private:
    std::unique_ptr<SubchannelPicker> picker_;
  };

  // A picker that wraps the picker from the child to perform load reporting.
  class LoadReportingPicker : public SubchannelPicker {
   public:
    LoadReportingPicker(RefCountedPtr<RefCountedPicker> picker,
                        RefCountedPtr<XdsClusterLocalityStats> locality_stats)
        : picker_(std::move(picker)),
          locality_stats_(std::move(locality_stats)) {}
    PickResult Pick(PickArgs args);

   private:
    RefCountedPtr<RefCountedPicker> picker_;
    RefCountedPtr<XdsClusterLocalityStats> locality_stats_;
  };

  // Helper passed to the child policy; forwards subchannel creation,
  // state updates, re-resolution requests, and trace events to the
  // parent channel, caching state/picker on the LrsLb along the way.
  class Helper : public ChannelControlHelper {
   public:
    explicit Helper(RefCountedPtr<LrsLb> lrs_policy)
        : lrs_policy_(std::move(lrs_policy)) {}
    ~Helper() { lrs_policy_.reset(DEBUG_LOCATION, "Helper"); }
    RefCountedPtr<SubchannelInterface> CreateSubchannel(
        const grpc_channel_args& args) override;
    void UpdateState(grpc_connectivity_state state,
                     std::unique_ptr<SubchannelPicker> picker) override;
    void RequestReresolution() override;
    void AddTraceEvent(TraceSeverity severity, StringView message) override;

   private:
    RefCountedPtr<LrsLb> lrs_policy_;
  };

  ~LrsLb();

  void ShutdownLocked() override;

  OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
      const grpc_channel_args* args);
  void UpdateChildPolicyLocked(ServerAddressList addresses,
                               const grpc_channel_args* args);
  void MaybeUpdatePickerLocked();

  // Current config from the resolver.
  RefCountedPtr<LrsLbConfig> config_;

  // Internal state.
  bool shutting_down_ = false;

  // The xds client.
  RefCountedPtr<XdsClient> xds_client_;

  // The stats for client-side load reporting.
  RefCountedPtr<XdsClusterLocalityStats> locality_stats_;

  OrphanablePtr<LoadBalancingPolicy> child_policy_;

  // Latest state and picker reported by the child policy.
  grpc_connectivity_state state_ = GRPC_CHANNEL_IDLE;
  RefCountedPtr<RefCountedPicker> picker_;
};
//
// LrsLb::LoadReportingPicker
//
// Performs a pick via the child's picker and, on a completed pick,
// records call-start and hooks call completion for load reporting.
LoadBalancingPolicy::PickResult LrsLb::LoadReportingPicker::Pick(
    LoadBalancingPolicy::PickArgs args) {
  // Forward the pick to the picker returned from the child policy.
  PickResult result = picker_->Pick(args);
  if (result.type == PickResult::PICK_COMPLETE &&
      result.subchannel != nullptr) {
    // Record a call started.
    locality_stats_->AddCallStarted();
    // Intercept the recv_trailing_metadata op to record call completion.
    // A manual ref is released into the lambda so the stats object stays
    // alive until the callback fires; the callback unrefs it.
    XdsClusterLocalityStats* locality_stats =
        locality_stats_->Ref(DEBUG_LOCATION, "LocalityStats+call").release();
    result.recv_trailing_metadata_ready =
        // Note: This callback does not run in either the control plane
        // combiner or in the data plane mutex.
        [locality_stats](grpc_error* error, MetadataInterface* /*metadata*/,
                         CallState* /*call_state*/) {
          // Any non-OK error counts the call as failed.
          const bool call_failed = error != GRPC_ERROR_NONE;
          locality_stats->AddCallFinished(call_failed);
          locality_stats->Unref(DEBUG_LOCATION, "LocalityStats+call");
        };
  }
  return result;
}
//
// LrsLb
//
// Takes a ref to the XdsClient, which the factory obtained from the
// channel args.
LrsLb::LrsLb(RefCountedPtr<XdsClient> xds_client, Args args)
    : LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO, "[lrs_lb %p] created -- using xds client %p from channel",
            this, xds_client_.get());
  }
}
// Trace-only destructor; members are released by their smart pointers
// (ShutdownLocked() resets them explicitly beforehand).
LrsLb::~LrsLb() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO, "[lrs_lb %p] destroying xds LB policy", this);
  }
}
// Tears down the policy: unlinks and destroys the child, then releases
// the picker, stats, and XdsClient refs.
void LrsLb::ShutdownLocked() {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO, "[lrs_lb %p] shutting down", this);
  }
  shutting_down_ = true;
  // Remove the child policy's interested_parties pollset_set from the
  // xDS policy.
  if (child_policy_ != nullptr) {
    grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
                                     interested_parties());
    child_policy_.reset();
  }
  // Drop our ref to the child's picker, in case it's holding a ref to
  // the child.
  picker_.reset();
  // Release load-reporting stats and our ref to the XdsClient.
  locality_stats_.reset();
  xds_client_.reset();
}
// Delegates ExitIdle to the child policy, if one has been created.
void LrsLb::ExitIdleLocked() {
  if (child_policy_ == nullptr) return;
  child_policy_->ExitIdleLocked();
}
// Delegates backoff reset to the child policy, if one has been created.
// The XdsClient will have its backoff reset by the xds resolver, so we
// don't need to do it here.
void LrsLb::ResetBackoffLocked() {
  if (child_policy_ == nullptr) return;
  child_policy_->ResetBackoffLocked();
}
// Handles a new resolver update: swaps in the new config, re-registers
// locality stats if the load-report target changed, and forwards the
// addresses/args to the child policy.
void LrsLb::UpdateLocked(UpdateArgs args) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO, "[lrs_lb %p] Received update", this);
  }
  // Update config.
  auto old_config = std::move(config_);
  config_ = std::move(args.config);
  // Update load reporting if needed: (re)create the locality stats
  // object when this is the first update or when any field identifying
  // the load-report target has changed.
  if (old_config == nullptr ||
      config_->lrs_load_reporting_server_name() !=
          old_config->lrs_load_reporting_server_name() ||
      config_->cluster_name() != old_config->cluster_name() ||
      config_->eds_service_name() != old_config->eds_service_name() ||
      *config_->locality_name() != *old_config->locality_name()) {
    locality_stats_ = xds_client_->AddClusterLocalityStats(
        config_->lrs_load_reporting_server_name(), config_->cluster_name(),
        config_->eds_service_name(), config_->locality_name());
    // Re-wrap any existing picker with the new stats object.
    MaybeUpdatePickerLocked();
  }
  // Update child policy.
  UpdateChildPolicyLocked(std::move(args.addresses), args.args);
  args.args = nullptr;  // Ownership passed to UpdateChildPolicyLocked().
}
// If the child has reported a picker, wrap it for load reporting and
// push the result (with the cached state) up to the channel.
void LrsLb::MaybeUpdatePickerLocked() {
  if (picker_ == nullptr) return;
  auto wrapped_picker =
      absl::make_unique<LoadReportingPicker>(picker_, locality_stats_);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO, "[lrs_lb %p] updating connectivity: state=%s picker=%p",
            this, ConnectivityStateName(state_), wrapped_picker.get());
  }
  channel_control_helper()->UpdateState(state_, std::move(wrapped_picker));
}
// Creates the child policy handler and links its pollset_set to ours.
OrphanablePtr<LoadBalancingPolicy> LrsLb::CreateChildPolicyLocked(
    const grpc_channel_args* args) {
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = combiner();
  lb_policy_args.args = args;
  lb_policy_args.channel_control_helper =
      absl::make_unique<Helper>(Ref(DEBUG_LOCATION, "Helper"));
  auto handler = MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
                                                    &grpc_lb_lrs_trace);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO, "[lrs_lb %p] Created new child policy handler %p", this,
            handler.get());
  }
  // Add our interested_parties pollset_set to that of the newly created
  // child policy, so the child makes progress upon activity on this
  // policy, which in turn is tied to the application's call.
  grpc_pollset_set_add_pollset_set(handler->interested_parties(),
                                   interested_parties());
  return handler;
}
// Forwards the latest addresses, child config, and channel args to the
// child policy, creating the child first on the initial update.
// Takes ownership of args (see the caller in UpdateLocked()).
void LrsLb::UpdateChildPolicyLocked(ServerAddressList addresses,
                                    const grpc_channel_args* args) {
  if (child_policy_ == nullptr) child_policy_ = CreateChildPolicyLocked(args);
  // Construct update args.
  UpdateArgs update_args;
  update_args.addresses = std::move(addresses);
  update_args.config = config_->child_policy();
  update_args.args = args;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO, "[lrs_lb %p] Updating child policy handler %p", this,
            child_policy_.get());
  }
  child_policy_->UpdateLocked(std::move(update_args));
}
//
// LrsLb::Helper
//
// Delegates subchannel creation to the parent channel's helper,
// refusing once the policy has shut down.
RefCountedPtr<SubchannelInterface> LrsLb::Helper::CreateSubchannel(
    const grpc_channel_args& args) {
  if (lrs_policy_->shutting_down_) return nullptr;
  return lrs_policy_->channel_control_helper()->CreateSubchannel(args);
}
// Receives state/picker updates from the child: caches them on the
// parent policy and has it re-wrap the picker for load reporting.
void LrsLb::Helper::UpdateState(grpc_connectivity_state state,
                                std::unique_ptr<SubchannelPicker> picker) {
  if (lrs_policy_->shutting_down_) return;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
    gpr_log(GPR_INFO,
            "[lrs_lb %p] child connectivity state update: state=%s picker=%p",
            lrs_policy_.get(), ConnectivityStateName(state), picker.get());
  }
  // Cache the child's latest state and picker on the parent...
  lrs_policy_->state_ = state;
  lrs_policy_->picker_ = MakeRefCounted<RefCountedPicker>(std::move(picker));
  // ...then let the parent wrap the picker and report it to the channel.
  lrs_policy_->MaybeUpdatePickerLocked();
}
// Forwards re-resolution requests up to the parent channel, unless shut down.
void LrsLb::Helper::RequestReresolution() {
  if (lrs_policy_->shutting_down_) return;
  lrs_policy_->channel_control_helper()->RequestReresolution();
}
// Forwards channel trace events up to the parent channel, unless shut down.
void LrsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) {
  if (lrs_policy_->shutting_down_) return;
  lrs_policy_->channel_control_helper()->AddTraceEvent(severity, message);
}
//
// factory
//
// Factory for the lrs_experimental LB policy: creates instances (which
// require an XdsClient in the channel args) and parses/validates the
// JSON config into an LrsLbConfig.
class LrsLbFactory : public LoadBalancingPolicyFactory {
 public:
  OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
      LoadBalancingPolicy::Args args) const override {
    // The policy cannot function without an XdsClient to report load to.
    RefCountedPtr<XdsClient> xds_client =
        XdsClient::GetFromChannelArgs(*args.args);
    if (xds_client == nullptr) {
      gpr_log(GPR_ERROR,
              "XdsClient not present in channel args -- cannot instantiate "
              "lrs LB policy");
      return nullptr;
    }
    return MakeOrphanable<LrsLb>(std::move(xds_client), std::move(args));
  }

  const char* name() const override { return kLrs; }

  // Parses the lrs_experimental config.  Collects all field errors into
  // one combined error; on failure sets *error and returns nullptr.
  RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
      const Json& json, grpc_error** error) const override {
    GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
    if (json.type() == Json::Type::JSON_NULL) {
      // lrs was mentioned as a policy in the deprecated loadBalancingPolicy
      // field or in the client API.
      *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:loadBalancingPolicy error:lrs policy requires configuration. "
          "Please use loadBalancingConfig field of service config instead.");
      return nullptr;
    }
    std::vector<grpc_error*> error_list;
    // Child policy (required; must itself parse).
    RefCountedPtr<LoadBalancingPolicy::Config> child_policy;
    auto it = json.object_value().find("childPolicy");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:childPolicy error:required field missing"));
    } else {
      grpc_error* parse_error = GRPC_ERROR_NONE;
      child_policy = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
          it->second, &parse_error);
      if (child_policy == nullptr) {
        GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
        std::vector<grpc_error*> child_errors;
        child_errors.push_back(parse_error);
        error_list.push_back(
            GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors));
      }
    }
    // Cluster name (required string).
    std::string cluster_name;
    it = json.object_value().find("clusterName");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:clusterName error:required field missing"));
    } else if (it->second.type() != Json::Type::STRING) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:clusterName error:type should be string"));
    } else {
      cluster_name = it->second.string_value();
    }
    // EDS service name (optional string).
    std::string eds_service_name;
    it = json.object_value().find("edsServiceName");
    if (it != json.object_value().end()) {
      if (it->second.type() != Json::Type::STRING) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "field:edsServiceName error:type should be string"));
      } else {
        eds_service_name = it->second.string_value();
      }
    }
    // Locality (required object; see ParseLocality()).
    RefCountedPtr<XdsLocalityName> locality_name;
    it = json.object_value().find("locality");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:locality error:required field missing"));
    } else {
      std::vector<grpc_error*> child_errors =
          ParseLocality(it->second, &locality_name);
      if (!child_errors.empty()) {
        error_list.push_back(
            GRPC_ERROR_CREATE_FROM_VECTOR("field:locality", &child_errors));
      }
    }
    // LRS load reporting server name (required string).
    std::string lrs_load_reporting_server_name;
    it = json.object_value().find("lrsLoadReportingServerName");
    if (it == json.object_value().end()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:lrsLoadReportingServerName error:required field missing"));
    } else if (it->second.type() != Json::Type::STRING) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "field:lrsLoadReportingServerName error:type should be string"));
    } else {
      lrs_load_reporting_server_name = it->second.string_value();
    }
    // Report all accumulated errors, or construct the config.
    if (!error_list.empty()) {
      *error = GRPC_ERROR_CREATE_FROM_VECTOR(
          "lrs_experimental LB policy config", &error_list);
      return nullptr;
    }
    return MakeRefCounted<LrsLbConfig>(
        std::move(child_policy), std::move(cluster_name),
        std::move(eds_service_name), std::move(lrs_load_reporting_server_name),
        std::move(locality_name));
  }

 private:
  // Parses the "locality" JSON object (region/zone/subzone, all optional
  // strings, at least one required) into *name.  Returns any field errors;
  // *name is set only when the returned list is empty.
  static std::vector<grpc_error*> ParseLocality(
      const Json& json, RefCountedPtr<XdsLocalityName>* name) {
    std::vector<grpc_error*> error_list;
    if (json.type() != Json::Type::OBJECT) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "locality field is not an object"));
      return error_list;
    }
    std::string region;
    auto it = json.object_value().find("region");
    if (it != json.object_value().end()) {
      if (it->second.type() != Json::Type::STRING) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "\"region\" field is not a string"));
      } else {
        region = it->second.string_value();
      }
    }
    std::string zone;
    it = json.object_value().find("zone");
    if (it != json.object_value().end()) {
      if (it->second.type() != Json::Type::STRING) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "\"zone\" field is not a string"));
      } else {
        zone = it->second.string_value();
      }
    }
    std::string subzone;
    it = json.object_value().find("subzone");
    if (it != json.object_value().end()) {
      if (it->second.type() != Json::Type::STRING) {
        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            "\"subzone\" field is not a string"));
      } else {
        subzone = it->second.string_value();
      }
    }
    if (region.empty() && zone.empty() && subzone.empty()) {
      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
          "at least one of region, zone, or subzone must be set"));
    }
    if (error_list.empty()) {
      *name = MakeRefCounted<XdsLocalityName>(region, zone, subzone);
    }
    return error_list;
  }
};
} // namespace
} // namespace grpc_core
//
// Plugin registration
//
// Registers the lrs_experimental LB policy factory with the global
// LB policy registry.  Ownership of the factory passes to the registry.
void grpc_lb_policy_lrs_init() {
  grpc_core::LoadBalancingPolicyRegistry::Builder::
      RegisterLoadBalancingPolicyFactory(
          absl::make_unique<grpc_core::LrsLbFactory>());
}
// No-op: the registry owns the factory, so there is nothing to clean up.
void grpc_lb_policy_lrs_shutdown() {}

File diff suppressed because it is too large Load Diff

@ -29,5 +29,4 @@
#define GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER \
"grpc.address_is_backend_from_xds_load_balancer"
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H \
*/
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H */

@ -35,6 +35,8 @@ class RegistryState {
void RegisterLoadBalancingPolicyFactory(
std::unique_ptr<LoadBalancingPolicyFactory> factory) {
gpr_log(GPR_DEBUG, "registering LB policy factory for \"%s\"",
factory->name());
for (size_t i = 0; i < factories_.size(); ++i) {
GPR_ASSERT(strcmp(factories_[i]->name(), factory->name()) != 0);
}

@ -1420,13 +1420,6 @@ grpc_error* EdsResponseParse(
if (error != GRPC_ERROR_NONE) return error;
}
}
// Validate the update content.
if (eds_update.priority_list_update.empty() &&
!eds_update.drop_config->drop_all()) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"EDS response doesn't contain any valid "
"locality but doesn't require to drop all calls.");
}
eds_update_map->emplace(std::string(cluster_name.data, cluster_name.size),
std::move(eds_update));
}

@ -42,11 +42,7 @@ class XdsLocalityName : public RefCounted<XdsLocalityName> {
struct Less {
bool operator()(const XdsLocalityName* lhs,
const XdsLocalityName* rhs) const {
int cmp_result = lhs->region_.compare(rhs->region_);
if (cmp_result != 0) return cmp_result < 0;
cmp_result = lhs->zone_.compare(rhs->zone_);
if (cmp_result != 0) return cmp_result < 0;
return lhs->sub_zone_.compare(rhs->sub_zone_) < 0;
return lhs->Compare(*rhs) < 0;
}
bool operator()(const RefCountedPtr<XdsLocalityName>& lhs,
@ -65,6 +61,18 @@ class XdsLocalityName : public RefCounted<XdsLocalityName> {
sub_zone_ == other.sub_zone_;
}
bool operator!=(const XdsLocalityName& other) const {
return !(*this == other);
}
int Compare(const XdsLocalityName& other) const {
int cmp_result = region_.compare(other.region_);
if (cmp_result != 0) return cmp_result;
cmp_result = zone_.compare(other.zone_);
if (cmp_result != 0) return cmp_result;
return sub_zone_.compare(other.sub_zone_);
}
const std::string& region() const { return region_; }
const std::string& zone() const { return zone_; }
const std::string& sub_zone() const { return sub_zone_; }

@ -21,6 +21,8 @@
#include <assert.h>
#include <string.h>
#include "absl/types/optional.h"
#include <grpc/compression.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
@ -40,94 +42,156 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
static void start_send_message_batch(void* arg, grpc_error* unused);
static void send_message_on_complete(void* arg, grpc_error* error);
static void on_send_message_next_done(void* arg, grpc_error* error);
namespace {
struct channel_data {
class ChannelData {
public:
explicit ChannelData(grpc_channel_element_args* args) {
// Get the enabled and the default algorithms from channel args.
enabled_compression_algorithms_bitset_ =
grpc_channel_args_compression_algorithm_get_states(args->channel_args);
default_compression_algorithm_ =
grpc_channel_args_get_channel_default_compression_algorithm(
args->channel_args);
// Make sure the default is enabled.
if (!GPR_BITGET(enabled_compression_algorithms_bitset_,
default_compression_algorithm_)) {
const char* name;
GPR_ASSERT(grpc_compression_algorithm_name(default_compression_algorithm_,
&name) == 1);
gpr_log(GPR_ERROR,
"default compression algorithm %s not enabled: switching to none",
name);
default_compression_algorithm_ = GRPC_COMPRESS_NONE;
}
enabled_message_compression_algorithms_bitset_ =
grpc_compression_bitset_to_message_bitset(
enabled_compression_algorithms_bitset_);
enabled_stream_compression_algorithms_bitset_ =
grpc_compression_bitset_to_stream_bitset(
enabled_compression_algorithms_bitset_);
GPR_ASSERT(!args->is_last);
}
grpc_compression_algorithm default_compression_algorithm() const {
return default_compression_algorithm_;
}
uint32_t enabled_compression_algorithms_bitset() const {
return enabled_compression_algorithms_bitset_;
}
uint32_t enabled_message_compression_algorithms_bitset() const {
return enabled_message_compression_algorithms_bitset_;
}
uint32_t enabled_stream_compression_algorithms_bitset() const {
return enabled_stream_compression_algorithms_bitset_;
}
private:
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
grpc_compression_algorithm default_compression_algorithm_;
/** Bitset of enabled compression algorithms */
uint32_t enabled_compression_algorithms_bitset;
uint32_t enabled_compression_algorithms_bitset_;
/** Bitset of enabled message compression algorithms */
uint32_t enabled_message_compression_algorithms_bitset;
uint32_t enabled_message_compression_algorithms_bitset_;
/** Bitset of enabled stream compression algorithms */
uint32_t enabled_stream_compression_algorithms_bitset;
uint32_t enabled_stream_compression_algorithms_bitset_;
};
struct call_data {
call_data(grpc_call_element* elem, const grpc_call_element_args& args)
: call_combiner(args.call_combiner) {
channel_data* channeld = static_cast<channel_data*>(elem->channel_data);
class CallData {
public:
CallData(grpc_call_element* elem, const grpc_call_element_args& args)
: call_combiner_(args.call_combiner) {
ChannelData* channeld = static_cast<ChannelData*>(elem->channel_data);
// The call's message compression algorithm is set to channel's default
// setting. It can be overridden later by initial metadata.
if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset,
channeld->default_compression_algorithm))) {
message_compression_algorithm =
if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset(),
channeld->default_compression_algorithm()))) {
message_compression_algorithm_ =
grpc_compression_algorithm_to_message_compression_algorithm(
channeld->default_compression_algorithm);
channeld->default_compression_algorithm());
}
GRPC_CLOSURE_INIT(&start_send_message_batch_in_call_combiner,
start_send_message_batch, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&start_send_message_batch_in_call_combiner_,
StartSendMessageBatch, elem, grpc_schedule_on_exec_ctx);
}
~call_data() {
if (state_initialized) {
grpc_slice_buffer_destroy_internal(&slices);
~CallData() {
if (state_initialized_) {
grpc_slice_buffer_destroy_internal(&slices_);
}
GRPC_ERROR_UNREF(cancel_error);
GRPC_ERROR_UNREF(cancel_error_);
}
grpc_core::CallCombiner* call_combiner;
grpc_message_compression_algorithm message_compression_algorithm =
void CompressStartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch);
private:
bool SkipMessageCompression();
void InitializeState(grpc_call_element* elem);
grpc_error* ProcessSendInitialMetadata(grpc_call_element* elem,
grpc_metadata_batch* initial_metadata);
// Methods for processing a send_message batch
static void StartSendMessageBatch(void* elem_arg, grpc_error* unused);
static void OnSendMessageNextDone(void* elem_arg, grpc_error* error);
grpc_error* PullSliceFromSendMessage();
void ContinueReadingSendMessage(grpc_call_element* elem);
void FinishSendMessage(grpc_call_element* elem);
void SendMessageBatchContinue(grpc_call_element* elem);
static void FailSendMessageBatchInCallCombiner(void* calld_arg,
grpc_error* error);
static void SendMessageOnComplete(void* calld_arg, grpc_error* error);
grpc_core::CallCombiner* call_combiner_;
grpc_message_compression_algorithm message_compression_algorithm_ =
GRPC_MESSAGE_COMPRESS_NONE;
grpc_error* cancel_error = GRPC_ERROR_NONE;
grpc_transport_stream_op_batch* send_message_batch = nullptr;
bool seen_initial_metadata = false;
grpc_error* cancel_error_ = GRPC_ERROR_NONE;
grpc_transport_stream_op_batch* send_message_batch_ = nullptr;
bool seen_initial_metadata_ = false;
/* Set to true, if the fields below are initialized. */
bool state_initialized = false;
grpc_closure start_send_message_batch_in_call_combiner;
bool state_initialized_ = false;
grpc_closure start_send_message_batch_in_call_combiner_;
/* The fields below are only initialized when we compress the payload.
* Keep them at the bottom of the struct, so they don't pollute the
* cache-lines. */
grpc_linked_mdelem message_compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
grpc_linked_mdelem accept_stream_encoding_storage;
grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream>
replacement_stream;
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
grpc_closure on_send_message_next_done;
grpc_linked_mdelem message_compression_algorithm_storage_;
grpc_linked_mdelem stream_compression_algorithm_storage_;
grpc_linked_mdelem accept_encoding_storage_;
grpc_linked_mdelem accept_stream_encoding_storage_;
grpc_slice_buffer slices_; /**< Buffers up input slices to be compressed */
// Allocate space for the replacement stream
std::aligned_storage<sizeof(grpc_core::SliceBufferByteStream),
alignof(grpc_core::SliceBufferByteStream)>::type
replacement_stream_;
grpc_closure* original_send_message_on_complete_ = nullptr;
grpc_closure send_message_on_complete_;
grpc_closure on_send_message_next_done_;
};
} // namespace
// Returns true if we should skip message compression for the current message.
static bool skip_message_compression(grpc_call_element* elem) {
call_data* calld = static_cast<call_data*>(elem->call_data);
bool CallData::SkipMessageCompression() {
// If the flags of this message indicate that it shouldn't be compressed, we
// skip message compression.
uint32_t flags =
calld->send_message_batch->payload->send_message.send_message->flags();
send_message_batch_->payload->send_message.send_message->flags();
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
return true;
}
// If this call doesn't have any message compression algorithm set, skip
// message compression.
return calld->message_compression_algorithm == GRPC_MESSAGE_COMPRESS_NONE;
return message_compression_algorithm_ == GRPC_MESSAGE_COMPRESS_NONE;
}
// Determines the compression algorithm from the initial metadata and the
// channel's default setting.
static grpc_compression_algorithm find_compression_algorithm(
grpc_metadata_batch* initial_metadata, channel_data* channeld) {
grpc_compression_algorithm FindCompressionAlgorithm(
grpc_metadata_batch* initial_metadata, ChannelData* channeld) {
if (initial_metadata->idx.named.grpc_internal_encoding_request == nullptr) {
return channeld->default_compression_algorithm;
return channeld->default_compression_algorithm();
}
grpc_compression_algorithm compression_algorithm;
// Parse the compression algorithm from the initial metadata.
@ -143,7 +207,7 @@ static grpc_compression_algorithm find_compression_algorithm(
// enabled.
// TODO(juanlishen): Maybe use channel default or abort() if the algorithm
// from the initial metadata is disabled.
if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset,
if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset(),
compression_algorithm))) {
return compression_algorithm;
}
@ -158,30 +222,24 @@ static grpc_compression_algorithm find_compression_algorithm(
return GRPC_COMPRESS_NONE;
}
static void initialize_state(grpc_call_element* elem, call_data* calld) {
GPR_DEBUG_ASSERT(!calld->state_initialized);
calld->state_initialized = true;
grpc_slice_buffer_init(&calld->slices);
GRPC_CLOSURE_INIT(&calld->send_message_on_complete,
::send_message_on_complete, elem,
void CallData::InitializeState(grpc_call_element* elem) {
GPR_DEBUG_ASSERT(!state_initialized_);
state_initialized_ = true;
grpc_slice_buffer_init(&slices_);
GRPC_CLOSURE_INIT(&send_message_on_complete_, SendMessageOnComplete, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
::on_send_message_next_done, elem,
GRPC_CLOSURE_INIT(&on_send_message_next_done_, OnSendMessageNextDone, elem,
grpc_schedule_on_exec_ctx);
}
static grpc_error* process_send_initial_metadata(
grpc_call_element* elem,
grpc_metadata_batch* initial_metadata) GRPC_MUST_USE_RESULT;
static grpc_error* process_send_initial_metadata(
grpc_error* CallData::ProcessSendInitialMetadata(
grpc_call_element* elem, grpc_metadata_batch* initial_metadata) {
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* channeld = static_cast<channel_data*>(elem->channel_data);
ChannelData* channeld = static_cast<ChannelData*>(elem->channel_data);
// Find the compression algorithm.
grpc_compression_algorithm compression_algorithm =
find_compression_algorithm(initial_metadata, channeld);
FindCompressionAlgorithm(initial_metadata, channeld);
// Note that at most one of the following algorithms can be set.
calld->message_compression_algorithm =
message_compression_algorithm_ =
grpc_compression_algorithm_to_message_compression_algorithm(
compression_algorithm);
grpc_stream_compression_algorithm stream_compression_algorithm =
@ -189,321 +247,300 @@ static grpc_error* process_send_initial_metadata(
compression_algorithm);
// Hint compression algorithm.
grpc_error* error = GRPC_ERROR_NONE;
if (calld->message_compression_algorithm != GRPC_MESSAGE_COMPRESS_NONE) {
initialize_state(elem, calld);
if (message_compression_algorithm_ != GRPC_MESSAGE_COMPRESS_NONE) {
InitializeState(elem);
error = grpc_metadata_batch_add_tail(
initial_metadata, &calld->message_compression_algorithm_storage,
initial_metadata, &message_compression_algorithm_storage_,
grpc_message_compression_encoding_mdelem(
calld->message_compression_algorithm),
message_compression_algorithm_),
GRPC_BATCH_GRPC_ENCODING);
} else if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) {
initialize_state(elem, calld);
InitializeState(elem);
error = grpc_metadata_batch_add_tail(
initial_metadata, &calld->stream_compression_algorithm_storage,
initial_metadata, &stream_compression_algorithm_storage_,
grpc_stream_compression_encoding_mdelem(stream_compression_algorithm),
GRPC_BATCH_CONTENT_ENCODING);
}
if (error != GRPC_ERROR_NONE) return error;
// Convey supported compression algorithms.
error = grpc_metadata_batch_add_tail(
initial_metadata, &calld->accept_encoding_storage,
initial_metadata, &accept_encoding_storage_,
GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
channeld->enabled_message_compression_algorithms_bitset),
channeld->enabled_message_compression_algorithms_bitset()),
GRPC_BATCH_GRPC_ACCEPT_ENCODING);
if (error != GRPC_ERROR_NONE) return error;
// Do not overwrite accept-encoding header if it already presents (e.g., added
// by some proxy).
if (!initial_metadata->idx.named.accept_encoding) {
error = grpc_metadata_batch_add_tail(
initial_metadata, &calld->accept_stream_encoding_storage,
initial_metadata, &accept_stream_encoding_storage_,
GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(
channeld->enabled_stream_compression_algorithms_bitset),
channeld->enabled_stream_compression_algorithms_bitset()),
GRPC_BATCH_ACCEPT_ENCODING);
}
return error;
}
static void send_message_on_complete(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_slice_buffer_reset_and_unref_internal(&calld->slices);
void CallData::SendMessageOnComplete(void* calld_arg, grpc_error* error) {
CallData* calld = static_cast<CallData*>(calld_arg);
grpc_slice_buffer_reset_and_unref_internal(&calld->slices_);
grpc_core::Closure::Run(DEBUG_LOCATION,
calld->original_send_message_on_complete,
calld->original_send_message_on_complete_,
GRPC_ERROR_REF(error));
}
static void send_message_batch_continue(grpc_call_element* elem) {
call_data* calld = static_cast<call_data*>(elem->call_data);
void CallData::SendMessageBatchContinue(grpc_call_element* elem) {
// Note: The call to grpc_call_next_op() results in yielding the
// call combiner, so we need to clear calld->send_message_batch
// before we do that.
grpc_transport_stream_op_batch* send_message_batch =
calld->send_message_batch;
calld->send_message_batch = nullptr;
// call combiner, so we need to clear send_message_batch_ before we do that.
grpc_transport_stream_op_batch* send_message_batch = send_message_batch_;
send_message_batch_ = nullptr;
grpc_call_next_op(elem, send_message_batch);
}
static void finish_send_message(grpc_call_element* elem) {
call_data* calld = static_cast<call_data*>(elem->call_data);
GPR_DEBUG_ASSERT(calld->message_compression_algorithm !=
void CallData::FinishSendMessage(grpc_call_element* elem) {
GPR_DEBUG_ASSERT(message_compression_algorithm_ !=
GRPC_MESSAGE_COMPRESS_NONE);
// Compress the data if appropriate.
grpc_slice_buffer tmp;
grpc_slice_buffer_init(&tmp);
uint32_t send_flags =
calld->send_message_batch->payload->send_message.send_message->flags();
bool did_compress = grpc_msg_compress(calld->message_compression_algorithm,
&calld->slices, &tmp);
send_message_batch_->payload->send_message.send_message->flags();
bool did_compress =
grpc_msg_compress(message_compression_algorithm_, &slices_, &tmp);
if (did_compress) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
const char* algo_name;
const size_t before_size = calld->slices.length;
const size_t before_size = slices_.length;
const size_t after_size = tmp.length;
const float savings_ratio = 1.0f - static_cast<float>(after_size) /
static_cast<float>(before_size);
GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name));
message_compression_algorithm_, &algo_name));
gpr_log(GPR_INFO,
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
" bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
}
grpc_slice_buffer_swap(&calld->slices, &tmp);
grpc_slice_buffer_swap(&slices_, &tmp);
send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
const char* algo_name;
GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name));
message_compression_algorithm_, &algo_name));
gpr_log(GPR_INFO,
"Algorithm '%s' enabled but decided not to compress. Input size: "
"%" PRIuPTR,
algo_name, calld->slices.length);
algo_name, slices_.length);
}
}
grpc_slice_buffer_destroy_internal(&tmp);
// Swap out the original byte stream with our new one and send the
// batch down.
calld->replacement_stream.Init(&calld->slices, send_flags);
calld->send_message_batch->payload->send_message.send_message.reset(
calld->replacement_stream.get());
calld->original_send_message_on_complete =
calld->send_message_batch->on_complete;
calld->send_message_batch->on_complete = &calld->send_message_on_complete;
send_message_batch_continue(elem);
new (&replacement_stream_)
grpc_core::SliceBufferByteStream(&slices_, send_flags);
send_message_batch_->payload->send_message.send_message.reset(
reinterpret_cast<grpc_core::SliceBufferByteStream*>(
&replacement_stream_));
original_send_message_on_complete_ = send_message_batch_->on_complete;
send_message_batch_->on_complete = &send_message_on_complete_;
SendMessageBatchContinue(elem);
}
static void fail_send_message_batch_in_call_combiner(void* arg,
grpc_error* error) {
call_data* calld = static_cast<call_data*>(arg);
if (calld->send_message_batch != nullptr) {
void CallData::FailSendMessageBatchInCallCombiner(void* calld_arg,
grpc_error* error) {
CallData* calld = static_cast<CallData*>(calld_arg);
if (calld->send_message_batch_ != nullptr) {
grpc_transport_stream_op_batch_finish_with_failure(
calld->send_message_batch, GRPC_ERROR_REF(error), calld->call_combiner);
calld->send_message_batch = nullptr;
calld->send_message_batch_, GRPC_ERROR_REF(error),
calld->call_combiner_);
calld->send_message_batch_ = nullptr;
}
}
// Pulls a slice from the send_message byte stream and adds it to calld->slices.
static grpc_error* pull_slice_from_send_message(call_data* calld) {
// Pulls a slice from the send_message byte stream and adds it to slices_.
grpc_error* CallData::PullSliceFromSendMessage() {
grpc_slice incoming_slice;
grpc_error* error =
calld->send_message_batch->payload->send_message.send_message->Pull(
send_message_batch_->payload->send_message.send_message->Pull(
&incoming_slice);
if (error == GRPC_ERROR_NONE) {
grpc_slice_buffer_add(&calld->slices, incoming_slice);
grpc_slice_buffer_add(&slices_, incoming_slice);
}
return error;
}
// Reads as many slices as possible from the send_message byte stream.
// If all data has been read, invokes finish_send_message(). Otherwise,
// If all data has been read, invokes FinishSendMessage(). Otherwise,
// an async call to ByteStream::Next() has been started, which will
// eventually result in calling on_send_message_next_done().
static void continue_reading_send_message(grpc_call_element* elem) {
call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length()) {
finish_send_message(elem);
// eventually result in calling OnSendMessageNextDone().
void CallData::ContinueReadingSendMessage(grpc_call_element* elem) {
if (slices_.length ==
send_message_batch_->payload->send_message.send_message->length()) {
FinishSendMessage(elem);
return;
}
while (calld->send_message_batch->payload->send_message.send_message->Next(
~static_cast<size_t>(0), &calld->on_send_message_next_done)) {
grpc_error* error = pull_slice_from_send_message(calld);
while (send_message_batch_->payload->send_message.send_message->Next(
~static_cast<size_t>(0), &on_send_message_next_done_)) {
grpc_error* error = PullSliceFromSendMessage();
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(calld, error);
FailSendMessageBatchInCallCombiner(this, error);
GRPC_ERROR_UNREF(error);
return;
}
if (calld->slices.length == calld->send_message_batch->payload->send_message
.send_message->length()) {
finish_send_message(elem);
if (slices_.length ==
send_message_batch_->payload->send_message.send_message->length()) {
FinishSendMessage(elem);
break;
}
}
}
// Async callback for ByteStream::Next().
static void on_send_message_next_done(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
call_data* calld = static_cast<call_data*>(elem->call_data);
void CallData::OnSendMessageNextDone(void* elem_arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(elem_arg);
CallData* calld = static_cast<CallData*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(calld, error);
FailSendMessageBatchInCallCombiner(calld, error);
return;
}
error = pull_slice_from_send_message(calld);
error = calld->PullSliceFromSendMessage();
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(calld, error);
FailSendMessageBatchInCallCombiner(calld, error);
GRPC_ERROR_UNREF(error);
return;
}
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length()) {
finish_send_message(elem);
if (calld->slices_.length == calld->send_message_batch_->payload->send_message
.send_message->length()) {
calld->FinishSendMessage(elem);
} else {
continue_reading_send_message(elem);
calld->ContinueReadingSendMessage(elem);
}
}
static void start_send_message_batch(void* arg, grpc_error* /*unused*/) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
if (skip_message_compression(elem)) {
send_message_batch_continue(elem);
void CallData::StartSendMessageBatch(void* elem_arg, grpc_error* /*unused*/) {
grpc_call_element* elem = static_cast<grpc_call_element*>(elem_arg);
CallData* calld = static_cast<CallData*>(elem->call_data);
if (calld->SkipMessageCompression()) {
calld->SendMessageBatchContinue(elem);
} else {
continue_reading_send_message(elem);
calld->ContinueReadingSendMessage(elem);
}
}
static void compress_start_transport_stream_op_batch(
void CallData::CompressStartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
GPR_TIMER_SCOPE("compress_start_transport_stream_op_batch", 0);
call_data* calld = static_cast<call_data*>(elem->call_data);
// Handle cancel_stream.
if (batch->cancel_stream) {
GRPC_ERROR_UNREF(calld->cancel_error);
calld->cancel_error =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (calld->send_message_batch != nullptr) {
if (!calld->seen_initial_metadata) {
GRPC_ERROR_UNREF(cancel_error_);
cancel_error_ = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (send_message_batch_ != nullptr) {
if (!seen_initial_metadata_) {
GRPC_CALL_COMBINER_START(
calld->call_combiner,
GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld,
call_combiner_,
GRPC_CLOSURE_CREATE(FailSendMessageBatchInCallCombiner, this,
grpc_schedule_on_exec_ctx),
GRPC_ERROR_REF(calld->cancel_error), "failing send_message op");
GRPC_ERROR_REF(cancel_error_), "failing send_message op");
} else {
calld->send_message_batch->payload->send_message.send_message->Shutdown(
GRPC_ERROR_REF(calld->cancel_error));
send_message_batch_->payload->send_message.send_message->Shutdown(
GRPC_ERROR_REF(cancel_error_));
}
}
} else if (calld->cancel_error != GRPC_ERROR_NONE) {
} else if (cancel_error_ != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
return;
}
// Handle send_initial_metadata.
if (batch->send_initial_metadata) {
GPR_ASSERT(!calld->seen_initial_metadata);
grpc_error* error = process_send_initial_metadata(
GPR_ASSERT(!seen_initial_metadata_);
grpc_error* error = ProcessSendInitialMetadata(
elem, batch->payload->send_initial_metadata.send_initial_metadata);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(batch, error,
calld->call_combiner);
call_combiner_);
return;
}
calld->seen_initial_metadata = true;
seen_initial_metadata_ = true;
// If we had previously received a batch containing a send_message op,
// handle it now. Note that we need to re-enter the call combiner
// for this, since we can't send two batches down while holding the
// call combiner, since the connected_channel filter (at the bottom of
// the call stack) will release the call combiner for each batch it sees.
if (calld->send_message_batch != nullptr) {
if (send_message_batch_ != nullptr) {
GRPC_CALL_COMBINER_START(
calld->call_combiner,
&calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE,
"starting send_message after send_initial_metadata");
call_combiner_, &start_send_message_batch_in_call_combiner_,
GRPC_ERROR_NONE, "starting send_message after send_initial_metadata");
}
}
// Handle send_message.
if (batch->send_message) {
GPR_ASSERT(calld->send_message_batch == nullptr);
calld->send_message_batch = batch;
GPR_ASSERT(send_message_batch_ == nullptr);
send_message_batch_ = batch;
// If we have not yet seen send_initial_metadata, then we have to
// wait. We save the batch in calld and then drop the call
// combiner, which we'll have to pick up again later when we get
// send_initial_metadata.
if (!calld->seen_initial_metadata) {
// wait. We save the batch and then drop the call combiner, which we'll
// have to pick up again later when we get send_initial_metadata.
if (!seen_initial_metadata_) {
GRPC_CALL_COMBINER_STOP(
calld->call_combiner,
"send_message batch pending send_initial_metadata");
call_combiner_, "send_message batch pending send_initial_metadata");
return;
}
start_send_message_batch(elem, GRPC_ERROR_NONE);
StartSendMessageBatch(elem, GRPC_ERROR_NONE);
} else {
// Pass control down the stack.
grpc_call_next_op(elem, batch);
}
}
void CompressStartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
CallData* calld = static_cast<CallData*>(elem->call_data);
calld->CompressStartTransportStreamOpBatch(elem, batch);
}
/* Constructor for call_data */
static grpc_error* compress_init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
new (elem->call_data) call_data(elem, *args);
grpc_error* CompressInitCallElem(grpc_call_element* elem,
const grpc_call_element_args* args) {
new (elem->call_data) CallData(elem, *args);
return GRPC_ERROR_NONE;
}
/* Destructor for call_data */
static void compress_destroy_call_elem(
grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
grpc_closure* /*ignored*/) {
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->~call_data();
void CompressDestroyCallElem(grpc_call_element* elem,
const grpc_call_final_info* /*final_info*/,
grpc_closure* /*ignored*/) {
CallData* calld = static_cast<CallData*>(elem->call_data);
calld->~CallData();
}
/* Constructor for channel_data */
static grpc_error* compress_init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
channel_data* channeld = static_cast<channel_data*>(elem->channel_data);
// Get the enabled and the default algorithms from channel args.
channeld->enabled_compression_algorithms_bitset =
grpc_channel_args_compression_algorithm_get_states(args->channel_args);
channeld->default_compression_algorithm =
grpc_channel_args_get_channel_default_compression_algorithm(
args->channel_args);
// Make sure the default is enabled.
if (!GPR_BITGET(channeld->enabled_compression_algorithms_bitset,
channeld->default_compression_algorithm)) {
const char* name;
GPR_ASSERT(grpc_compression_algorithm_name(
channeld->default_compression_algorithm, &name) == 1);
gpr_log(GPR_ERROR,
"default compression algorithm %s not enabled: switching to none",
name);
channeld->default_compression_algorithm = GRPC_COMPRESS_NONE;
}
channeld->enabled_message_compression_algorithms_bitset =
grpc_compression_bitset_to_message_bitset(
channeld->enabled_compression_algorithms_bitset);
channeld->enabled_stream_compression_algorithms_bitset =
grpc_compression_bitset_to_stream_bitset(
channeld->enabled_compression_algorithms_bitset);
GPR_ASSERT(!args->is_last);
/* Constructor for ChannelData */
grpc_error* CompressInitChannelElem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
new (elem->channel_data) ChannelData(args);
return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
static void compress_destroy_channel_elem(grpc_channel_element* /*elem*/) {}
void CompressDestroyChannelElem(grpc_channel_element* elem) {
ChannelData* channeld = static_cast<ChannelData*>(elem->channel_data);
channeld->~ChannelData();
}
} // namespace
const grpc_channel_filter grpc_message_compress_filter = {
compress_start_transport_stream_op_batch,
CompressStartTransportStreamOpBatch,
grpc_channel_next_op,
sizeof(call_data),
compress_init_call_elem,
sizeof(CallData),
CompressInitCallElem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
compress_destroy_call_elem,
sizeof(channel_data),
compress_init_channel_elem,
compress_destroy_channel_elem,
CompressDestroyCallElem,
sizeof(ChannelData),
CompressInitChannelElem,
CompressDestroyChannelElem,
grpc_channel_next_get_info,
"message_compress"};

@ -99,6 +99,7 @@ static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES;
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
grpc_core::TraceFlag grpc_http_trace(false, "http");
grpc_core::TraceFlag grpc_keepalive_trace(false, "http_keepalive");
grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount(false,
"chttp2_refcount");
@ -2817,7 +2818,8 @@ static void start_keepalive_ping_locked(void* arg, grpc_error* error) {
if (t->channelz_socket != nullptr) {
t->channelz_socket->RecordKeepaliveSent();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
gpr_log(GPR_INFO, "%s: Start keepalive ping", t->peer_string);
}
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
@ -2840,7 +2842,8 @@ static void finish_keepalive_ping_locked(void* arg, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
gpr_log(GPR_INFO, "%s: Finish keepalive ping", t->peer_string);
}
if (!t->keepalive_ping_started) {

@ -27,6 +27,7 @@
#include "src/core/lib/transport/transport.h"
extern grpc_core::TraceFlag grpc_http_trace;
extern grpc_core::TraceFlag grpc_keepalive_trace;
extern grpc_core::TraceFlag grpc_trace_http2_stream_state;
extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount;
extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_hpack_parser;

@ -284,8 +284,8 @@ void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint,
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
/* clamp max recv hint to an allowable size */
if (max_size_hint >= UINT32_MAX - sent_init_window) {
max_recv_bytes = UINT32_MAX - sent_init_window;
if (max_size_hint >= kMaxWindowUpdateSize - sent_init_window) {
max_recv_bytes = kMaxWindowUpdateSize - sent_init_window;
} else {
max_recv_bytes = static_cast<uint32_t>(max_size_hint);
}
@ -298,7 +298,7 @@ void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint,
}
/* add some small lookahead to keep pipelines flowing */
GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window);
GPR_DEBUG_ASSERT(max_recv_bytes <= kMaxWindowUpdateSize - sent_init_window);
if (local_window_delta_ < max_recv_bytes) {
uint32_t add_max_recv_bytes =
static_cast<uint32_t>(max_recv_bytes - local_window_delta_);

@ -18,6 +18,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/context_list.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
@ -54,7 +55,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
/* ping already in-flight: wait */
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
gpr_log(GPR_INFO, "%s: Ping delayed [%p]: already pinging",
t->is_client ? "CLIENT" : "SERVER", t->peer_string);
}
@ -64,7 +66,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
t->ping_policy.max_pings_without_data != 0) {
/* need to receive something of substance before sending a ping again */
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
gpr_log(GPR_INFO, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
@ -85,7 +88,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (next_allowed_ping > now) {
/* not enough elapsed time between successive pings */
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
gpr_log(GPR_INFO,
"%s: Ping delayed [%p]: not enough time elapsed since last ping. "
" Last ping %f: Next ping %f: Now %f",
@ -116,7 +120,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
GRPC_STATS_INC_HTTP2_PINGS_SENT();
t->ping_state.last_ping_sent_time = now;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
gpr_log(GPR_INFO, "%s: Ping sent [%s]: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,

@ -1072,9 +1072,11 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
op_can_be_run(stream_op, s, &oas->state, OP_SEND_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas);
stream_state->pending_send_message = false;
if (stream_state->state_callback_received[OP_FAILED]) {
if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED] ||
stream_state->state_callback_received[OP_SUCCEEDED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished");
} else {
grpc_slice_buffer write_slice_buffer;
grpc_slice slice;
@ -1131,9 +1133,11 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
op_can_be_run(stream_op, s, &oas->state,
OP_SEND_TRAILING_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_TRAILING_METADATA", oas);
if (stream_state->state_callback_received[OP_FAILED]) {
if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED] ||
stream_state->state_callback_received[OP_SUCCEEDED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished");
} else {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;

@ -31,7 +31,18 @@
chains are linear, then channel stacks provide a mechanism to minimize
allocations for that chain.
Call stacks are created by channel stacks and represent the per-call data
for that stack. */
for that stack.
Implementations should take care of the following details for a batch:
1. Synchronization is achieved with a CallCombiner. View
src/core/lib/iomgr/call_combiner.h for more details.
2. If the filter wants to inject an error on the way down, it needs to call
grpc_transport_stream_op_batch_finish_with_failure from within the call
combiner. This will cause any batch callbacks to be called with that error.
3. If the filter wants to inject an error on the way up (from a callback), it
should also inject that error in the recv_trailing_metadata callback so that
it can have an effect on the call status.
*/
#include <grpc/support/port_platform.h>

@ -210,7 +210,6 @@ static gpr_once g_probe_so_reuesport_once = GPR_ONCE_INIT;
static int g_support_so_reuseport = false;
void probe_so_reuseport_once(void) {
#ifndef GPR_MANYLINUX1
int s = socket(AF_INET, SOCK_STREAM, 0);
if (s < 0) {
/* This might be an ipv6-only environment in which case 'socket(AF_INET,..)'
@ -222,7 +221,6 @@ void probe_so_reuseport_once(void) {
"check for SO_REUSEPORT", grpc_set_socket_reuse_port(s, 1));
close(s);
}
#endif
}
bool grpc_is_socket_reuse_port_supported() {

@ -82,10 +82,17 @@ class grpc_alts_channel_security_connector final
tsi_handshaker* handshaker = nullptr;
const grpc_alts_credentials* creds =
static_cast<const grpc_alts_credentials*>(channel_creds());
GPR_ASSERT(alts_tsi_handshaker_create(creds->options(), target_name_,
creds->handshaker_service_url(), true,
interested_parties,
&handshaker) == TSI_OK);
size_t user_specified_max_frame_size = 0;
const grpc_arg* arg =
grpc_channel_args_find(args, GRPC_ARG_TSI_MAX_FRAME_SIZE);
if (arg != nullptr && arg->type == GRPC_ARG_INTEGER) {
user_specified_max_frame_size = grpc_channel_arg_get_integer(
arg, {0, 0, std::numeric_limits<int>::max()});
}
GPR_ASSERT(alts_tsi_handshaker_create(
creds->options(), target_name_,
creds->handshaker_service_url(), true, interested_parties,
&handshaker, user_specified_max_frame_size) == TSI_OK);
handshake_manager->Add(
grpc_core::SecurityHandshakerCreate(handshaker, this, args));
}
@ -140,9 +147,17 @@ class grpc_alts_server_security_connector final
tsi_handshaker* handshaker = nullptr;
const grpc_alts_server_credentials* creds =
static_cast<const grpc_alts_server_credentials*>(server_creds());
size_t user_specified_max_frame_size = 0;
const grpc_arg* arg =
grpc_channel_args_find(args, GRPC_ARG_TSI_MAX_FRAME_SIZE);
if (arg != nullptr && arg->type == GRPC_ARG_INTEGER) {
user_specified_max_frame_size = grpc_channel_arg_get_integer(
arg, {0, 0, std::numeric_limits<int>::max()});
}
GPR_ASSERT(alts_tsi_handshaker_create(
creds->options(), nullptr, creds->handshaker_service_url(),
false, interested_parties, &handshaker) == TSI_OK);
false, interested_parties, &handshaker,
user_specified_max_frame_size) == TSI_OK);
handshake_manager->Add(
grpc_core::SecurityHandshakerCreate(handshaker, this, args));
}

@ -577,7 +577,6 @@ static void publish_new_rpc(void* arg, grpc_error* error) {
rm->pending_tail->pending_next = calld;
rm->pending_tail = calld;
}
calld->pending_next = nullptr;
gpr_mu_unlock(&server->mu_call);
}

@ -36,8 +36,14 @@ void grpc_lb_policy_grpclb_init(void);
void grpc_lb_policy_grpclb_shutdown(void);
void grpc_lb_policy_cds_init(void);
void grpc_lb_policy_cds_shutdown(void);
void grpc_lb_policy_xds_init(void);
void grpc_lb_policy_xds_shutdown(void);
void grpc_lb_policy_eds_init(void);
void grpc_lb_policy_eds_shutdown(void);
void grpc_lb_policy_lrs_init(void);
void grpc_lb_policy_lrs_shutdown(void);
void grpc_lb_policy_priority_init(void);
void grpc_lb_policy_priority_shutdown(void);
void grpc_lb_policy_weighted_target_init(void);
void grpc_lb_policy_weighted_target_shutdown(void);
void grpc_lb_policy_pick_first_init(void);
void grpc_lb_policy_pick_first_shutdown(void);
void grpc_lb_policy_round_robin_init(void);
@ -78,8 +84,14 @@ void grpc_register_built_in_plugins(void) {
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_cds_init,
grpc_lb_policy_cds_shutdown);
grpc_register_plugin(grpc_lb_policy_xds_init,
grpc_lb_policy_xds_shutdown);
grpc_register_plugin(grpc_lb_policy_eds_init,
grpc_lb_policy_eds_shutdown);
grpc_register_plugin(grpc_lb_policy_lrs_init,
grpc_lb_policy_lrs_shutdown);
grpc_register_plugin(grpc_lb_policy_priority_init,
grpc_lb_policy_priority_shutdown);
grpc_register_plugin(grpc_lb_policy_weighted_target_init,
grpc_lb_policy_weighted_target_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
grpc_lb_policy_pick_first_shutdown);
grpc_register_plugin(grpc_lb_policy_round_robin_init,

@ -44,8 +44,14 @@ void grpc_lb_policy_grpclb_init(void);
void grpc_lb_policy_grpclb_shutdown(void);
void grpc_lb_policy_cds_init(void);
void grpc_lb_policy_cds_shutdown(void);
void grpc_lb_policy_xds_init(void);
void grpc_lb_policy_xds_shutdown(void);
void grpc_lb_policy_eds_init(void);
void grpc_lb_policy_eds_shutdown(void);
void grpc_lb_policy_lrs_init(void);
void grpc_lb_policy_lrs_shutdown(void);
void grpc_lb_policy_priority_init(void);
void grpc_lb_policy_priority_shutdown(void);
void grpc_lb_policy_weighted_target_init(void);
void grpc_lb_policy_weighted_target_shutdown(void);
void grpc_lb_policy_pick_first_init(void);
void grpc_lb_policy_pick_first_shutdown(void);
void grpc_lb_policy_round_robin_init(void);
@ -86,8 +92,14 @@ void grpc_register_built_in_plugins(void) {
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_cds_init,
grpc_lb_policy_cds_shutdown);
grpc_register_plugin(grpc_lb_policy_xds_init,
grpc_lb_policy_xds_shutdown);
grpc_register_plugin(grpc_lb_policy_eds_init,
grpc_lb_policy_eds_shutdown);
grpc_register_plugin(grpc_lb_policy_lrs_init,
grpc_lb_policy_lrs_shutdown);
grpc_register_plugin(grpc_lb_policy_priority_init,
grpc_lb_policy_priority_shutdown);
grpc_register_plugin(grpc_lb_policy_weighted_target_init,
grpc_lb_policy_weighted_target_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
grpc_lb_policy_pick_first_shutdown);
grpc_register_plugin(grpc_lb_policy_round_robin_init,

@ -102,6 +102,8 @@ typedef struct alts_grpc_handshaker_client {
bool receive_status_finished;
/* if non-null, contains arguments to complete a TSI next callback. */
recv_message_result* pending_recv_message_result;
/* Maximum frame size used by frame protector. */
size_t max_frame_size;
} alts_grpc_handshaker_client;
static void handshaker_client_send_buffer_destroy(
@ -506,6 +508,8 @@ static grpc_byte_buffer* get_serialized_start_client(
upb_strview_makez(ptr->data));
ptr = ptr->next;
}
grpc_gcp_StartClientHandshakeReq_set_max_frame_size(
start_client, static_cast<uint32_t>(client->max_frame_size));
return get_serialized_handshaker_req(req, arena.ptr());
}
@ -565,6 +569,8 @@ static grpc_byte_buffer* get_serialized_start_server(
arena.ptr());
grpc_gcp_RpcProtocolVersions_assign_from_struct(
server_version, arena.ptr(), &client->options->rpc_versions);
grpc_gcp_StartServerHandshakeReq_set_max_frame_size(
start_server, static_cast<uint32_t>(client->max_frame_size));
return get_serialized_handshaker_req(req, arena.ptr());
}
@ -674,7 +680,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create(
grpc_alts_credentials_options* options, const grpc_slice& target_name,
grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
bool is_client) {
bool is_client, size_t max_frame_size) {
if (channel == nullptr || handshaker_service_url == nullptr) {
gpr_log(GPR_ERROR, "Invalid arguments to alts_handshaker_client_create()");
return nullptr;
@ -694,6 +700,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create(
client->recv_bytes = grpc_empty_slice();
grpc_metadata_array_init(&client->recv_initial_metadata);
client->is_client = is_client;
client->max_frame_size = max_frame_size;
client->buffer_size = TSI_ALTS_INITIAL_BUFFER_SIZE;
client->buffer = static_cast<unsigned char*>(gpr_zalloc(client->buffer_size));
grpc_slice slice = grpc_slice_from_copied_string(handshaker_service_url);

@ -117,7 +117,7 @@ void alts_handshaker_client_destroy(alts_handshaker_client* client);
* This method creates an ALTS handshaker client.
*
* - handshaker: ALTS TSI handshaker to which the created handshaker client
* belongs to.
* belongs to.
* - channel: grpc channel to ALTS handshaker service.
* - handshaker_service_url: address of ALTS handshaker service in the format of
* "host:port".
@ -132,8 +132,12 @@ void alts_handshaker_client_destroy(alts_handshaker_client* client);
* - vtable_for_testing: ALTS handshaker client vtable instance used for
* testing purpose.
* - is_client: a boolean value indicating if the created handshaker client is
* used at the client (is_client = true) or server (is_client = false) side. It
* returns the created ALTS handshaker client on success, and NULL on failure.
* used at the client (is_client = true) or server (is_client = false) side.
* - max_frame_size: Maximum frame size used by frame protector (User specified
* maximum frame size if present or default max frame size).
*
* It returns the created ALTS handshaker client on success, and NULL
* on failure.
*/
alts_handshaker_client* alts_grpc_handshaker_client_create(
alts_tsi_handshaker* handshaker, grpc_channel* channel,
@ -141,7 +145,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create(
grpc_alts_credentials_options* options, const grpc_slice& target_name,
grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
bool is_client);
bool is_client, size_t max_frame_size);
/**
* This method handles handshaker response returned from ALTS handshaker

@ -63,6 +63,8 @@ struct alts_tsi_handshaker {
// shutdown effectively follows base.handshake_shutdown,
// but is synchronized by the mutex of this object.
bool shutdown;
// Maximum frame size used by frame protector.
size_t max_frame_size;
};
/* Main struct for ALTS TSI handshaker result. */
@ -75,6 +77,8 @@ typedef struct alts_tsi_handshaker_result {
grpc_slice rpc_versions;
bool is_client;
grpc_slice serialized_context;
// Peer's maximum frame size.
size_t max_frame_size;
} alts_tsi_handshaker_result;
static tsi_result handshaker_result_extract_peer(
@ -156,6 +160,26 @@ static tsi_result handshaker_result_create_zero_copy_grpc_protector(
alts_tsi_handshaker_result* result =
reinterpret_cast<alts_tsi_handshaker_result*>(
const_cast<tsi_handshaker_result*>(self));
// In case the peer does not send max frame size (e.g. peer is gRPC Go or
// peer uses an old binary), the negotiated frame size is set to
// kTsiAltsMinFrameSize (ignoring max_output_protected_frame_size value if
// present). Otherwise, it is based on peer and user specified max frame
// size (if present).
size_t max_frame_size = kTsiAltsMinFrameSize;
if (result->max_frame_size) {
size_t peer_max_frame_size = result->max_frame_size;
max_frame_size = std::min<size_t>(peer_max_frame_size,
max_output_protected_frame_size == nullptr
? kTsiAltsMaxFrameSize
: *max_output_protected_frame_size);
max_frame_size = std::max<size_t>(max_frame_size, kTsiAltsMinFrameSize);
}
max_output_protected_frame_size = &max_frame_size;
gpr_log(GPR_DEBUG,
"After Frame Size Negotiation, maximum frame size used by frame "
"protector equals %zu",
*max_output_protected_frame_size);
tsi_result ok = alts_zero_copy_grpc_protector_create(
reinterpret_cast<const uint8_t*>(result->key_data),
kAltsAes128GcmRekeyKeyLength, /*is_rekey=*/true, result->is_client,
@ -288,6 +312,7 @@ tsi_result alts_tsi_handshaker_result_create(grpc_gcp_HandshakerResp* resp,
static_cast<char*>(gpr_zalloc(peer_service_account.size + 1));
memcpy(result->peer_identity, peer_service_account.data,
peer_service_account.size);
result->max_frame_size = grpc_gcp_HandshakerResult_max_frame_size(hresult);
upb::Arena rpc_versions_arena;
bool serialized = grpc_gcp_rpc_protocol_versions_encode(
peer_rpc_version, rpc_versions_arena.ptr(), &result->rpc_versions);
@ -374,7 +399,8 @@ static tsi_result alts_tsi_handshaker_continue_handshaker_next(
handshaker, channel, handshaker->handshaker_service_url,
handshaker->interested_parties, handshaker->options,
handshaker->target_name, grpc_cb, cb, user_data,
handshaker->client_vtable_for_testing, handshaker->is_client);
handshaker->client_vtable_for_testing, handshaker->is_client,
handshaker->max_frame_size);
if (client == nullptr) {
gpr_log(GPR_ERROR, "Failed to create ALTS handshaker client");
return TSI_FAILED_PRECONDITION;
@ -570,7 +596,8 @@ bool alts_tsi_handshaker_has_shutdown(alts_tsi_handshaker* handshaker) {
tsi_result alts_tsi_handshaker_create(
const grpc_alts_credentials_options* options, const char* target_name,
const char* handshaker_service_url, bool is_client,
grpc_pollset_set* interested_parties, tsi_handshaker** self) {
grpc_pollset_set* interested_parties, tsi_handshaker** self,
size_t user_specified_max_frame_size) {
if (handshaker_service_url == nullptr || self == nullptr ||
options == nullptr || (is_client && target_name == nullptr)) {
gpr_log(GPR_ERROR, "Invalid arguments to alts_tsi_handshaker_create()");
@ -590,6 +617,9 @@ tsi_result alts_tsi_handshaker_create(
handshaker->has_created_handshaker_client = false;
handshaker->handshaker_service_url = gpr_strdup(handshaker_service_url);
handshaker->options = grpc_alts_credentials_options_copy(options);
handshaker->max_frame_size = user_specified_max_frame_size != 0
? user_specified_max_frame_size
: kTsiAltsMaxFrameSize;
handshaker->base.vtable = handshaker->use_dedicated_cq
? &handshaker_vtable_dedicated
: &handshaker_vtable;

@ -38,6 +38,11 @@
const size_t kTsiAltsNumOfPeerProperties = 5;
// Frame size negotiation extends send frame size range to
// [kTsiAltsMinFrameSize, kTsiAltsMaxFrameSize].
const size_t kTsiAltsMinFrameSize = 16 * 1024;
const size_t kTsiAltsMaxFrameSize = 128 * 1024;
typedef struct alts_tsi_handshaker alts_tsi_handshaker;
/**
@ -54,6 +59,8 @@ typedef struct alts_tsi_handshaker alts_tsi_handshaker;
* - interested_parties: set of pollsets interested in this connection.
* - self: address of ALTS TSI handshaker instance to be returned from the
* method.
* - user_specified_max_frame_size: the user-specified maximum frame size to be
* used by the frame protector. If unspecified, the value is 0.
*
* It returns TSI_OK on success and an error status code on failure. Note that
* if interested_parties is nullptr, a dedicated TSI thread will be created and
@ -62,7 +69,8 @@ typedef struct alts_tsi_handshaker alts_tsi_handshaker;
tsi_result alts_tsi_handshaker_create(
const grpc_alts_credentials_options* options, const char* target_name,
const char* handshaker_service_url, bool is_client,
grpc_pollset_set* interested_parties, tsi_handshaker** self);
grpc_pollset_set* interested_parties, tsi_handshaker** self,
size_t user_specified_max_frame_size);
/**
* This method creates an ALTS TSI handshaker result instance.

@ -58,7 +58,7 @@ $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc
```sh
$ cd grpc
$ git submodule update --init
$ make
$ EXTRA_DEFINES=GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK make
$ [sudo] make install
```

@ -117,6 +117,19 @@ class Call(RpcContext, metaclass=ABCMeta):
The details string of the RPC.
"""
@abstractmethod
async def wait_for_connection(self) -> None:
    """Waits until connected to peer and raises aio.AioRpcError if failed.

    This is an EXPERIMENTAL method.

    This method ensures the RPC has been successfully connected. Otherwise,
    an AioRpcError will be raised to explain the reason of the connection
    failure.

    This method is recommended for building retry mechanisms.
    """
class UnaryUnaryCall(Generic[RequestType, ResponseType],
Call,

@ -18,7 +18,7 @@ import enum
import inspect
import logging
from functools import partial
from typing import AsyncIterable, Awaitable, Optional, Tuple
from typing import AsyncIterable, Optional, Tuple
import grpc
from grpc import _common
@ -250,9 +250,8 @@ class _APIStyle(enum.IntEnum):
class _UnaryResponseMixin(Call):
_call_response: asyncio.Task
def _init_unary_response_mixin(self,
response_coro: Awaitable[ResponseType]):
self._call_response = self._loop.create_task(response_coro)
def _init_unary_response_mixin(self, response_task: asyncio.Task):
    # Stores an already-scheduled task that will produce the unary response.
    # The caller owns task creation; the mixin only tracks it so cancel()
    # and __await__ can reach the in-flight response.
    self._call_response = response_task
def cancel(self) -> bool:
if super().cancel():
@ -458,6 +457,11 @@ class _StreamRequestMixin(Call):
self._raise_for_different_style(_APIStyle.READER_WRITER)
await self._done_writing()
async def wait_for_connection(self) -> None:
    """Waits until connected to peer; raises AioRpcError on failure.

    Blocks on the `_metadata_sent` event (presumably set once initial
    metadata reaches the transport — confirm against the mixin's writer
    path); if the RPC has already terminated by then, surfaces its
    status as an exception.
    """
    await self._metadata_sent.wait()
    if self.done():
        await self._raise_for_status()
class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
"""Object for managing unary-unary RPC calls.
@ -465,6 +469,7 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
Returned when an instance of `UnaryUnaryMultiCallable` object is called.
"""
_request: RequestType
_invocation_task: asyncio.Task
# pylint: disable=too-many-arguments
def __init__(self, request: RequestType, deadline: Optional[float],
@ -478,7 +483,8 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._request = request
self._init_unary_response_mixin(self._invoke())
self._invocation_task = loop.create_task(self._invoke())
self._init_unary_response_mixin(self._invocation_task)
async def _invoke(self) -> ResponseType:
serialized_request = _common.serialize(self._request,
@ -500,6 +506,11 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
else:
return cygrpc.EOF
async def wait_for_connection(self) -> None:
    """Waits until connected to peer; raises AioRpcError on failure.

    Awaits the invocation task created in __init__ (which runs the
    actual RPC); if the call has already finished, surfaces its status
    as an exception via _raise_for_status.
    """
    await self._invocation_task
    if self.done():
        await self._raise_for_status()
class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall):
"""Object for managing unary-stream RPC calls.
@ -536,6 +547,11 @@ class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall):
self.cancel()
raise
async def wait_for_connection(self) -> None:
    """Waits until connected to peer; raises AioRpcError on failure.

    Awaits the task that sends the single request of this unary-stream
    call; if the call has already finished, surfaces its status as an
    exception via _raise_for_status.
    """
    await self._send_unary_request_task
    if self.done():
        await self._raise_for_status()
class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call,
_base_call.StreamUnaryCall):
@ -557,7 +573,7 @@ class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call,
metadata, request_serializer, response_deserializer, loop)
self._init_stream_request_mixin(request_iterator)
self._init_unary_response_mixin(self._conduct_rpc())
self._init_unary_response_mixin(loop.create_task(self._conduct_rpc()))
async def _conduct_rpc(self) -> ResponseType:
try:

@ -330,6 +330,10 @@ class InterceptedUnaryUnaryCall(_base_call.UnaryUnaryCall):
response = yield from call.__await__()
return response
async def wait_for_connection(self) -> None:
    # Waits for the interceptor chain to finish and yield the underlying
    # call object, then delegates to that call's wait_for_connection.
    call = await self._interceptors_task
    return await call.wait_for_connection()
class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall):
"""Final UnaryUnaryCall class finished with a response."""
@ -374,3 +378,6 @@ class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall):
# for telling the interpreter that __await__ is a generator.
yield None
return self._response
async def wait_for_connection(self) -> None:
    # This class wraps a call that already finished with a response (see
    # class docstring), so the connection was necessarily established;
    # there is nothing to wait for.
    pass

@ -28,6 +28,7 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
@ -36,9 +37,12 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',

@ -28,5 +28,6 @@
"unit.server_interceptor_test.TestServerInterceptor",
"unit.server_test.TestServer",
"unit.timeout_test.TestTimeout",
"unit.wait_for_connection_test.TestWaitForConnection",
"unit.wait_for_ready_test.TestWaitForReady"
]

@ -16,23 +16,23 @@
import asyncio
import logging
import unittest
import datetime
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2, test_pb2_grpc
from tests.unit.framework.common import test_constants
from tests_aio.unit._test_base import AioTestBase
from tests.unit import resources
from tests_aio.unit._test_server import start_test_server
from tests_aio.unit._constants import UNREACHABLE_TARGET
_SHORT_TIMEOUT_S = datetime.timedelta(seconds=1).total_seconds()
_NUM_STREAM_RESPONSES = 5
_RESPONSE_PAYLOAD_SIZE = 42
_REQUEST_PAYLOAD_SIZE = 7
_LOCAL_CANCEL_DETAILS_EXPECTATION = 'Locally cancelled by application!'
_RESPONSE_INTERVAL_US = test_constants.SHORT_TIMEOUT * 1000 * 1000
_UNREACHABLE_TARGET = '0.1:1111'
_RESPONSE_INTERVAL_US = int(_SHORT_TIMEOUT_S * 1000 * 1000)
_INFINITE_INTERVAL_US = 2**31 - 1
@ -78,7 +78,7 @@ class TestUnaryUnaryCall(_MulticallableTestMixin, AioTestBase):
self.assertIs(response, response_retry)
async def test_call_rpc_error(self):
async with aio.insecure_channel(_UNREACHABLE_TARGET) as channel:
async with aio.insecure_channel(UNREACHABLE_TARGET) as channel:
stub = test_pb2_grpc.TestServiceStub(channel)
call = stub.UnaryCall(messages_pb2.SimpleRequest())
@ -434,24 +434,24 @@ class TestUnaryStreamCall(_MulticallableTestMixin, AioTestBase):
interval_us=_RESPONSE_INTERVAL_US,
))
call = self._stub.StreamingOutputCall(
request, timeout=test_constants.SHORT_TIMEOUT * 2)
call = self._stub.StreamingOutputCall(request,
timeout=_SHORT_TIMEOUT_S * 2)
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Should be around the same as the timeout
remained_time = call.time_remaining()
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 5 / 2)
self.assertGreater(remained_time, _SHORT_TIMEOUT_S * 3 / 2)
self.assertLess(remained_time, _SHORT_TIMEOUT_S * 5 / 2)
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Should be around the timeout minus a unit of wait time
remained_time = call.time_remaining()
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT / 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2)
self.assertGreater(remained_time, _SHORT_TIMEOUT_S / 2)
self.assertLess(remained_time, _SHORT_TIMEOUT_S * 3 / 2)
self.assertEqual(grpc.StatusCode.OK, await call.code())
@ -538,14 +538,14 @@ class TestStreamUnaryCall(_MulticallableTestMixin, AioTestBase):
with self.assertRaises(asyncio.CancelledError):
for _ in range(_NUM_STREAM_RESPONSES):
yield request
await asyncio.sleep(test_constants.SHORT_TIMEOUT)
await asyncio.sleep(_SHORT_TIMEOUT_S)
request_iterator_received_the_exception.set()
call = self._stub.StreamingInputCall(request_iterator())
# Cancel the RPC after at least one response
async def cancel_later():
await asyncio.sleep(test_constants.SHORT_TIMEOUT * 2)
await asyncio.sleep(_SHORT_TIMEOUT_S * 2)
call.cancel()
cancel_later_task = self.loop.create_task(cancel_later())
@ -576,6 +576,33 @@ class TestStreamUnaryCall(_MulticallableTestMixin, AioTestBase):
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_call_rpc_error(self):
    # A stream-unary call against an unreachable target should fail with
    # UNAVAILABLE on its own, before the client writes any request.
    async with aio.insecure_channel(UNREACHABLE_TARGET) as channel:
        stub = test_pb2_grpc.TestServiceStub(channel)

        # The error should be raised automatically without any traffic.
        call = stub.StreamingInputCall()
        with self.assertRaises(aio.AioRpcError) as exception_context:
            await call

        self.assertEqual(grpc.StatusCode.UNAVAILABLE,
                         exception_context.exception.code())

        # The failure must also be observable through done() and code().
        self.assertTrue(call.done())
        self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code())
async def test_timeout(self):
    # With no request traffic, a short-deadline stream-unary call should
    # hit the deadline and report DEADLINE_EXCEEDED.
    call = self._stub.StreamingInputCall(timeout=_SHORT_TIMEOUT_S)

    # The error should be raised automatically without any traffic.
    with self.assertRaises(aio.AioRpcError) as exception_context:
        await call
    rpc_error = exception_context.exception
    self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, rpc_error.code())

    # The deadline failure must also be observable through done()/code().
    self.assertTrue(call.done())
    self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, await call.code())
# Prepares the request that stream in a ping-pong manner.
_STREAM_OUTPUT_REQUEST_ONE_RESPONSE = messages_pb2.StreamingOutputCallRequest()
@ -733,14 +760,14 @@ class TestStreamStreamCall(_MulticallableTestMixin, AioTestBase):
with self.assertRaises(asyncio.CancelledError):
for _ in range(_NUM_STREAM_RESPONSES):
yield request
await asyncio.sleep(test_constants.SHORT_TIMEOUT)
await asyncio.sleep(_SHORT_TIMEOUT_S)
request_iterator_received_the_exception.set()
call = self._stub.FullDuplexCall(request_iterator())
# Cancel the RPC after at least one response
async def cancel_later():
await asyncio.sleep(test_constants.SHORT_TIMEOUT * 2)
await asyncio.sleep(_SHORT_TIMEOUT_S * 2)
call.cancel()
cancel_later_task = self.loop.create_task(cancel_later())

@ -0,0 +1,159 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests behavior of the wait for connection API on client side."""
import asyncio
import logging
import unittest
import datetime
from typing import Callable, Tuple
import grpc
from grpc.experimental import aio
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
from tests_aio.unit import _common
from src.proto.grpc.testing import messages_pb2, test_pb2_grpc
from tests_aio.unit._constants import UNREACHABLE_TARGET
_REQUEST = b'\x01\x02\x03'
_TEST_METHOD = '/test/Test'
_NUM_STREAM_RESPONSES = 5
_REQUEST_PAYLOAD_SIZE = 7
_RESPONSE_PAYLOAD_SIZE = 42
class TestWaitForConnection(AioTestBase):
    """Verifies that wait_for_connection surfaces connectivity problems.

    Healthy calls must pass through wait_for_connection without raising
    and without swallowing any messages; calls against an unreachable
    target must raise AioRpcError with UNAVAILABLE.
    """

    async def setUp(self):
        address, self._server = await start_test_server()
        self._channel = aio.insecure_channel(address)
        self._dummy_channel = aio.insecure_channel(UNREACHABLE_TARGET)
        self._stub = test_pb2_grpc.TestServiceStub(self._channel)

    async def tearDown(self):
        await self._dummy_channel.close()
        await self._channel.close()
        await self._server.stop(None)

    async def _expect_unavailable(self, call):
        # Shared check for the *_error tests: wait_for_connection against
        # the unreachable channel must raise AioRpcError with UNAVAILABLE.
        with self.assertRaises(aio.AioRpcError) as exception_context:
            await call.wait_for_connection()
        rpc_error = exception_context.exception
        self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code())

    async def test_unary_unary_ok(self):
        """Unary-unary: connects cleanly, response still delivered."""
        call = self._stub.UnaryCall(messages_pb2.SimpleRequest())

        # No exception raised and no message swallowed.
        await call.wait_for_connection()

        response = await call
        self.assertIsInstance(response, messages_pb2.SimpleResponse)

    async def test_unary_stream_ok(self):
        """Unary-stream: connects cleanly, all responses still delivered."""
        request = messages_pb2.StreamingOutputCallRequest()
        request.response_parameters.extend(
            messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE)
            for _ in range(_NUM_STREAM_RESPONSES))

        call = self._stub.StreamingOutputCall(request)

        # No exception raised and no message swallowed.
        await call.wait_for_connection()

        num_responses = 0
        async for response in call:
            num_responses += 1
            self.assertIs(type(response),
                          messages_pb2.StreamingOutputCallResponse)
            self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
        self.assertEqual(_NUM_STREAM_RESPONSES, num_responses)

        self.assertEqual(await call.code(), grpc.StatusCode.OK)

    async def test_stream_unary_ok(self):
        """Stream-unary: connects cleanly, aggregation still correct."""
        call = self._stub.StreamingInputCall()

        # No exception raised and no message swallowed.
        await call.wait_for_connection()

        payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
        request = messages_pb2.StreamingInputCallRequest(payload=payload)
        for _ in range(_NUM_STREAM_RESPONSES):
            await call.write(request)
        await call.done_writing()

        response = await call
        self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
        self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
                         response.aggregated_payload_size)

        self.assertEqual(await call.code(), grpc.StatusCode.OK)

    async def test_stream_stream_ok(self):
        """Stream-stream: connects cleanly, ping-pong still works."""
        call = self._stub.FullDuplexCall()

        # No exception raised and no message swallowed.
        await call.wait_for_connection()

        request = messages_pb2.StreamingOutputCallRequest()
        request.response_parameters.append(
            messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))

        for _ in range(_NUM_STREAM_RESPONSES):
            await call.write(request)
            response = await call.read()
            self.assertIsInstance(response,
                                  messages_pb2.StreamingOutputCallResponse)
            self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))

        await call.done_writing()

        self.assertEqual(grpc.StatusCode.OK, await call.code())

    async def test_unary_unary_error(self):
        """Unary-unary against an unreachable target raises UNAVAILABLE."""
        await self._expect_unavailable(
            self._dummy_channel.unary_unary(_TEST_METHOD)(_REQUEST))

    async def test_unary_stream_error(self):
        """Unary-stream against an unreachable target raises UNAVAILABLE."""
        await self._expect_unavailable(
            self._dummy_channel.unary_stream(_TEST_METHOD)(_REQUEST))

    async def test_stream_unary_error(self):
        """Stream-unary against an unreachable target raises UNAVAILABLE."""
        await self._expect_unavailable(
            self._dummy_channel.stream_unary(_TEST_METHOD)())

    async def test_stream_stream_error(self):
        """Stream-stream against an unreachable target raises UNAVAILABLE."""
        await self._expect_unavailable(
            self._dummy_channel.stream_stream(_TEST_METHOD)())
if __name__ == '__main__':
    # Verbose logging and test output help diagnose async connectivity
    # failures when run standalone.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)

@ -14,10 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM google/dart:2.3
# Upgrade Dart to version 2.
RUN apt-get update && apt-get upgrade -y dart
FROM google/dart:2.7
# Define the default command.
CMD ["bash"]

@ -16,7 +16,11 @@
<%include file="../../python_stretch.include"/>
<%include file="../../compile_python_36.include"/>
<%include file="../../compile_python_38.include"/>
RUN apt-get update && apt-get install -y python3.5 python3.5-dev
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.5
RUN apt-get update && apt-get -t buster install -y python3.7 python3-all-dev
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.7

@ -464,7 +464,7 @@ TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigXds) {
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{ \"balancerName\": \"fake:///lb\" } }\n"
" { \"eds_experimental\":{ \"clusterName\": \"foo\" } }\n"
" ]\n"
"}";
grpc_error* error = GRPC_ERROR_NONE;
@ -474,7 +474,7 @@ TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigXds) {
static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
svc_cfg->GetGlobalParsedConfig(0));
auto lb_config = parsed_config->parsed_lb_config();
EXPECT_STREQ(lb_config->name(), "xds_experimental");
EXPECT_STREQ(lb_config->name(), "eds_experimental");
}
TEST_F(ClientChannelParserTest, UnknownLoadBalancingConfig) {
@ -544,14 +544,14 @@ TEST_F(ClientChannelParserTest, UnknownLoadBalancingPolicy) {
}
TEST_F(ClientChannelParserTest, LoadBalancingPolicyXdsNotAllowed) {
const char* test_json = "{\"loadBalancingPolicy\":\"xds_experimental\"}";
const char* test_json = "{\"loadBalancingPolicy\":\"eds_experimental\"}";
grpc_error* error = GRPC_ERROR_NONE;
auto svc_cfg = ServiceConfig::Create(test_json, &error);
std::regex regex(
"Service config parsing error.*referenced_errors.*"
"Global Params.*referenced_errors.*"
"Client channel global parser.*referenced_errors.*"
"field:loadBalancingPolicy error:xds_experimental requires "
"field:loadBalancingPolicy error:eds_experimental requires "
"a config. Please use loadBalancingConfig instead.");
VerifyRegexMatch(error, regex);
}

@ -3,5 +3,5 @@ forms a complete end-to-end test.
To add a new test or fixture:
- add the code to the relevant directory
- update gen_build_yaml.py to reflect the change
- update generate_tests.bzl to reflect the change
- regenerate projects

@ -16,22 +16,24 @@
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <stdio.h>
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
static const char oauth2_md[] = "Bearer aaslkfjs424535asdf";
static const char* client_identity_property_name = "smurf_name";
static const char* client_identity = "Brainy Smurf";
@ -139,6 +141,11 @@ void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture* f) {
static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* client_args) {
grpc_core::ExecCtx exec_ctx;
grpc_slice ca_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
const char* test_root_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
grpc_channel_credentials* ssl_creds =
grpc_ssl_credentials_create(test_root_cert, nullptr, nullptr, nullptr);
grpc_call_credentials* oauth2_creds = grpc_md_only_test_credentials_create(
@ -156,6 +163,7 @@ static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
grpc_channel_args_destroy(new_client_args);
grpc_channel_credentials_release(ssl_creds);
grpc_call_credentials_release(oauth2_creds);
grpc_slice_unref(ca_slice);
}
static int fail_server_auth_check(grpc_channel_args* server_args) {
@ -193,13 +201,23 @@ static grpc_auth_metadata_processor test_processor_create(int failing) {
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* server_args) {
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
test_server1_cert};
grpc_slice cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
nullptr, &pem_key_cert_pair, 1, 0, nullptr);
grpc_server_credentials_set_auth_metadata_processor(
ssl_creds, test_processor_create(fail_server_auth_check(server_args)));
chttp2_init_server_secure_fullstack(f, server_args, ssl_creds);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
}
/* All test configurations */

@ -16,24 +16,26 @@
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <stdio.h>
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tmpfile.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
struct fullstack_secure_fixture_data {
grpc_core::UniquePtr<char> localaddr;
};
@ -124,10 +126,20 @@ static int fail_server_auth_check(grpc_channel_args* server_args) {
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* server_args) {
grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
test_server1_cert};
grpc_slice cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
nullptr, &pem_cert_key_pair, 1, 0, nullptr);
nullptr, &pem_key_cert_pair, 1, 0, nullptr);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
if (fail_server_auth_check(server_args)) {
grpc_auth_metadata_processor processor = {process_auth_failure, nullptr,
nullptr};
@ -152,20 +164,9 @@ static grpc_end2end_test_config configs[] = {
int main(int argc, char** argv) {
size_t i;
FILE* roots_file;
size_t roots_size = strlen(test_root_cert);
char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
/* Set the SSL roots env var. */
roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
GPR_ASSERT(roots_filename != nullptr);
GPR_ASSERT(roots_file != nullptr);
GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
fclose(roots_file);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
@ -174,10 +175,5 @@ int main(int argc, char** argv) {
}
grpc_shutdown();
/* Cleanup. */
remove(roots_filename);
gpr_free(roots_filename);
return 0;
}

@ -16,24 +16,26 @@
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <stdio.h>
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tmpfile.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
struct fullstack_secure_fixture_data {
grpc_core::UniquePtr<char> localaddr;
bool server_credential_reloaded = false;
@ -48,10 +50,25 @@ ssl_server_certificate_config_callback(
fullstack_secure_fixture_data* ffd =
static_cast<fullstack_secure_fixture_data*>(user_data);
if (!ffd->server_credential_reloaded) {
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
test_server1_cert};
*config = grpc_ssl_server_certificate_config_create(test_root_cert,
grpc_slice ca_slice, cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* ca_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
*config = grpc_ssl_server_certificate_config_create(ca_cert,
&pem_key_cert_pair, 1);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
ffd->server_credential_reloaded = true;
return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
} else {
@ -175,20 +192,10 @@ static grpc_end2end_test_config configs[] = {
int main(int argc, char** argv) {
size_t i;
FILE* roots_file;
size_t roots_size = strlen(test_root_cert);
char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
/* Set the SSL roots env var. */
roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
GPR_ASSERT(roots_filename != nullptr);
GPR_ASSERT(roots_file != nullptr);
GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
fclose(roots_file);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
@ -198,9 +205,5 @@ int main(int argc, char** argv) {
grpc_shutdown();
/* Cleanup. */
remove(roots_filename);
gpr_free(roots_filename);
return 0;
}

@ -16,24 +16,26 @@
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <stdio.h>
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tmpfile.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/end2end/fixtures/proxy.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
typedef struct fullstack_secure_fixture_data {
grpc_end2end_proxy* proxy;
} fullstack_secure_fixture_data;
@ -41,10 +43,20 @@ typedef struct fullstack_secure_fixture_data {
static grpc_server* create_proxy_server(const char* port,
grpc_channel_args* server_args) {
grpc_server* s = grpc_server_create(server_args, nullptr);
grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
test_server1_cert};
grpc_slice cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
nullptr, &pem_cert_key_pair, 1, 0, nullptr);
nullptr, &pem_key_cert_pair, 1, 0, nullptr);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
GPR_ASSERT(grpc_server_add_secure_http2_port(s, port, ssl_creds));
grpc_server_credentials_release(ssl_creds);
return s;
@ -166,10 +178,20 @@ static int fail_server_auth_check(grpc_channel_args* server_args) {
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* server_args) {
grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
test_server1_cert};
grpc_slice cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
nullptr, &pem_cert_key_pair, 1, 0, nullptr);
nullptr, &pem_key_cert_pair, 1, 0, nullptr);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
if (fail_server_auth_check(server_args)) {
grpc_auth_metadata_processor processor = {process_auth_failure, nullptr,
nullptr};
@ -195,20 +217,10 @@ static grpc_end2end_test_config configs[] = {
int main(int argc, char** argv) {
size_t i;
FILE* roots_file;
size_t roots_size = strlen(test_root_cert);
char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
/* Set the SSL roots env var. */
roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
GPR_ASSERT(roots_filename != nullptr);
GPR_ASSERT(roots_file != nullptr);
GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
fclose(roots_file);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
@ -218,9 +230,5 @@ int main(int argc, char** argv) {
grpc_shutdown();
/* Cleanup. */
remove(roots_filename);
gpr_free(roots_filename);
return 0;
}

@ -30,14 +30,18 @@
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
typedef grpc_core::InlinedVector<grpc_core::Thread, 1> ThreadList;
struct fullstack_secure_fixture_data {
@ -140,17 +144,30 @@ static int client_cred_reload_sync(void* /*config_user_data*/,
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED;
return 0;
}
const grpc_ssl_pem_key_cert_pair pem_key_pair = {
test_server1_key,
test_server1_cert,
};
grpc_slice ca_slice, cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* ca_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
if (arg->key_materials_config->pem_key_cert_pair_list().empty()) {
const auto* pem_key_pair_ptr = &pem_key_pair;
const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
grpc_tls_key_materials_config_set_key_materials(
arg->key_materials_config, test_root_cert, &pem_key_pair_ptr, 1);
arg->key_materials_config, ca_cert, &pem_key_cert_pair_ptr, 1);
}
// new credential has been reloaded.
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
return 0;
}
@ -163,21 +180,34 @@ static int server_cred_reload_sync(void* /*config_user_data*/,
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED;
return 0;
}
const grpc_ssl_pem_key_cert_pair pem_key_pair = {
test_server1_key,
test_server1_cert,
};
grpc_slice ca_slice, cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* ca_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
GPR_ASSERT(arg != nullptr);
GPR_ASSERT(arg->key_materials_config != nullptr);
GPR_ASSERT(arg->key_materials_config->pem_key_cert_pair_list().data() !=
nullptr);
if (arg->key_materials_config->pem_key_cert_pair_list().empty()) {
const auto* pem_key_pair_ptr = &pem_key_pair;
const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
grpc_tls_key_materials_config_set_key_materials(
arg->key_materials_config, test_root_cert, &pem_key_pair_ptr, 1);
arg->key_materials_config, ca_cert, &pem_key_cert_pair_ptr, 1);
}
// new credential has been reloaded.
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
return 0;
}
@ -268,25 +298,13 @@ static grpc_end2end_test_config configs[] = {
};
int main(int argc, char** argv) {
FILE* roots_file;
size_t roots_size = strlen(test_root_cert);
char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
/* Set the SSL roots env var. */
roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
GPR_ASSERT(roots_filename != nullptr);
GPR_ASSERT(roots_file != nullptr);
GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
fclose(roots_file);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
for (size_t ind = 0; ind < sizeof(configs) / sizeof(*configs); ind++) {
grpc_end2end_tests(argc, argv, configs[ind]);
}
grpc_shutdown();
/* Cleanup. */
remove(roots_filename);
gpr_free(roots_filename);
return 0;
}

@ -11,408 +11,36 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
from __future__ import print_function
"""Generates the list of end2end test cases from generate_tests.bzl"""
import os
import sys
import yaml
import collections
import hashlib
# Schema of per-fixture capability flags; each field gates which tests and
# platforms a fixture participates in (consumed by compatible() below).
FixtureOptions = collections.namedtuple(
    'FixtureOptions',
    'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel'
)
# Baseline: insecure fullstack fixture enabled on all four platforms.
default_unsecure_fixture_options = FixtureOptions(
    True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True,
    False, [], [], True, False, True, False, True, False, True, True)
# Socketpair fixtures bypass the client channel, fullstack and DNS resolution.
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(
    fullstack=False, dns_resolver=False, client_channel=False)
# Same as the insecure baseline but with security enabled.
default_secure_fixture_options = default_unsecure_fixture_options._replace(
    secure=True)
# Unix-domain-socket fixtures: no DNS, POSIX-like platforms only, and the
# libuv iomgr is excluded.
uds_fixture_options = default_unsecure_fixture_options._replace(
    dns_resolver=False,
    platforms=['linux', 'mac', 'posix'],
    exclude_iomgrs=['uv'])
# Options shared by the h2_local_* fixtures (secure, same platform limits
# as UDS).
local_fixture_options = default_secure_fixture_options._replace(
    dns_resolver=False,
    platforms=['linux', 'mac', 'posix'],
    exclude_iomgrs=['uv'])
# Pre-created file-descriptor fixtures: no fullstack, no DNS, no client
# channel.
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
    dns_resolver=False,
    fullstack=False,
    platforms=['linux', 'mac', 'posix'],
    exclude_iomgrs=['uv'],
    client_channel=False)
# In-process transport: no sockets and no HTTP/2 framing, so most
# transport-level features are disabled.
inproc_fixture_options = default_secure_fixture_options._replace(
    dns_resolver=False,
    fullstack=False,
    name_resolution=False,
    supports_compression=False,
    is_inproc=True,
    is_http2=False,
    supports_write_buffering=False,
    client_channel=False)
# maps fixture name to whether it requires the security library
END2END_FIXTURES = {
'h2_compress':
default_unsecure_fixture_options._replace(enables_compression=True),
'h2_census':
default_unsecure_fixture_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'h2_load_reporting': default_unsecure_fixture_options,
'h2_fakesec':
default_secure_fixture_options._replace(ci_mac=False),
'h2_fd':
fd_unsecure_fixture_options,
'h2_full':
default_unsecure_fixture_options,
'h2_full+pipe':
default_unsecure_fixture_options._replace(platforms=['linux'],
exclude_iomgrs=['uv']),
'h2_full+trace':
default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds':
default_unsecure_fixture_options,
'h2_http_proxy':
default_unsecure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv'],
supports_proxy_auth=True),
'h2_oauth2':
default_secure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv']),
'h2_proxy':
default_unsecure_fixture_options._replace(includes_proxy=True,
ci_mac=False,
exclude_iomgrs=['uv']),
'h2_sockpair_1byte':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
exclude_configs=['msan'],
large_writes=False,
exclude_iomgrs=['uv']),
'h2_sockpair':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv']),
'h2_sockpair+trace':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
tracing=True,
large_writes=False,
exclude_iomgrs=['uv']),
'h2_ssl':
default_secure_fixture_options,
'h2_ssl_cred_reload':
default_secure_fixture_options,
'h2_tls':
default_secure_fixture_options,
'h2_local_uds':
local_fixture_options,
'h2_local_ipv4':
local_fixture_options,
'h2_local_ipv6':
local_fixture_options,
'h2_ssl_proxy':
default_secure_fixture_options._replace(includes_proxy=True,
ci_mac=False,
exclude_iomgrs=['uv']),
'h2_uds':
uds_fixture_options,
'inproc':
inproc_fixture_options
}
# Schema of per-test requirement flags, mirrored against FixtureOptions by
# compatible() below.
TestOptions = collections.namedtuple(
    'TestOptions',
    'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel'
)
default_test_options = TestOptions(False, False, False, True, False, True, 1.0,
                                   [], False, False, True, False, False, False,
                                   False, False, False)
# Connectivity-style tests additionally require a full client/server stack.
connectivity_test_options = default_test_options._replace(needs_fullstack=True)

# Change to three levels above this script's directory (presumably the
# repository root — confirm against the script's location) so relative
# paths resolve consistently.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
os.chdir(_ROOT)

# Relative CPU-cost weight assigned to lightweight tests.
LOWCPU = 0.1
# maps test names to options
END2END_TESTS = {
'authority_not_supported':
default_test_options,
'bad_hostname':
default_test_options._replace(needs_names=True),
'bad_ping':
connectivity_test_options._replace(proxyable=False),
'binary_metadata':
default_test_options._replace(cpu_cost=LOWCPU),
'resource_quota_server':
default_test_options._replace(large_writes=True,
proxyable=False,
allows_compression=False),
'call_creds':
default_test_options._replace(secure=True),
'cancel_after_accept':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_round_trip':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_with_status':
default_test_options._replace(cpu_cost=LOWCPU),
'compressed_payload':
default_test_options._replace(proxyable=False, needs_compression=True),
'connectivity':
connectivity_test_options._replace(needs_names=True,
proxyable=False,
cpu_cost=LOWCPU,
exclude_iomgrs=['uv']),
'channelz':
default_test_options,
'default_host':
default_test_options._replace(needs_fullstack=True,
needs_dns=True,
needs_names=True),
'call_host_override':
default_test_options._replace(needs_fullstack=True,
needs_dns=True,
needs_names=True),
'disappearing_server':
connectivity_test_options._replace(flaky=True, needs_names=True),
'empty_batch':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails':
default_test_options,
'filter_context':
default_test_options,
'filter_latency':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code':
default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'hpack_size':
default_test_options._replace(proxyable=False,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno':
default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request':
default_test_options,
'invoke_large_request':
default_test_options,
'keepalive_timeout':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
needs_http2=True),
'large_metadata':
default_test_options,
'max_concurrent_streams':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
exclude_inproc=True),
'max_connection_age':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'max_connection_idle':
connectivity_test_options._replace(proxyable=False,
exclude_iomgrs=['uv'],
cpu_cost=LOWCPU),
'max_message_length':
default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline':
default_test_options,
'no_error_on_hotpath':
default_test_options._replace(proxyable=False),
'no_logging':
default_test_options._replace(traceable=False),
'no_op':
default_test_options,
'payload':
default_test_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'load_reporting_hook': default_test_options,
'ping_pong_streaming':
default_test_options._replace(cpu_cost=LOWCPU),
'ping':
connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'proxy_auth':
default_test_options._replace(needs_proxy_auth=True),
'registered_call':
default_test_options,
'request_with_flags':
default_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'request_with_payload':
default_test_options._replace(cpu_cost=LOWCPU),
# TODO(roth): Remove proxyable=False for all retry tests once we
# have a way for the proxy to propagate the fact that trailing
# metadata is available when initial metadata is returned.
# See https://github.com/grpc/grpc/issues/14467 for context.
'retry':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_cancellation':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_disabled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_initial_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_subsequent_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status_before_recv_trailing_metadata_started':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_recv_initial_metadata':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_recv_message':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_server_pushback_delay':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_server_pushback_disabled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_after_commit':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_succeeds_before_replay_finished':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_throttled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_too_many_attempts':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'server_finishes_request':
default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_calls':
default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_tags':
default_test_options._replace(cpu_cost=LOWCPU),
'simple_cacheable_request':
default_test_options._replace(cpu_cost=LOWCPU),
'stream_compression_compressed_payload':
default_test_options._replace(proxyable=False, exclude_inproc=True),
'stream_compression_payload':
default_test_options._replace(exclude_inproc=True),
'stream_compression_ping_pong_streaming':
default_test_options._replace(exclude_inproc=True),
'simple_delayed_request':
connectivity_test_options,
'simple_metadata':
default_test_options,
'simple_request':
default_test_options,
'streaming_error_response':
default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata':
default_test_options,
'workaround_cronet_compression':
default_test_options,
'write_buffering':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
'write_buffering_at_end':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
}
def load(*args):
    """Stand-in for bazel's load(); accepts any arguments and does nothing."""
    del args  # intentionally ignored; signature only mirrors bazel's load()
def compatible(f, t):
    """Return True when test ``t`` can run against fixture ``f``.

    A pairing is rejected when the test requires a capability the fixture
    lacks, or when the test forbids a property the fixture has.
    """
    test = END2END_TESTS[t]
    fixture = END2END_FIXTURES[f]
    # Capabilities the test requires but the fixture does not provide.
    if test.needs_fullstack and not fixture.fullstack:
        return False
    if test.needs_dns and not fixture.dns_resolver:
        return False
    if test.needs_names and not fixture.name_resolution:
        return False
    if test.large_writes and not fixture.large_writes:
        return False
    if test.needs_compression and not fixture.supports_compression:
        return False
    if test.needs_http2 and not fixture.is_http2:
        return False
    if test.needs_proxy_auth and not fixture.supports_proxy_auth:
        return False
    if test.needs_write_buffering and not fixture.supports_write_buffering:
        return False
    if test.needs_client_channel and not fixture.client_channel:
        return False
    # Fixture properties the test cannot tolerate.
    if not test.proxyable and fixture.includes_proxy:
        return False
    if not test.traceable and fixture.tracing:
        return False
    if not test.allows_compression and fixture.enables_compression:
        return False
    if test.exclude_inproc and fixture.is_inproc:
        return False
    return True
def struct(**kwargs):
    """Stand-in for bazel's struct(): bundle the keyword args into a dict."""
    return dict(kwargs)
def without(l, e):
    """Return a copy of list `l` with the first occurrence of `e` removed.

    Raises ValueError (like list.remove) if `e` is not present. The input
    list is left unmodified.
    """
    result = list(l)
    del result[result.index(e)]
    return result
# generate_tests.bzl is now the source of truth for end2end tests.
# The .bzl file is basically a python file and we can "execute" it
# to get access to the variables it defines.
# NOTE(review): execfile() only exists in Python 2, so this script appears to
# require a Python 2 interpreter — under Python 3 this would need
# exec(open(path).read()); confirm before porting.
execfile('test/core/end2end/generate_tests.bzl')
# Originally, this method was used to generate end2end test cases for build.yaml,
# but since the test cases are now extracted from bazel BUILD file,
# this is not used for generating run_tests.py test cases anymore.
# Nevertheless, subset of the output is still used by end2end_tests.cc.template
# and end2end_nosec_tests.cc.template
# TODO(jtattermusch): cleanup this file, so that it only generates the data we need.
# Right now there's some duplication between generate_tests.bzl and this file.
def main():
    """Dump (as YAML on stdout) the end2end test data still consumed by
    end2end_tests.cc.template and end2end_nosec_tests.cc.template.
    """
    # BUG FIX: the merge left both the old attribute-access line
    # (END2END_TESTS[t].secure) and the new subscript line in the dict,
    # which is a syntax error. Since struct() in this file returns a plain
    # dict, the subscript form is the correct one to keep.
    # Local renamed from `json` to avoid shadowing the stdlib module name.
    data = {
        # needed by end2end_tests.cc.template and end2end_nosec_tests.cc.template
        'core_end2end_tests':
            dict((t, END2END_TESTS[t]['secure']) for t in END2END_TESTS.keys())
    }
    print(yaml.dump(data))

@ -282,6 +282,9 @@ END2END_TESTS = {
"retry_exceeds_buffer_size_in_initial_batch": _test_options(
needs_client_channel = True,
proxyable = False,
# TODO(jtattermusch): too long bazel test name makes the test flaky on Windows RBE
# See b/151617965
short_name = "retry_exceeds_buffer_size_in_init",
),
"retry_exceeds_buffer_size_in_subsequent_batch": _test_options(
needs_client_channel = True,
@ -429,6 +432,11 @@ def grpc_end2end_tests():
name = "%s_test" % f,
srcs = ["fixtures/%s.cc" % f],
language = "C++",
data = [
"//src/core/tsi/test_creds:ca.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
deps = [
":end2end_tests",
"//test/core/util:grpc_test_util",
@ -499,6 +507,11 @@ def grpc_end2end_nosec_tests():
name = "%s_nosec_test" % f,
srcs = ["fixtures/%s.cc" % f],
language = "C++",
data = [
"//src/core/tsi/test_creds:ca.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
deps = [
":end2end_nosec_tests",
"//test/core/util:grpc_test_util_unsecure",

@ -37,6 +37,11 @@ grpc_fuzzer(
name = "ssl_server_fuzzer",
srcs = ["ssl_server_fuzzer.cc"],
corpus = "corpus/ssl_server_corpus",
data = [
"//src/core/tsi/test_creds:ca.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
language = "C++",
tags = ["no_windows"],
deps = [
@ -248,6 +253,11 @@ grpc_cc_test(
grpc_cc_test(
name = "tls_security_connector_test",
srcs = ["tls_security_connector_test.cc"],
data = [
"//src/core/tsi/test_creds:ca.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
external_deps = [
"gtest",
],
@ -266,6 +276,11 @@ grpc_cc_test(
grpc_cc_test(
name = "grpc_tls_credentials_options_test",
srcs = ["grpc_tls_credentials_options_test.cc"],
data = [
"//src/core/tsi/test_creds:ca.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
external_deps = ["gtest"],
language = "C++",
deps = [

@ -17,7 +17,6 @@
*/
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include <gmock/gmock.h>
#include <grpc/support/alloc.h>
@ -25,28 +24,61 @@
#include <grpc/support/string_util.h>
#include <gtest/gtest.h>
#include "src/core/lib/iomgr/load_file.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
namespace testing {
static void SetKeyMaterials(grpc_tls_key_materials_config* config) {
const grpc_ssl_pem_key_cert_pair pem_key_pair = {
test_server1_key,
test_server1_cert,
};
const auto* pem_key_pair_ptr = &pem_key_pair;
grpc_tls_key_materials_config_set_key_materials(config, test_root_cert,
&pem_key_pair_ptr, 1);
grpc_slice ca_slice, cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* ca_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
grpc_tls_key_materials_config_set_key_materials(config, ca_cert,
&pem_key_cert_pair_ptr, 1);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
}
TEST(GrpcTlsCredentialsOptionsTest, SetKeyMaterials) {
grpc_tls_key_materials_config* config =
grpc_tls_key_materials_config_create();
SetKeyMaterials(config);
EXPECT_STREQ(config->pem_root_certs(), test_root_cert);
grpc_slice ca_slice, cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* ca_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
EXPECT_STREQ(config->pem_root_certs(), ca_cert);
EXPECT_EQ(config->pem_key_cert_pair_list().size(), 1);
EXPECT_STREQ(config->pem_key_cert_pair_list()[0].private_key(),
test_server1_key);
EXPECT_STREQ(config->pem_key_cert_pair_list()[0].cert_chain(),
test_server1_cert);
EXPECT_STREQ(config->pem_key_cert_pair_list()[0].private_key(), server_key);
EXPECT_STREQ(config->pem_key_cert_pair_list()[0].cert_chain(), server_cert);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
delete config;
}

@ -23,9 +23,12 @@
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/security_connector.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/mock_endpoint.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
bool squelch = true;
// ssl has an array of global gpr_mu's that are never released.
// Turning this on will fail the leak check.
@ -66,18 +69,25 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
mock_endpoint, grpc_slice_from_copied_buffer((const char*)data, size));
// Load key pair and establish server SSL credentials.
grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
grpc_slice ca_slice, cert_slice, key_slice;
ca_slice = grpc_slice_from_static_string(test_root_cert);
cert_slice = grpc_slice_from_static_string(test_server1_cert);
key_slice = grpc_slice_from_static_string(test_server1_key);
const char* ca_cert = (const char*)GRPC_SLICE_START_PTR(ca_slice);
pem_key_cert_pair.private_key =
(const char*)GRPC_SLICE_START_PTR(key_slice);
pem_key_cert_pair.cert_chain =
(const char*)GRPC_SLICE_START_PTR(cert_slice);
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* ca_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* creds = grpc_ssl_server_credentials_create(
ca_cert, &pem_key_cert_pair, 1, 0, nullptr);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
// Create security connector
grpc_core::RefCountedPtr<grpc_server_security_connector> sc =
@ -109,9 +119,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
sc.reset(DEBUG_LOCATION, "test");
grpc_server_credentials_release(creds);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
grpc_core::ExecCtx::Get()->Flush();
}

@ -26,22 +26,39 @@
#include <stdlib.h>
#include <string.h>
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/tsi/transport_security.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
namespace {
enum CredReloadResult { FAIL, SUCCESS, UNCHANGED, ASYNC };
void SetKeyMaterials(grpc_tls_key_materials_config* config) {
const grpc_ssl_pem_key_cert_pair pem_key_pair = {
test_server1_key,
test_server1_cert,
};
const auto* pem_key_pair_ptr = &pem_key_pair;
grpc_tls_key_materials_config_set_key_materials(config, test_root_cert,
&pem_key_pair_ptr, 1);
grpc_slice ca_slice, cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* ca_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
grpc_tls_key_materials_config_set_key_materials(config, ca_cert,
&pem_key_cert_pair_ptr, 1);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_slice_unref(ca_slice);
}
int CredReloadSuccess(void* /*config_user_data*/,

@ -136,6 +136,11 @@ grpc_cc_test(
grpc_cc_test(
name = "sequential_connectivity_test",
srcs = ["sequential_connectivity_test.cc"],
data = [
"//src/core/tsi/test_creds:ca.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
flaky = True, # TODO(b/151696318)
language = "C++",
deps = [

@ -25,10 +25,14 @@
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "src/core/lib/iomgr/load_file.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
typedef struct test_fixture {
const char* name;
void (*add_server_port)(grpc_server* server, const char* addr);
@ -139,17 +143,33 @@ static const test_fixture insecure_test = {
};
static void secure_test_add_port(grpc_server* server, const char* addr) {
grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
test_server1_cert};
grpc_slice cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
nullptr, &pem_cert_key_pair, 1, 0, nullptr);
nullptr, &pem_key_cert_pair, 1, 0, nullptr);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
grpc_server_add_secure_http2_port(server, addr, ssl_creds);
grpc_server_credentials_release(ssl_creds);
}
static grpc_channel* secure_test_create_channel(const char* addr) {
grpc_slice ca_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
const char* test_root_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
grpc_channel_credentials* ssl_creds =
grpc_ssl_credentials_create(test_root_cert, nullptr, nullptr, nullptr);
grpc_slice_unref(ca_slice);
grpc_arg ssl_name_override = {
GRPC_ARG_STRING,
const_cast<char*>(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG),

@ -31,6 +31,7 @@
#define ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME "bigtable.google.api.com"
#define ALTS_HANDSHAKER_CLIENT_TEST_TARGET_SERVICE_ACCOUNT1 "A@google.com"
#define ALTS_HANDSHAKER_CLIENT_TEST_TARGET_SERVICE_ACCOUNT2 "B@google.com"
#define ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE 64 * 1024
const size_t kHandshakerClientOpNum = 4;
const size_t kMaxRpcVersionMajor = 3;
@ -155,8 +156,8 @@ static grpc_call_error check_must_not_be_called(grpc_call* /*call*/,
/**
* A mock grpc_caller used to check correct execution of client_start operation.
* It checks if the client_start handshaker request is populated with correct
* handshake_security_protocol, application_protocol, and record_protocol, and
* op is correctly populated.
* handshake_security_protocol, application_protocol, record_protocol and
* max_frame_size, and op is correctly populated.
*/
static grpc_call_error check_client_start_success(grpc_call* /*call*/,
const grpc_op* op,
@ -196,7 +197,8 @@ static grpc_call_error check_client_start_success(grpc_call* /*call*/,
GPR_ASSERT(upb_strview_eql(
grpc_gcp_StartClientHandshakeReq_target_name(client_start),
upb_strview_makez(ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME)));
GPR_ASSERT(grpc_gcp_StartClientHandshakeReq_max_frame_size(client_start) ==
ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE);
GPR_ASSERT(validate_op(client, op, nops, true /* is_start */));
return GRPC_CALL_OK;
}
@ -204,8 +206,8 @@ static grpc_call_error check_client_start_success(grpc_call* /*call*/,
/**
* A mock grpc_caller used to check correct execution of server_start operation.
* It checks if the server_start handshaker request is populated with correct
* handshake_security_protocol, application_protocol, and record_protocol, and
* op is correctly populated.
* handshake_security_protocol, application_protocol, record_protocol and
* max_frame_size, and op is correctly populated.
*/
static grpc_call_error check_server_start_success(grpc_call* /*call*/,
const grpc_op* op,
@ -245,6 +247,8 @@ static grpc_call_error check_server_start_success(grpc_call* /*call*/,
upb_strview_makez(ALTS_RECORD_PROTOCOL)));
validate_rpc_protocol_versions(
grpc_gcp_StartServerHandshakeReq_rpc_versions(server_start));
GPR_ASSERT(grpc_gcp_StartServerHandshakeReq_max_frame_size(server_start) ==
ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE);
GPR_ASSERT(validate_op(client, op, nops, true /* is_start */));
return GRPC_CALL_OK;
}
@ -321,12 +325,14 @@ static alts_handshaker_client_test_config* create_config() {
nullptr, config->channel, ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING,
nullptr, server_options,
grpc_slice_from_static_string(ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME),
nullptr, nullptr, nullptr, nullptr, false);
nullptr, nullptr, nullptr, nullptr, false,
ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE);
config->client = alts_grpc_handshaker_client_create(
nullptr, config->channel, ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING,
nullptr, client_options,
grpc_slice_from_static_string(ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME),
nullptr, nullptr, nullptr, nullptr, true);
nullptr, nullptr, nullptr, nullptr, true,
ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE);
GPR_ASSERT(config->client != nullptr);
GPR_ASSERT(config->server != nullptr);
grpc_alts_credentials_options_destroy(client_options);

@ -27,6 +27,7 @@
#include "src/core/tsi/alts/handshaker/alts_shared_resource.h"
#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h"
#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h"
#include "src/core/tsi/transport_security_grpc.h"
#include "src/proto/grpc/gcp/altscontext.upb.h"
#include "test/core/tsi/alts/handshaker/alts_handshaker_service_api_test_lib.h"
#include "test/core/util/test_config.h"
@ -49,6 +50,7 @@
#define ALTS_TSI_HANDSHAKER_TEST_APPLICATION_PROTOCOL \
"test application protocol"
#define ALTS_TSI_HANDSHAKER_TEST_RECORD_PROTOCOL "test record protocol"
#define ALTS_TSI_HANDSHAKER_TEST_MAX_FRAME_SIZE 256 * 1024
using grpc_core::internal::alts_handshaker_client_check_fields_for_testing;
using grpc_core::internal::alts_handshaker_client_get_handshaker_for_testing;
@ -164,6 +166,8 @@ static grpc_byte_buffer* generate_handshaker_response(
upb_strview_makez(ALTS_TSI_HANDSHAKER_TEST_APPLICATION_PROTOCOL));
grpc_gcp_HandshakerResult_set_record_protocol(
result, upb_strview_makez(ALTS_TSI_HANDSHAKER_TEST_RECORD_PROTOCOL));
grpc_gcp_HandshakerResult_set_max_frame_size(
result, ALTS_TSI_HANDSHAKER_TEST_MAX_FRAME_SIZE);
break;
case SERVER_NEXT:
grpc_gcp_HandshakerResp_set_bytes_consumed(
@ -283,6 +287,17 @@ static void on_client_next_success_cb(tsi_result status, void* user_data,
GPR_ASSERT(memcmp(bytes_to_send, ALTS_TSI_HANDSHAKER_TEST_OUT_FRAME,
bytes_to_send_size) == 0);
GPR_ASSERT(result != nullptr);
// Validate max frame size value after Frame Size Negotiation. Here peer max
// frame size is greater than default value, and user specified max frame size
// is absent.
tsi_zero_copy_grpc_protector* zero_copy_protector = nullptr;
GPR_ASSERT(tsi_handshaker_result_create_zero_copy_grpc_protector(
result, nullptr, &zero_copy_protector) == TSI_OK);
size_t actual_max_frame_size;
tsi_zero_copy_grpc_protector_max_frame_size(zero_copy_protector,
&actual_max_frame_size);
GPR_ASSERT(actual_max_frame_size == kTsiAltsMaxFrameSize);
tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
/* Validate peer identity. */
tsi_peer peer;
GPR_ASSERT(tsi_handshaker_result_extract_peer(result, &peer) == TSI_OK);
@ -343,6 +358,20 @@ static void on_server_next_success_cb(tsi_result status, void* user_data,
GPR_ASSERT(bytes_to_send_size == 0);
GPR_ASSERT(bytes_to_send == nullptr);
GPR_ASSERT(result != nullptr);
// Validate max frame size value after Frame Size Negotiation. The negotiated
// frame size value equals minimum send frame size, due to the absence of peer
// max frame size.
tsi_zero_copy_grpc_protector* zero_copy_protector = nullptr;
size_t user_specified_max_frame_size =
ALTS_TSI_HANDSHAKER_TEST_MAX_FRAME_SIZE;
GPR_ASSERT(tsi_handshaker_result_create_zero_copy_grpc_protector(
result, &user_specified_max_frame_size,
&zero_copy_protector) == TSI_OK);
size_t actual_max_frame_size;
tsi_zero_copy_grpc_protector_max_frame_size(zero_copy_protector,
&actual_max_frame_size);
GPR_ASSERT(actual_max_frame_size == kTsiAltsMinFrameSize);
tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
/* Validate peer identity. */
tsi_peer peer;
GPR_ASSERT(tsi_handshaker_result_extract_peer(result, &peer) == TSI_OK);
@ -478,7 +507,7 @@ static tsi_handshaker* create_test_handshaker(bool is_client) {
grpc_alts_credentials_client_options_create();
alts_tsi_handshaker_create(options, "target_name",
ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING, is_client,
nullptr, &handshaker);
nullptr, &handshaker, 0);
alts_tsi_handshaker* alts_handshaker =
reinterpret_cast<alts_tsi_handshaker*>(handshaker);
alts_tsi_handshaker_set_client_vtable_for_testing(alts_handshaker, &vtable);

@ -14,12 +14,12 @@
load("//bazel:grpc_build_system.bzl", "grpc_cc_test")
def grpc_fuzzer(name, corpus, srcs = [], deps = [], size = "large", **kwargs):
def grpc_fuzzer(name, corpus, srcs = [], deps = [], data = [], size = "large", **kwargs):
grpc_cc_test(
name = name,
srcs = srcs,
deps = deps + ["//test/core/util:fuzzer_corpus_test"],
data = native.glob([corpus + "/**"]),
data = data + native.glob([corpus + "/**"]),
external_deps = [
"gtest",
],

@ -34,11 +34,15 @@ grpc_cc_test(
grpc_cc_test(
name = "client_channel_stress_test",
size = "large",
srcs = ["client_channel_stress_test.cc"],
# TODO(jtattermusch): test fails frequently on Win RBE, but passes locally
# reenable the tests once it works reliably on Win RBE.
# TODO(roth): Test marked as manual for now due to variable duration
# problem triggered by https://github.com/grpc/grpc/pull/22481.
# Once we figure out the problem, either re-enable or just decide to
# remove this test. Tracked internally in b/153136407.
tags = [
"manual",
"no_test_android", # fails on android due to "Too many open files".
"no_windows",
],

@ -1144,16 +1144,12 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }
void ResetStub(int fallback_timeout = 0, int failover_timeout = 0,
void ResetStub(int failover_timeout = 0,
const grpc::string& expected_targets = "",
int xds_resource_does_not_exist_timeout = 0) {
ChannelArguments args;
// TODO(juanlishen): Add setter to ChannelArguments.
if (fallback_timeout > 0) {
args.SetInt(GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS, fallback_timeout);
}
if (failover_timeout > 0) {
args.SetInt(GRPC_ARG_XDS_FAILOVER_TIMEOUT_MS, failover_timeout);
args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout);
}
if (xds_resource_does_not_exist_timeout > 0) {
args.SetInt(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS,
@ -1285,7 +1281,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
: kDefaultServiceConfigWithoutLoadReporting_;
result.service_config =
grpc_core::ServiceConfig::Create(service_config_json, &error);
GRPC_ERROR_UNREF(error);
ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
ASSERT_NE(result.service_config.get(), nullptr);
grpc_arg arg = grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
lb_channel_response_generator == nullptr
? lb_channel_response_generator_.get()
@ -1317,7 +1314,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
grpc_error* error = GRPC_ERROR_NONE;
result.service_config =
grpc_core::ServiceConfig::Create(service_config_json, &error);
GRPC_ERROR_UNREF(error);
ASSERT_NE(result.service_config.get(), nullptr);
ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
}
if (lb_channel_response_generator == nullptr) {
lb_channel_response_generator = lb_channel_response_generator_.get();
@ -1517,7 +1515,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{\n"
" { \"eds_experimental\":{\n"
" \"clusterName\": \"application_target_name\",\n"
" \"lrsLoadReportingServerName\": \"\"\n"
" } }\n"
" ]\n"
@ -1526,7 +1525,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{\n"
" { \"eds_experimental\":{\n"
" \"clusterName\": \"application_target_name\"\n"
" } }\n"
" ]\n"
"}";
@ -1561,7 +1561,7 @@ TEST_P(BasicTest, Vanilla) {
}
// Check LB policy name for the channel.
EXPECT_EQ(
(GetParam().use_xds_resolver() ? "cds_experimental" : "xds_experimental"),
(GetParam().use_xds_resolver() ? "cds_experimental" : "eds_experimental"),
channel_->GetLoadBalancingPolicyName());
}
@ -1939,7 +1939,7 @@ using SecureNamingTest = BasicTest;
// Tests that secure naming check passes if target name is expected.
TEST_P(SecureNamingTest, TargetNameIsExpected) {
// TODO(juanlishen): Use separate fake creds for the balancer channel.
ResetStub(0, 0, kApplicationTargetName_ + ";lb");
ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 100;
@ -1969,7 +1969,7 @@ TEST_P(SecureNamingTest, TargetNameIsUnexpected) {
// the name from the balancer doesn't match expectations.
ASSERT_DEATH_IF_SUPPORTED(
{
ResetStub(0, 0, kApplicationTargetName_ + ";lb");
ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
@ -2128,7 +2128,7 @@ TEST_P(LdsTest, RouteActionHasNoCluster) {
// Tests that LDS client times out when no response received.
TEST_P(LdsTest, Timeout) {
ResetStub(0, 0, "", 500);
ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kLdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
@ -2263,7 +2263,7 @@ TEST_P(RdsTest, RouteActionHasNoCluster) {
// Tests that RDS client times out when no response received.
TEST_P(RdsTest, Timeout) {
ResetStub(0, 0, "", 500);
ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kRdsTypeUrl);
balancers_[0]->ads_service()->SetLdsToUseDynamicRds();
SetNextResolution({});
@ -2336,7 +2336,7 @@ TEST_P(CdsTest, WrongLrsServer) {
// Tests that CDS client times out when no response received.
TEST_P(CdsTest, Timeout) {
ResetStub(0, 0, "", 500);
ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kCdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
@ -2346,26 +2346,13 @@ TEST_P(CdsTest, Timeout) {
using EdsTest = BasicTest;
TEST_P(EdsTest, Timeout) {
ResetStub(0, 0, "", 500);
ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kEdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
}
// Tests that EDS client should send a NACK if the EDS update contains
// no localities but does not say to drop all calls.
TEST_P(EdsTest, NacksNoLocalitiesWithoutDropAll) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args;
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName);
CheckRpcSendFailure();
EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state(),
AdsServiceImpl::NACKED);
}
// Tests that EDS client should send a NACK if the EDS update contains
// sparse priorities.
TEST_P(EdsTest, NacksSparsePriorityList) {
@ -2452,6 +2439,18 @@ TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) {
kNumRpcs / backends_.size());
}
// EDS update with no localities.
TEST_P(LocalityMapTest, NoLocalities) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// EDS response contains 2 localities, one with no endpoints.
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource({}), kDefaultResourceName);
Status status = SendRpc();
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
}
// Tests that the locality map can work properly even when it contains a large
// number of localities.
TEST_P(LocalityMapTest, StressTest) {
@ -2607,7 +2606,7 @@ class FailoverTest : public BasicTest {
public:
void SetUp() override {
BasicTest::SetUp();
ResetStub(0, 100, "");
ResetStub(100, "");
}
};
@ -3044,241 +3043,6 @@ TEST_P(DropTest, DropAll) {
}
}
using FallbackTest = BasicTest;
// Tests that RPCs are handled by the fallback backends before the serverlist is
// received, but will be handled by the serverlist after it's received.
TEST_P(FallbackTest, Vanilla) {
const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const size_t kNumBackendsInResolution = backends_.size() / 2;
ResetStub(kFallbackTimeoutMs);
SetNextResolution(GetBackendPorts(0, kNumBackendsInResolution));
SetNextResolutionForLbChannelAllBalancers();
// Send non-empty serverlist only after kServerlistDelayMs.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(kNumBackendsInResolution)},
});
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
AdsServiceImpl::BuildEdsResource(args), kServerlistDelayMs,
kDefaultResourceName));
// Wait until all the fallback backends are reachable.
WaitForAllBackends(0 /* start_index */,
kNumBackendsInResolution /* stop_index */);
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(kNumBackendsInResolution);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// Fallback is used: each backend returned by the resolver should have
// gotten one request.
for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
}
for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
// Wait until the serverlist reception has been processed and all backends
// in the serverlist are reachable.
WaitForAllBackends(kNumBackendsInResolution /* start_index */);
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
CheckRpcSendOk(backends_.size() - kNumBackendsInResolution);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// Serverlist is used: each backend returned by the balancer should
// have gotten one request.
for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
}
delayed_resource_setter.join();
}
// Tests that RPCs are handled by the updated fallback backends before
// serverlist is received,
TEST_P(FallbackTest, Update) {
const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const size_t kNumBackendsInResolution = backends_.size() / 3;
const size_t kNumBackendsInResolutionUpdate = backends_.size() / 3;
ResetStub(kFallbackTimeoutMs);
SetNextResolution(GetBackendPorts(0, kNumBackendsInResolution));
SetNextResolutionForLbChannelAllBalancers();
// Send non-empty serverlist only after kServerlistDelayMs.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(kNumBackendsInResolution +
kNumBackendsInResolutionUpdate)},
});
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
AdsServiceImpl::BuildEdsResource(args), kServerlistDelayMs,
kDefaultResourceName));
// Wait until all the fallback backends are reachable.
WaitForAllBackends(0 /* start_index */,
kNumBackendsInResolution /* stop_index */);
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(kNumBackendsInResolution);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// Fallback is used: each backend returned by the resolver should have
// gotten one request.
for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
}
for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
SetNextResolution(GetBackendPorts(
kNumBackendsInResolution,
kNumBackendsInResolution + kNumBackendsInResolutionUpdate));
// Wait until the resolution update has been processed and all the new
// fallback backends are reachable.
WaitForAllBackends(kNumBackendsInResolution /* start_index */,
kNumBackendsInResolution +
kNumBackendsInResolutionUpdate /* stop_index */);
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
CheckRpcSendOk(kNumBackendsInResolutionUpdate);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// The resolution update is used: each backend in the resolution update should
// have gotten one request.
for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
for (size_t i = kNumBackendsInResolution;
i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
}
for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
i < backends_.size(); ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
// Wait until the serverlist reception has been processed and all backends
// in the serverlist are reachable.
WaitForAllBackends(kNumBackendsInResolution +
kNumBackendsInResolutionUpdate /* start_index */);
gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
CheckRpcSendOk(backends_.size() - kNumBackendsInResolution -
kNumBackendsInResolutionUpdate);
gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
// Serverlist is used: each backend returned by the balancer should
// have gotten one request.
for (size_t i = 0;
i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
i < backends_.size(); ++i) {
EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
}
delayed_resource_setter.join();
}
// Tests that fallback will kick in immediately if the balancer channel fails.
TEST_P(FallbackTest, FallbackEarlyWhenBalancerChannelFails) {
const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
// Return an unreachable balancer and one fallback backend.
SetNextResolution({backends_[0]->port()});
SetNextResolutionForLbChannel({g_port_saver->GetPort()});
// Send RPC with deadline less than the fallback timeout and make sure it
// succeeds.
CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
/* wait_for_ready */ false);
}
// Tests that fallback will kick in immediately if the balancer call fails.
TEST_P(FallbackTest, FallbackEarlyWhenBalancerCallFails) {
const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
// Return one balancer and one fallback backend.
SetNextResolution({backends_[0]->port()});
SetNextResolutionForLbChannelAllBalancers();
// Balancer drops call without sending a serverlist.
balancers_[0]->ads_service()->NotifyDoneWithAdsCall();
// Send RPC with deadline less than the fallback timeout and make sure it
// succeeds.
CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
/* wait_for_ready */ false);
}
// Tests that fallback mode is entered if balancer response is received but the
// backends can't be reached.
TEST_P(FallbackTest, FallbackIfResponseReceivedButChildNotReady) {
const int kFallbackTimeoutMs = 500 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
SetNextResolution({backends_[0]->port()});
SetNextResolutionForLbChannelAllBalancers();
// Send a serverlist that only contains an unreachable backend before fallback
// timeout.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {g_port_saver->GetPort()}},
});
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName);
// Because no child policy is ready before fallback timeout, we enter fallback
// mode.
WaitForBackend(0);
}
// Tests that fallback mode is exited if the balancer tells the client to drop
// all the calls.
TEST_P(FallbackTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) {
// Return an unreachable balancer and one fallback backend.
SetNextResolution({backends_[0]->port()});
SetNextResolutionForLbChannel({g_port_saver->GetPort()});
// Enter fallback mode because the LB channel fails to connect.
WaitForBackend(0);
// Return a new balancer that sends a response to drop all calls.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, 1000000}};
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName);
SetNextResolutionForLbChannelAllBalancers();
// Send RPCs until failure.
gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(5000, GPR_TIMESPAN));
do {
auto status = SendRpc();
if (!status.ok()) break;
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
CheckRpcSendFailure();
}
// Tests that fallback mode is exited if the child policy becomes ready.
TEST_P(FallbackTest, FallbackModeIsExitedAfterChildReady) {
// Return an unreachable balancer and one fallback backend.
SetNextResolution({backends_[0]->port()});
SetNextResolutionForLbChannel({g_port_saver->GetPort()});
// Enter fallback mode because the LB channel fails to connect.
WaitForBackend(0);
// Return a new balancer that sends a dead backend.
ShutdownBackend(1);
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {backends_[1]->port()}},
});
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName);
SetNextResolutionForLbChannelAllBalancers();
// The state (TRANSIENT_FAILURE) update from the child policy will be ignored
// because we are still in fallback mode.
gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(500, GPR_TIMESPAN));
// Send 0.5 second worth of RPCs.
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// After the backend is restarted, the child policy will eventually be READY,
// and we will exit fallback mode.
StartBackend(1);
WaitForBackend(1);
// We have exited fallback mode, so calls will go to the child policy
// exclusively.
CheckRpcSendOk(100);
EXPECT_EQ(0U, backends_[0]->backend_service()->request_count());
EXPECT_EQ(100U, backends_[1]->backend_service()->request_count());
}
class BalancerUpdateTest : public XdsEnd2endTest {
public:
BalancerUpdateTest() : XdsEnd2endTest(4, 3) {}
@ -3781,12 +3545,6 @@ INSTANTIATE_TEST_SUITE_P(XdsTest, DropTest,
TestType(true, true)),
&TestTypeName);
// Fallback does not work with xds resolver.
INSTANTIATE_TEST_SUITE_P(XdsTest, FallbackTest,
::testing::Values(TestType(false, true),
TestType(false, false)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, BalancerUpdateTest,
::testing::Values(TestType(false, true),
TestType(false, false),

@ -206,7 +206,6 @@ grpc_cc_test(
srcs = [
"bm_fullstack_streaming_pump.cc",
],
flaky = True, # TODO(b/150422385)
tags = [
"no_mac", # to emulate "excluded_poll_engines: poll"
"no_windows",

@ -34,8 +34,6 @@ BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, UDS)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, InProcess)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, SockPair)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, InProcessCHTTP2)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, TCP)
@ -44,19 +42,15 @@ BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, UDS)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, InProcess)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, SockPair)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, InProcessCHTTP2)
->Range(0, 128 * 1024 * 1024);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinTCP)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinUDS)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinInProcess)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinSockPair)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinInProcessCHTTP2)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinTCP)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinUDS)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinInProcess)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinSockPair)->Arg(0);
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinInProcessCHTTP2)->Arg(0);
} // namespace testing

@ -1,7 +1,4 @@
# bazelrc file
# bazel >= 0.18 looks for %workspace%/.bazelrc (which redirects here)
# Older bazel versions look for %workspace%/tools/bazel.rc (this file)
# See https://github.com/bazelbuild/bazel/issues/6319
build --client_env=CC=clang
build --copt=-DGRPC_BAZEL_BUILD

@ -23,7 +23,7 @@ gen_build_yaml_dirs=" \
src/upb \
src/zlib \
src/c-ares \
test/core/end2end \
test/core/end2end \
test/cpp/naming \
tools/run_tests/lb_interop_tests"

@ -12,10 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM google/dart:2.3
# Upgrade Dart to version 2.
RUN apt-get update && apt-get upgrade -y dart
FROM google/dart:2.7
# Define the default command.
CMD ["bash"]

@ -84,6 +84,29 @@ RUN cd /tmp && \
RUN python3.6 -m ensurepip && \
python3.6 -m pip install coverage
#=================
# Compile CPython 3.8.0b4 from source
RUN apt-get update && apt-get install -y zlib1g-dev libssl-dev
RUN apt-get update && apt-get install -y jq build-essential libffi-dev
RUN cd /tmp && \
wget -q https://www.python.org/ftp/python/3.8.0/Python-3.8.0b4.tgz && \
tar xzvf Python-3.8.0b4.tgz && \
cd Python-3.8.0b4 && \
./configure && \
make install
RUN cd /tmp && \
echo "b8f4f897df967014ddb42033b90c3058 Python-3.8.0b4.tgz" > checksum.md5 && \
md5sum -c checksum.md5
RUN python3.8 -m ensurepip && \
python3.8 -m pip install coverage
RUN apt-get update && apt-get install -y python3.5 python3.5-dev
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.5
RUN apt-get update && apt-get -t buster install -y python3.7 python3-all-dev
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.7

@ -1091,6 +1091,8 @@ src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/http_proxy.h \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy.h \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.h \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
@ -1106,10 +1108,13 @@ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.h \
src/core/ext/filters/client_channel/lb_policy_factory.h \
src/core/ext/filters/client_channel/lb_policy_registry.cc \

@ -888,6 +888,8 @@ src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/http_proxy.h \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy.h \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.h \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
@ -903,10 +905,13 @@ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.h \
src/core/ext/filters/client_channel/lb_policy_factory.h \
src/core/ext/filters/client_channel/lb_policy_registry.cc \

@ -48,7 +48,7 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py
bazel build //src/python/grpcio_tests/tests_py3_only/interop:xds_interop_client
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,xds_lb "$PYTHON" \
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--test_case=all \
--project_id=grpc-testing \

@ -48,7 +48,7 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py
bazel build test/cpp/interop:xds_interop_client
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,xds_lb "$PYTHON" \
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--test_case=all \
--project_id=grpc-testing \

@ -3757,6 +3757,26 @@
],
"uses_polling": true
},
{
"args": [],
"benchmark": true,
"ci_platforms": [
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": false,
"language": "c++",
"name": "bm_fullstack_streaming_pump",
"platforms": [
"linux",
"posix"
],
"uses_polling": true
},
{
"args": [],
"benchmark": true,
@ -4077,28 +4097,6 @@
],
"uses_polling": true
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "client_channel_stress_test",
"platforms": [
"linux",
"mac",
"posix"
],
"uses_polling": true
},
{
"args": [],
"benchmark": false,

@ -165,24 +165,33 @@ pip_install_dir() {
cd "$PWD"
}
# On library/version/platforms combo that do not have a binary
# published, we may end up building a dependency from source. In that
# case, several of our build environment variables may disrupt the
# third-party build process. This function pipes through only the
# minimal environment necessary.
pip_install() {
/usr/bin/env -i PATH="$PATH" "$VENV_PYTHON" -m pip install "$@"
}
case "$VENV" in
*py36_gevent*)
# TODO(https://github.com/grpc/grpc/issues/15411) unpin this
$VENV_PYTHON -m pip install gevent==1.3.b1
pip_install gevent==1.3.b1
;;
*gevent*)
$VENV_PYTHON -m pip install -U gevent
pip_install -U gevent
;;
esac
$VENV_PYTHON -m pip install --upgrade pip==19.3.1
$VENV_PYTHON -m pip install --upgrade setuptools
$VENV_PYTHON -m pip install --upgrade cython
$VENV_PYTHON -m pip install --upgrade six enum34 protobuf
pip_install --upgrade pip==19.3.1
pip_install --upgrade setuptools
pip_install --upgrade cython
pip_install --upgrade six enum34 protobuf
if [ "$("$VENV_PYTHON" -c "import sys; print(sys.version_info[0])")" == "2" ]
then
$VENV_PYTHON -m pip install futures
pip_install futures
fi
pip_install_dir "$ROOT"
@ -214,9 +223,9 @@ pip_install_dir "$ROOT/src/python/grpcio_status"
pip_install_dir "$ROOT/src/python/grpcio_testing"
# Build/install tests
$VENV_PYTHON -m pip install coverage==4.4 oauth2client==4.1.0 \
google-auth==1.0.0 requests==2.14.2 \
googleapis-common-protos==1.5.5
pip_install coverage==4.4 oauth2client==4.1.0 \
google-auth==1.0.0 requests==2.14.2 \
googleapis-common-protos==1.5.5
$VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" preprocess
$VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" build_package_protos
pip_install_dir "$ROOT/src/python/grpcio_tests"

@ -866,9 +866,19 @@ class PythonLanguage(object):
else:
if args.iomgr_platform == 'asyncio':
return (python36_config,)
elif os.uname()[0] == 'Darwin':
# NOTE(rbellevi): Testing takes significantly longer on
# MacOS, so we restrict the number of interpreter versions
# tested.
return (
python27_config,
python36_config,
python37_config,
)
else:
return (
python27_config,
python35_config,
python36_config,
python37_config,
)

@ -122,20 +122,22 @@ argp.add_argument(
help=
'If provided, uses this file instead of retrieving via the GCP discovery '
'API')
argp.add_argument(
'--alpha_compute_discovery_document',
default=None,
type=str,
help='If provided, uses this file instead of retrieving via the alpha GCP '
'discovery API')
argp.add_argument('--network',
default='global/networks/default',
help='GCP network to use')
argp.add_argument('--service_port_range',
default='80',
default='8080:8110',
type=parse_port_range,
help='Listening port for created gRPC backends. Specified as '
'either a single int or as a range in the format min:max, in '
'which case an available port p will be chosen s.t. min <= p '
'<= max')
argp.add_argument('--forwarding_rule_ip_prefix',
default='172.16.0.',
help='If set, an available IP with this prefix followed by '
'0-255 will be used for the generated forwarding rule.')
argp.add_argument(
'--stats_port',
default=8079,
@ -180,6 +182,10 @@ argp.add_argument('--log_client_output',
help='Log captured client output',
default=False,
action='store_true')
argp.add_argument('--only_stable_gcp_apis',
help='Do not use alpha compute APIs',
default=False,
action='store_true')
args = argp.parse_args()
if args.verbose:
@ -192,6 +198,7 @@ _INSTANCE_GROUP_SIZE = args.instance_group_size
_NUM_TEST_RPCS = 10 * args.qps
_WAIT_FOR_STATS_SEC = 180
_WAIT_FOR_URL_MAP_PATCH_SEC = 300
_GCP_API_RETRIES = 5
_BOOTSTRAP_TEMPLATE = """
{{
"node": {{
@ -543,8 +550,8 @@ def create_instance_template(gcp, name, network, source_image, machine_type,
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.instanceTemplates().insert(project=gcp.project,
body=config).execute()
result = gcp.compute.instanceTemplates().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.instance_template = GcpResource(config['name'], result['targetLink'])
@ -561,13 +568,14 @@ def add_instance_group(gcp, zone, name, size):
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.instanceGroupManagers().insert(project=gcp.project,
zone=zone,
body=config).execute()
result = gcp.compute.instanceGroupManagers().insert(
project=gcp.project, zone=zone,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp, zone, result['name'])
result = gcp.compute.instanceGroupManagers().get(
project=gcp.project, zone=zone,
instanceGroupManager=config['name']).execute()
instanceGroupManager=config['name']).execute(
num_retries=_GCP_API_RETRIES)
instance_group = InstanceGroup(config['name'], result['instanceGroup'],
zone)
gcp.instance_groups.append(instance_group)
@ -575,16 +583,27 @@ def add_instance_group(gcp, zone, name, size):
def create_health_check(gcp, name):
config = {
'name': name,
'type': 'TCP',
'tcpHealthCheck': {
'portName': 'grpc'
if gcp.alpha_compute:
config = {
'name': name,
'type': 'GRPC',
'grpcHealthCheck': {
'portSpecification': 'USE_SERVING_PORT'
}
}
}
compute_to_use = gcp.alpha_compute
else:
config = {
'name': name,
'type': 'TCP',
'tcpHealthCheck': {
'portName': 'grpc'
}
}
compute_to_use = gcp.compute
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.healthChecks().insert(project=gcp.project,
body=config).execute()
result = compute_to_use.healthChecks().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.health_check = GcpResource(config['name'], result['targetLink'])
@ -600,24 +619,30 @@ def create_health_check_firewall_rule(gcp, name):
'targetTags': ['allow-health-checks'],
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.firewalls().insert(project=gcp.project,
body=config).execute()
result = gcp.compute.firewalls().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.health_check_firewall_rule = GcpResource(config['name'],
result['targetLink'])
def add_backend_service(gcp, name):
if gcp.alpha_compute:
protocol = 'GRPC'
compute_to_use = gcp.alpha_compute
else:
protocol = 'HTTP2'
compute_to_use = gcp.compute
config = {
'name': name,
'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
'healthChecks': [gcp.health_check.url],
'portName': 'grpc',
'protocol': 'HTTP2'
'protocol': protocol
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.backendServices().insert(project=gcp.project,
body=config).execute()
result = compute_to_use.backendServices().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
backend_service = GcpResource(config['name'], result['targetLink'])
gcp.backend_services.append(backend_service)
@ -638,56 +663,103 @@ def create_url_map(gcp, name, backend_service, host_name):
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.urlMaps().insert(project=gcp.project,
body=config).execute()
result = gcp.compute.urlMaps().insert(
project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.url_map = GcpResource(config['name'], result['targetLink'])
def create_target_http_proxy(gcp, name):
def patch_url_map_host_rule_with_port(gcp, name, backend_service, host_name):
config = {
'name': name,
'url_map': gcp.url_map.url,
'hostRules': [{
'hosts': ['%s:%d' % (host_name, gcp.service_port)],
'pathMatcher': _PATH_MATCHER_NAME
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.targetHttpProxies().insert(project=gcp.project,
body=config).execute()
result = gcp.compute.urlMaps().patch(
project=gcp.project, urlMap=name,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.target_http_proxy = GcpResource(config['name'], result['targetLink'])
def create_global_forwarding_rule(gcp, name, ip, port):
config = {
'name': name,
'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
'portRange': str(port),
'IPAddress': ip,
'network': args.network,
'target': gcp.target_http_proxy.url,
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.globalForwardingRules().insert(project=gcp.project,
body=config).execute()
def create_target_proxy(gcp, name):
if gcp.alpha_compute:
config = {
'name': name,
'url_map': gcp.url_map.url,
'validate_for_proxyless': True,
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.alpha_compute.targetGrpcProxies().insert(
project=gcp.project,
body=config).execute(num_retries=_GCP_API_RETRIES)
else:
config = {
'name': name,
'url_map': gcp.url_map.url,
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.targetHttpProxies().insert(
project=gcp.project,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.global_forwarding_rule = GcpResource(config['name'],
result['targetLink'])
gcp.target_proxy = GcpResource(config['name'], result['targetLink'])
def create_global_forwarding_rule(gcp, name, potential_ports):
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
for port in potential_ports:
try:
config = {
'name': name,
'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
'portRange': str(port),
'IPAddress': '0.0.0.0',
'network': args.network,
'target': gcp.target_proxy.url,
}
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.globalForwardingRules().insert(
project=gcp.project,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.global_forwarding_rule = GcpResource(config['name'],
result['targetLink'])
gcp.service_port = port
return
except googleapiclient.errors.HttpError as http_error:
logger.warning(
'Got error %s when attempting to create forwarding rule to '
'0.0.0.0:%d. Retrying with another port.' % (http_error, port))
def delete_global_forwarding_rule(gcp):
try:
result = gcp.compute.globalForwardingRules().delete(
project=gcp.project,
forwardingRule=gcp.global_forwarding_rule.name).execute()
forwardingRule=gcp.global_forwarding_rule.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
def delete_target_http_proxy(gcp):
def delete_target_proxy(gcp):
try:
result = gcp.compute.targetHttpProxies().delete(
project=gcp.project,
targetHttpProxy=gcp.target_http_proxy.name).execute()
if gcp.alpha_compute:
result = gcp.alpha_compute.targetGrpcProxies().delete(
project=gcp.project,
targetGrpcProxy=gcp.target_proxy.name).execute(
num_retries=_GCP_API_RETRIES)
else:
result = gcp.compute.targetHttpProxies().delete(
project=gcp.project,
targetHttpProxy=gcp.target_proxy.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@ -696,7 +768,8 @@ def delete_target_http_proxy(gcp):
def delete_url_map(gcp):
try:
result = gcp.compute.urlMaps().delete(
project=gcp.project, urlMap=gcp.url_map.name).execute()
project=gcp.project,
urlMap=gcp.url_map.name).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@ -707,7 +780,8 @@ def delete_backend_services(gcp):
try:
result = gcp.compute.backendServices().delete(
project=gcp.project,
backendService=backend_service.name).execute()
backendService=backend_service.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@ -717,7 +791,8 @@ def delete_firewall(gcp):
try:
result = gcp.compute.firewalls().delete(
project=gcp.project,
firewall=gcp.health_check_firewall_rule.name).execute()
firewall=gcp.health_check_firewall_rule.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@ -726,7 +801,8 @@ def delete_firewall(gcp):
def delete_health_check(gcp):
try:
result = gcp.compute.healthChecks().delete(
project=gcp.project, healthCheck=gcp.health_check.name).execute()
project=gcp.project, healthCheck=gcp.health_check.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@ -738,7 +814,8 @@ def delete_instance_groups(gcp):
result = gcp.compute.instanceGroupManagers().delete(
project=gcp.project,
zone=instance_group.zone,
instanceGroupManager=instance_group.name).execute()
instanceGroupManager=instance_group.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp,
instance_group.zone,
result['name'],
@ -751,7 +828,8 @@ def delete_instance_template(gcp):
try:
result = gcp.compute.instanceTemplates().delete(
project=gcp.project,
instanceTemplate=gcp.instance_template.name).execute()
instanceTemplate=gcp.instance_template.name).execute(
num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@ -761,6 +839,10 @@ def patch_backend_instances(gcp,
backend_service,
instance_groups,
balancing_mode='UTILIZATION'):
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
config = {
'backends': [{
'group': instance_group.url,
@ -769,10 +851,12 @@ def patch_backend_instances(gcp,
} for instance_group in instance_groups],
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.backendServices().patch(
result = compute_to_use.backendServices().patch(
project=gcp.project, backendService=backend_service.name,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp,
result['name'],
timeout_sec=_WAIT_FOR_BACKEND_SEC)
def resize_instance_group(gcp,
@ -783,7 +867,7 @@ def resize_instance_group(gcp,
project=gcp.project,
zone=instance_group.zone,
instanceGroupManager=instance_group.name,
size=new_size).execute()
size=new_size).execute(num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp,
instance_group.zone,
result['name'],
@ -795,7 +879,7 @@ def resize_instance_group(gcp,
break
if time.time() - start_time > timeout_sec:
raise Exception('Failed to resize primary instance group')
time.sleep(1)
time.sleep(2)
def patch_url_map_backend_service(gcp, backend_service):
@ -808,9 +892,9 @@ def patch_url_map_backend_service(gcp, backend_service):
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.urlMaps().patch(project=gcp.project,
urlMap=gcp.url_map.name,
body=config).execute()
result = gcp.compute.urlMaps().patch(
project=gcp.project, urlMap=gcp.url_map.name,
body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
@ -820,12 +904,13 @@ def wait_for_global_operation(gcp,
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = gcp.compute.globalOperations().get(
project=gcp.project, operation=operation).execute()
project=gcp.project,
operation=operation).execute(num_retries=_GCP_API_RETRIES)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return
time.sleep(1)
time.sleep(2)
raise Exception('Operation %s did not complete within %d', operation,
timeout_sec)
@ -837,12 +922,13 @@ def wait_for_zone_operation(gcp,
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = gcp.compute.zoneOperations().get(
project=gcp.project, zone=zone, operation=operation).execute()
project=gcp.project, zone=zone,
operation=operation).execute(num_retries=_GCP_API_RETRIES)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return
time.sleep(1)
time.sleep(2)
raise Exception('Operation %s did not complete within %d', operation,
timeout_sec)
@ -857,7 +943,7 @@ def wait_for_healthy_backends(gcp,
result = gcp.compute.backendServices().getHealth(
project=gcp.project,
backendService=backend_service.name,
body=config).execute()
body=config).execute(num_retries=_GCP_API_RETRIES)
if 'healthStatus' in result:
healthy = True
for instance in result['healthStatus']:
@ -866,7 +952,7 @@ def wait_for_healthy_backends(gcp,
break
if healthy:
return
time.sleep(1)
time.sleep(2)
raise Exception('Not all backends became healthy within %d seconds: %s' %
(timeout_sec, result))
@ -879,7 +965,7 @@ def get_instance_names(gcp, instance_group):
instanceGroup=instance_group.name,
body={
'instanceState': 'ALL'
}).execute()
}).execute(num_retries=_GCP_API_RETRIES)
if 'items' not in result:
return []
for item in result['items']:
@ -892,26 +978,11 @@ def get_instance_names(gcp, instance_group):
return instance_names
def start_xds_client(cmd):
bootstrap_path = None
with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file:
bootstrap_file.write(
_BOOTSTRAP_TEMPLATE.format(
node_id=socket.gethostname()).encode('utf-8'))
bootstrap_path = bootstrap_file.name
client_process = subprocess.Popen(shlex.split(cmd),
env=dict(
os.environ,
GRPC_XDS_BOOTSTRAP=bootstrap_path))
return client_process
def clean_up(gcp):
if gcp.global_forwarding_rule:
delete_global_forwarding_rule(gcp)
if gcp.target_http_proxy:
delete_target_http_proxy(gcp)
if gcp.target_proxy:
delete_target_proxy(gcp)
if gcp.url_map:
delete_url_map(gcp)
delete_backend_services(gcp)
@ -941,36 +1012,44 @@ class GcpResource(object):
class GcpState(object):
    """Container for the GCP API clients and all resources the test creates.

    Holds the discovery-built `compute` (v1) and `alpha_compute` clients plus
    handles to every created resource so clean_up() can tear them down.
    """

    def __init__(self, compute, alpha_compute, project):
        """Initialize with API clients and an empty resource inventory.

        Args:
          compute: googleapiclient compute v1 client.
          alpha_compute: googleapiclient compute alpha client, or None when
            only stable GCP APIs are allowed.
          project: GCP project id the resources live in.
        """
        # The original text carried two __init__ signatures (old
        # `(compute, project)` and new `(compute, alpha_compute, project)`)
        # from an unresolved diff; only the new one is kept.
        self.compute = compute
        self.alpha_compute = alpha_compute
        self.project = project
        self.health_check = None
        self.health_check_firewall_rule = None
        self.backend_services = []
        self.url_map = None
        # Both proxy attributes are retained: target_http_proxy for the
        # stable-API path, target_proxy for the alpha-API path.
        self.target_http_proxy = None
        self.target_proxy = None
        self.global_forwarding_rule = None
        self.service_port = None
        self.instance_template = None
        self.instance_groups = []
# Construct the GCP Compute API clients.  A local discovery document may be
# supplied to pin the API surface; otherwise the clients are built against
# the live discovery service.  The alpha client is created only when alpha
# GCP APIs are permitted by the flags.
alpha_compute = None
if args.compute_discovery_document:
    with open(args.compute_discovery_document, 'r') as doc_file:
        document = doc_file.read()
    compute = googleapiclient.discovery.build_from_document(document)
    use_alpha = (not args.only_stable_gcp_apis and
                 args.alpha_compute_discovery_document)
    if use_alpha:
        with open(args.alpha_compute_discovery_document, 'r') as doc_file:
            alpha_document = doc_file.read()
        alpha_compute = googleapiclient.discovery.build_from_document(
            alpha_document)
else:
    compute = googleapiclient.discovery.build('compute', 'v1')
    if not args.only_stable_gcp_apis:
        alpha_compute = googleapiclient.discovery.build('compute', 'alpha')
try:
gcp = GcpState(compute, args.project_id)
gcp = GcpState(compute, alpha_compute, args.project_id)
health_check_name = _BASE_HEALTH_CHECK_NAME + args.gcp_suffix
firewall_name = _BASE_FIREWALL_RULE_NAME + args.gcp_suffix
backend_service_name = _BASE_BACKEND_SERVICE_NAME + args.gcp_suffix
alternate_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-alternate' + args.gcp_suffix
url_map_name = _BASE_URL_MAP_NAME + args.gcp_suffix
service_host_name = _BASE_SERVICE_HOST + args.gcp_suffix
target_http_proxy_name = _BASE_TARGET_PROXY_NAME + args.gcp_suffix
target_proxy_name = _BASE_TARGET_PROXY_NAME + args.gcp_suffix
forwarding_rule_name = _BASE_FORWARDING_RULE_NAME + args.gcp_suffix
template_name = _BASE_TEMPLATE_NAME + args.gcp_suffix
instance_group_name = _BASE_INSTANCE_GROUP_NAME + args.gcp_suffix
@ -984,31 +1063,18 @@ try:
alternate_backend_service = add_backend_service(
gcp, alternate_backend_service_name)
create_url_map(gcp, url_map_name, backend_service, service_host_name)
create_target_http_proxy(gcp, target_http_proxy_name)
create_target_proxy(gcp, target_proxy_name)
potential_service_ports = list(args.service_port_range)
random.shuffle(potential_service_ports)
if args.forwarding_rule_ip_prefix == '':
potential_ips = ['0.0.0.0']
else:
potential_ips = [
args.forwarding_rule_ip_prefix + str(x) for x in range(256)
]
random.shuffle(potential_ips)
for port in potential_service_ports:
for ip in potential_ips:
try:
create_global_forwarding_rule(gcp, forwarding_rule_name, ip,
port)
gcp.service_port = port
break
except googleapiclient.errors.HttpError as http_error:
logger.warning(
'Got error %s when attempting to create forwarding rule to '
'%s:%d. Retrying with another ip:port.' %
(http_error, ip, port))
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if not gcp.service_port:
raise Exception(
'Failed to find a valid ip:port for the forwarding rule')
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
startup_script = get_startup_script(args.path_to_server_binary,
gcp.service_port)
create_instance_template(gcp, template_name, args.network,
@ -1031,19 +1097,22 @@ try:
if not gcp.instance_template:
result = compute.instanceTemplates().get(
project=args.project_id,
instanceTemplate=template_name).execute()
instanceTemplate=template_name).execute(
num_retries=_GCP_API_RETRIES)
gcp.instance_template = GcpResource(template_name,
result['selfLink'])
if not gcp.backend_services:
result = compute.backendServices().get(
project=args.project_id,
backendService=backend_service_name).execute()
backendService=backend_service_name).execute(
num_retries=_GCP_API_RETRIES)
backend_service = GcpResource(backend_service_name,
result['selfLink'])
gcp.backend_services.append(backend_service)
result = compute.backendServices().get(
project=args.project_id,
backendService=alternate_backend_service_name).execute()
backendService=alternate_backend_service_name).execute(
num_retries=_GCP_API_RETRIES)
alternate_backend_service = GcpResource(
alternate_backend_service_name, result['selfLink'])
gcp.backend_services.append(alternate_backend_service)
@ -1051,14 +1120,16 @@ try:
result = compute.instanceGroups().get(
project=args.project_id,
zone=args.zone,
instanceGroup=instance_group_name).execute()
instanceGroup=instance_group_name).execute(
num_retries=_GCP_API_RETRIES)
instance_group = InstanceGroup(instance_group_name,
result['selfLink'], args.zone)
gcp.instance_groups.append(instance_group)
result = compute.instanceGroups().get(
project=args.project_id,
zone=args.zone,
instanceGroup=same_zone_instance_group_name).execute()
instanceGroup=same_zone_instance_group_name).execute(
num_retries=_GCP_API_RETRIES)
same_zone_instance_group = InstanceGroup(
same_zone_instance_group_name, result['selfLink'],
args.zone)
@ -1068,7 +1139,7 @@ try:
project=args.project_id,
zone=args.secondary_zone,
instanceGroup=secondary_zone_instance_group_name
).execute()
).execute(num_retries=_GCP_API_RETRIES)
secondary_zone_instance_group = InstanceGroup(
secondary_zone_instance_group_name, result['selfLink'],
args.secondary_zone)
@ -1076,12 +1147,14 @@ try:
if not gcp.health_check:
result = compute.healthChecks().get(
project=args.project_id,
healthCheck=health_check_name).execute()
healthCheck=health_check_name).execute(
num_retries=_GCP_API_RETRIES)
gcp.health_check = GcpResource(health_check_name,
result['selfLink'])
if not gcp.url_map:
result = compute.urlMaps().get(project=args.project_id,
urlMap=url_map_name).execute()
result = compute.urlMaps().get(
project=args.project_id,
urlMap=url_map_name).execute(num_retries=_GCP_API_RETRIES)
gcp.url_map = GcpResource(url_map_name, result['selfLink'])
if not gcp.service_port:
gcp.service_port = args.service_port_range[0]

Loading…
Cancel
Save