Merge branch 'master' into c++msgcomp

reviewable/pr22567/r3
Author: Yash Tibrewal
Commit: d7c916be49
  1. .github/ISSUE_TEMPLATE/bug_report.md (2)
  2. .github/ISSUE_TEMPLATE/cleanup_request.md (2)
  3. .github/ISSUE_TEMPLATE/feature_request.md (2)
  4. .github/pull_request_template.md (2)
  5. BUILD (123)
  6. BUILD.gn (10)
  7. CMakeLists.txt (51)
  8. Makefile (69)
  9. build_autogenerated.yaml (41)
  10. config.m4 (9)
  11. config.w32 (9)
  12. doc/environment_variables.md (5)
  13. examples/cpp/compression/greeter_client.cc (2)
  14. gRPC-C++.podspec (4)
  15. gRPC-Core.podspec (11)
  16. grpc.gemspec (9)
  17. grpc.gyp (16)
  18. include/grpc/impl/codegen/grpc_types.h (17)
  19. include/grpcpp/impl/codegen/client_callback_impl.h (16)
  20. include/grpcpp/impl/codegen/method_handler_impl.h (11)
  21. package.xml (9)
  22. src/compiler/cpp_generator.cc (76)
  23. src/core/ext/filters/client_channel/client_channel.cc (49)
  24. src/core/ext/filters/client_channel/lb_policy/address_filtering.cc (83)
  25. src/core/ext/filters/client_channel/lb_policy/address_filtering.h (99)
  26. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (45)
  27. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc (89)
  28. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h (40)
  29. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc (5)
  30. src/core/ext/filters/client_channel/lb_policy/priority/priority.cc (875)
  31. src/core/ext/filters/client_channel/lb_policy/subchannel_list.h (7)
  32. src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc (722)
  33. src/core/ext/filters/client_channel/lb_policy/xds/cds.cc (65)
  34. src/core/ext/filters/client_channel/lb_policy/xds/eds.cc (1175)
  35. src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc (524)
  36. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (1735)
  37. src/core/ext/filters/client_channel/lb_policy/xds/xds.h (3)
  38. src/core/ext/filters/client_channel/lb_policy_registry.cc (2)
  39. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (23)
  40. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (83)
  41. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h (5)
  42. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc (6)
  43. src/core/ext/filters/client_channel/server_address.cc (15)
  44. src/core/ext/filters/client_channel/server_address.h (13)
  45. src/core/ext/filters/client_channel/xds/xds_api.cc (12)
  46. src/core/ext/filters/client_channel/xds/xds_api.h (5)
  47. src/core/ext/filters/client_channel/xds/xds_client.cc (2)
  48. src/core/ext/filters/client_channel/xds/xds_client_stats.h (18)
  49. src/core/ext/transport/chttp2/transport/flow_control.cc (6)
  50. src/core/ext/transport/cronet/transport/cronet_transport.cc (12)
  51. src/core/lib/channel/channel_stack.h (13)
  52. src/core/lib/security/credentials/credentials.cc (84)
  53. src/core/lib/security/credentials/credentials.h (56)
  54. src/core/lib/surface/init_secure.cc (5)
  55. src/core/plugin_registry/grpc_plugin_registry.cc (20)
  56. src/core/plugin_registry/grpc_unsecure_plugin_registry.cc (20)
  57. src/csharp/Grpc.Tools/build/_protobuf/Google.Protobuf.Tools.targets (2)
  58. src/php/README.md (2)
  59. src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi (5)
  60. src/python/grpcio/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi (2)
  61. src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pxd.pxi (12)
  62. src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi (47)
  63. src/python/grpcio/grpc/experimental/__init__.py (2)
  64. src/python/grpcio/grpc/experimental/aio/_base_call.py (13)
  65. src/python/grpcio/grpc/experimental/aio/_base_channel.py (15)
  66. src/python/grpcio/grpc/experimental/aio/_call.py (74)
  67. src/python/grpcio/grpc/experimental/aio/_channel.py (12)
  68. src/python/grpcio/grpc/experimental/aio/_interceptor.py (7)
  69. src/python/grpcio/grpc/experimental/aio/_typing.py (5)
  70. src/python/grpcio/grpc_core_dependencies.py (7)
  71. src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py (3)
  72. src/python/grpcio_tests/tests_aio/interop/local_interop_test.py (1)
  73. src/python/grpcio_tests/tests_aio/interop/methods.py (10)
  74. src/python/grpcio_tests/tests_aio/tests.json (1)
  75. src/python/grpcio_tests/tests_aio/unit/call_test.py (87)
  76. src/python/grpcio_tests/tests_aio/unit/channel_test.py (2)
  77. src/python/grpcio_tests/tests_aio/unit/client_interceptor_test.py (2)
  78. src/python/grpcio_tests/tests_aio/unit/close_channel_test.py (2)
  79. src/python/grpcio_tests/tests_aio/unit/init_test.py (4)
  80. src/python/grpcio_tests/tests_aio/unit/metadata_test.py (9)
  81. src/python/grpcio_tests/tests_aio/unit/server_interceptor_test.py (2)
  82. src/python/grpcio_tests/tests_aio/unit/wait_for_connection_test.py (159)
  83. templates/test/cpp/naming/resolver_component_tests_defs.include (2)
  84. templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template (5)
  85. templates/tools/dockerfile/test/python_stretch_default_x64/Dockerfile.template (6)
  86. test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc (5)
  87. test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc (9)
  88. test/core/client_channel/resolvers/fake_resolver_test.cc (17)
  89. test/core/client_channel/service_config_test.cc (8)
  90. test/core/compression/message_compress_fuzzer.cc (2)
  91. test/core/compression/message_decompress_fuzzer.cc (2)
  92. test/core/compression/stream_compression_fuzzer.cc (2)
  93. test/core/compression/stream_decompression_fuzzer.cc (2)
  94. test/core/end2end/BUILD (7)
  95. test/core/end2end/README (2)
  96. test/core/end2end/fixtures/h2_oauth2.cc (36)
  97. test/core/end2end/fixtures/h2_ssl.cc (49)
  98. test/core/end2end/fuzzers/client_fuzzer.cc (3)
  99. test/core/end2end/fuzzers/server_fuzzer.cc (3)
  100. test/core/end2end/gen_build_yaml.py (402)
Some files were not shown because too many files have changed in this diff.

@ -2,7 +2,7 @@
name: Report a bug
about: Create a report to help us improve
labels: kind/bug, priority/P2
assignees: markdroth
assignees: nicolasnoble
---

@ -2,7 +2,7 @@
name: Request a cleanup
about: Suggest a cleanup in our repository
labels: kind/internal cleanup, priority/P2
assignees: markdroth
assignees: nicolasnoble
---

@ -2,7 +2,7 @@
name: Request a feature
about: Suggest an idea for this project
labels: kind/enhancement, priority/P2
assignees: markdroth
assignees: nicolasnoble
---

@ -8,4 +8,4 @@ If you know who should review your pull request, please remove the mentioning be
-->
@markdroth
@nicolasnoble

123
BUILD

@ -319,8 +319,9 @@ grpc_cc_library(
deps = [
"grpc_common",
"grpc_lb_policy_cds",
"grpc_lb_policy_eds",
"grpc_lb_policy_grpclb",
"grpc_lb_policy_xds",
"grpc_lb_policy_lrs",
"grpc_resolver_xds",
],
)
@ -337,8 +338,9 @@ grpc_cc_library(
deps = [
"grpc_common",
"grpc_lb_policy_cds_secure",
"grpc_lb_policy_eds_secure",
"grpc_lb_policy_grpclb_secure",
"grpc_lb_policy_xds_secure",
"grpc_lb_policy_lrs_secure",
"grpc_resolver_xds_secure",
"grpc_secure",
"grpc_transport_chttp2_client_secure",
@ -1023,7 +1025,9 @@ grpc_cc_library(
"grpc_deadline_filter",
"grpc_client_authority_filter",
"grpc_lb_policy_pick_first",
"grpc_lb_policy_priority",
"grpc_lb_policy_round_robin",
"grpc_lb_policy_weighted_target",
"grpc_client_idle_filter",
"grpc_max_age_filter",
"grpc_message_size_filter",
@ -1235,6 +1239,21 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_grpclb_balancer_addresses",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
],
)
grpc_cc_library(
name = "grpc_lb_policy_grpclb",
srcs = [
@ -1255,6 +1274,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_grpclb_balancer_addresses",
"grpc_lb_upb",
"grpc_resolver_fake",
"grpc_transport_chttp2_client_insecure",
@ -1281,6 +1301,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_grpclb_balancer_addresses",
"grpc_lb_upb",
"grpc_resolver_fake",
"grpc_secure",
@ -1340,41 +1361,75 @@ grpc_cc_library(
)
grpc_cc_library(
name = "grpc_lb_policy_xds",
name = "grpc_lb_policy_cds",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_xds_client",
],
)
grpc_cc_library(
name = "grpc_lb_policy_cds_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_xds_client_secure",
],
)
grpc_cc_library(
name = "grpc_lb_policy_eds",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/eds.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
"grpc_xds_client",
],
)
grpc_cc_library(
name = "grpc_lb_policy_xds_secure",
name = "grpc_lb_policy_eds_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/eds.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
"grpc_xds_client_secure",
],
)
grpc_cc_library(
name = "grpc_lb_policy_cds",
name = "grpc_lb_policy_lrs",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc",
],
language = "c++",
deps = [
@ -1385,9 +1440,9 @@ grpc_cc_library(
)
grpc_cc_library(
name = "grpc_lb_policy_cds_secure",
name = "grpc_lb_policy_lrs_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc",
],
language = "c++",
deps = [
@ -1397,6 +1452,24 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_lb_address_filtering",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/address_filtering.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/address_filtering.h",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
],
)
grpc_cc_library(
name = "grpc_lb_subchannel_list",
hdrs = [
@ -1435,6 +1508,35 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_lb_policy_priority",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/priority/priority.cc",
],
external_deps = [
"absl/strings",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
],
)
grpc_cc_library(
name = "grpc_lb_policy_weighted_target",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_address_filtering",
],
)
grpc_cc_library(
name = "lb_server_load_reporting_filter",
srcs = [
@ -1606,6 +1708,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_grpclb_balancer_addresses",
"grpc_resolver_dns_selection",
],
)

@ -223,12 +223,16 @@ config("grpc_config") {
"src/core/ext/filters/client_channel/http_proxy.h",
"src/core/ext/filters/client_channel/lb_policy.cc",
"src/core/ext/filters/client_channel/lb_policy.h",
"src/core/ext/filters/client_channel/lb_policy/address_filtering.cc",
"src/core/ext/filters/client_channel/lb_policy/address_filtering.h",
"src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc",
"src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc",
@ -236,10 +240,13 @@ config("grpc_config") {
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc",
"src/core/ext/filters/client_channel/lb_policy/priority/priority.cc",
"src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc",
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h",
"src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/cds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/eds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
"src/core/ext/filters/client_channel/lb_policy_factory.h",
"src/core/ext/filters/client_channel/lb_policy_registry.cc",
@ -960,6 +967,7 @@ config("grpc_config") {
":address_sorting",
":upb",
":absl/types:optional",
":absl/strings:strings",
":absl/container:inlined_vector",
"//third_party/cares",
":address_sorting",

@ -457,7 +457,6 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_c compression_test)
add_dependencies(buildtests_c concurrent_connectivity_test)
add_dependencies(buildtests_c connection_refused_test)
add_dependencies(buildtests_c control_plane_credentials_test)
add_dependencies(buildtests_c cpu_test)
add_dependencies(buildtests_c dns_resolver_connectivity_using_ares_resolver_test)
add_dependencies(buildtests_c dns_resolver_connectivity_using_native_resolver_test)
@ -1316,16 +1315,21 @@ add_library(grpc
src/core/ext/filters/client_channel/http_connect_handshaker.cc
src/core/ext/filters/client_channel/http_proxy.cc
src/core/ext/filters/client_channel/lb_policy.cc
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
src/core/ext/filters/client_channel/lb_policy_registry.cc
src/core/ext/filters/client_channel/local_subchannel_pool.cc
src/core/ext/filters/client_channel/parse_address.cc
@ -1743,6 +1747,7 @@ target_link_libraries(grpc
address_sorting
upb
absl::optional
absl::strings
absl::inlined_vector
)
if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC)
@ -1969,16 +1974,21 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/http_connect_handshaker.cc
src/core/ext/filters/client_channel/http_proxy.cc
src/core/ext/filters/client_channel/lb_policy.cc
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
src/core/ext/filters/client_channel/lb_policy_registry.cc
src/core/ext/filters/client_channel/local_subchannel_pool.cc
src/core/ext/filters/client_channel/parse_address.cc
@ -2320,6 +2330,7 @@ target_link_libraries(grpc_unsecure
address_sorting
upb
absl::optional
absl::strings
absl::inlined_vector
)
if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC)
@ -4823,40 +4834,6 @@ target_link_libraries(connection_refused_test
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(control_plane_credentials_test
test/core/end2end/cq_verifier.cc
test/core/end2end/data/client_certs.cc
test/core/end2end/data/server1_cert.cc
test/core/end2end/data/server1_key.cc
test/core/end2end/data/test_root_cert.cc
test/core/security/control_plane_credentials_test.cc
)
target_include_directories(control_plane_credentials_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(control_plane_credentials_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr
address_sorting
upb
)
endif()
if(gRPC_BUILD_TESTS)

@ -1047,7 +1047,6 @@ completion_queue_threading_test: $(BINDIR)/$(CONFIG)/completion_queue_threading_
compression_test: $(BINDIR)/$(CONFIG)/compression_test
concurrent_connectivity_test: $(BINDIR)/$(CONFIG)/concurrent_connectivity_test
connection_refused_test: $(BINDIR)/$(CONFIG)/connection_refused_test
control_plane_credentials_test: $(BINDIR)/$(CONFIG)/control_plane_credentials_test
cpu_test: $(BINDIR)/$(CONFIG)/cpu_test
dns_resolver_connectivity_using_ares_resolver_test: $(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_ares_resolver_test
dns_resolver_connectivity_using_native_resolver_test: $(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_native_resolver_test
@ -1424,7 +1423,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/compression_test \
$(BINDIR)/$(CONFIG)/concurrent_connectivity_test \
$(BINDIR)/$(CONFIG)/connection_refused_test \
$(BINDIR)/$(CONFIG)/control_plane_credentials_test \
$(BINDIR)/$(CONFIG)/cpu_test \
$(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_ares_resolver_test \
$(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_native_resolver_test \
@ -1916,16 +1914,12 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/cmdline_test || ( echo test cmdline_test failed ; exit 1 )
$(E) "[RUN] Testing combiner_test"
$(Q) $(BINDIR)/$(CONFIG)/combiner_test || ( echo test combiner_test failed ; exit 1 )
$(E) "[RUN] Testing completion_queue_threading_test"
$(Q) $(BINDIR)/$(CONFIG)/completion_queue_threading_test || ( echo test completion_queue_threading_test failed ; exit 1 )
$(E) "[RUN] Testing compression_test"
$(Q) $(BINDIR)/$(CONFIG)/compression_test || ( echo test compression_test failed ; exit 1 )
$(E) "[RUN] Testing concurrent_connectivity_test"
$(Q) $(BINDIR)/$(CONFIG)/concurrent_connectivity_test || ( echo test concurrent_connectivity_test failed ; exit 1 )
$(E) "[RUN] Testing connection_refused_test"
$(Q) $(BINDIR)/$(CONFIG)/connection_refused_test || ( echo test connection_refused_test failed ; exit 1 )
$(E) "[RUN] Testing control_plane_credentials_test"
$(Q) $(BINDIR)/$(CONFIG)/control_plane_credentials_test || ( echo test control_plane_credentials_test failed ; exit 1 )
$(E) "[RUN] Testing cpu_test"
$(Q) $(BINDIR)/$(CONFIG)/cpu_test || ( echo test cpu_test failed ; exit 1 )
$(E) "[RUN] Testing dns_resolver_connectivity_using_ares_resolver_test"
@ -2210,8 +2204,6 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/cli_call_test || ( echo test cli_call_test failed ; exit 1 )
$(E) "[RUN] Testing client_callback_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/client_callback_end2end_test || ( echo test client_callback_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing client_channel_stress_test"
$(Q) $(BINDIR)/$(CONFIG)/client_channel_stress_test || ( echo test client_channel_stress_test failed ; exit 1 )
$(E) "[RUN] Testing client_interceptors_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/client_interceptors_end2end_test || ( echo test client_interceptors_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing codegen_test_full"
@ -3646,16 +3638,21 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/http_connect_handshaker.cc \
src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy_registry.cc \
src/core/ext/filters/client_channel/local_subchannel_pool.cc \
src/core/ext/filters/client_channel/parse_address.cc \
@ -4274,16 +4271,21 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/http_connect_handshaker.cc \
src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy_registry.cc \
src/core/ext/filters/client_channel/local_subchannel_pool.cc \
src/core/ext/filters/client_channel/parse_address.cc \
@ -7840,53 +7842,6 @@ endif
endif
CONTROL_PLANE_CREDENTIALS_TEST_SRC = \
test/core/end2end/cq_verifier.cc \
test/core/end2end/data/client_certs.cc \
test/core/end2end/data/server1_cert.cc \
test/core/end2end/data/server1_key.cc \
test/core/end2end/data/test_root_cert.cc \
test/core/security/control_plane_credentials_test.cc \
CONTROL_PLANE_CREDENTIALS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CONTROL_PLANE_CREDENTIALS_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/control_plane_credentials_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/control_plane_credentials_test: $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/control_plane_credentials_test
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/cq_verifier.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/end2end/data/client_certs.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/end2end/data/server1_cert.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/end2end/data/server1_key.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/end2end/data/test_root_cert.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/security/control_plane_credentials_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
deps_control_plane_credentials_test: $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS:.o=.dep)
endif
endif
CPU_TEST_SRC = \
test/core/gpr/cpu_test.cc \

@ -382,9 +382,11 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.h
- src/core/ext/filters/client_channel/http_proxy.h
- src/core/ext/filters/client_channel/lb_policy.h
- src/core/ext/filters/client_channel/lb_policy/address_filtering.h
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@ -739,16 +741,21 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.cc
- src/core/ext/filters/client_channel/http_proxy.cc
- src/core/ext/filters/client_channel/lb_policy.cc
- src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
- src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
- src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
- src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
- src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
- src/core/ext/filters/client_channel/lb_policy_registry.cc
- src/core/ext/filters/client_channel/local_subchannel_pool.cc
- src/core/ext/filters/client_channel/parse_address.cc
@ -1130,6 +1137,7 @@ libs:
- address_sorting
- upb
- absl/types:optional
- absl/strings:strings
- absl/container:inlined_vector
baselib: true
dll: true
@ -1276,9 +1284,11 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.h
- src/core/ext/filters/client_channel/http_proxy.h
- src/core/ext/filters/client_channel/lb_policy.h
- src/core/ext/filters/client_channel/lb_policy/address_filtering.h
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@ -1568,16 +1578,21 @@ libs:
- src/core/ext/filters/client_channel/http_connect_handshaker.cc
- src/core/ext/filters/client_channel/http_proxy.cc
- src/core/ext/filters/client_channel/lb_policy.cc
- src/core/ext/filters/client_channel/lb_policy/address_filtering.cc
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
- src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
- src/core/ext/filters/client_channel/lb_policy/priority/priority.cc
- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
- src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc
- src/core/ext/filters/client_channel/lb_policy/xds/cds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
- src/core/ext/filters/client_channel/lb_policy_registry.cc
- src/core/ext/filters/client_channel/local_subchannel_pool.cc
- src/core/ext/filters/client_channel/parse_address.cc
@ -1884,6 +1899,7 @@ libs:
- address_sorting
- upb
- absl/types:optional
- absl/strings:strings
- absl/container:inlined_vector
baselib: true
dll: true
@ -3090,6 +3106,7 @@ targets:
- mac
- name: completion_queue_threading_test
build: test
run: false
language: c
headers: []
src:
@ -3139,25 +3156,6 @@ targets:
- gpr
- address_sorting
- upb
- name: control_plane_credentials_test
build: test
language: c
headers:
- test/core/end2end/cq_verifier.h
- test/core/end2end/data/ssl_test_data.h
src:
- test/core/end2end/cq_verifier.cc
- test/core/end2end/data/client_certs.cc
- test/core/end2end/data/server1_cert.cc
- test/core/end2end/data/server1_key.cc
- test/core/end2end/data/test_root_cert.cc
- test/core/security/control_plane_credentials_test.cc
deps:
- grpc_test_util
- grpc
- gpr
- address_sorting
- upb
- name: cpu_test
build: test
language: c
@ -5513,6 +5511,7 @@ targets:
- name: client_channel_stress_test
gtest: true
build: test
run: false
language: c++
headers:
- test/cpp/end2end/test_service_impl.h

@ -50,16 +50,21 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/http_connect_handshaker.cc \
src/core/ext/filters/client_channel/http_proxy.cc \
src/core/ext/filters/client_channel/lb_policy.cc \
src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \
src/core/ext/filters/client_channel/lb_policy_registry.cc \
src/core/ext/filters/client_channel/local_subchannel_pool.cc \
src/core/ext/filters/client_channel/parse_address.cc \
@ -820,7 +825,9 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/pick_first)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/priority)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/round_robin)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/weighted_target)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/xds)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/dns)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/dns/c_ares)

@ -19,16 +19,21 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\http_connect_handshaker.cc " +
"src\\core\\ext\\filters\\client_channel\\http_proxy.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\address_filtering.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\child_policy_handler.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\client_load_reporting_filter.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_balancer_addresses.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_channel_secure.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_client_stats.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\load_balancer_api.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first\\pick_first.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\priority\\priority.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\weighted_target\\weighted_target.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\cds.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\eds.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\lrs.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\local_subchannel_pool.cc " +
"src\\core\\ext\\filters\\client_channel\\parse_address.cc " +
@ -820,7 +825,9 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\priority");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\weighted_target");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\xds");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver\\dns");

@ -57,6 +57,7 @@ some configuration as environment variables that can be set.
- compression - traces compression operations
- connectivity_state - traces connectivity state changes to channels
- cronet - traces state in the cronet transport engine
- eds_lb - traces eds LB policy
- executor - traces grpc's internal thread pool ('the executor')
- glb - traces the grpclb load balancer
- handshaker - traces handshaking state
@ -66,12 +67,14 @@ some configuration as environment variables that can be set.
- http1 - traces HTTP/1.x operations performed by gRPC
- inproc - traces the in-process transport
- flowctl - traces http2 flow control
- lrs_lb - traces lrs LB policy
- op_failure - traces error information when failure is pushed onto a
completion queue
- pick_first - traces the pick first load balancing policy
- plugin_credentials - traces plugin credentials
- pollable_refcount - traces reference counting of 'pollable' objects (only
in DEBUG)
- priority_lb - traces priority LB policy
- resource_quota - trace resource quota objects internals
- round_robin - traces the round_robin load balancing policy
- queue_pluck
@ -84,8 +87,8 @@ some configuration as environment variables that can be set.
- transport_security - traces metadata about secure channel establishment
- tcp - traces bytes in and out of a channel
- tsi - traces tsi transport security
- weighted_target_lb - traces weighted_target LB policy
- xds_client - traces xds client
- xds_lb - traces xds LB policy
- xds_resolver - traces xds resolver
The following tracers will only run in binaries built in DEBUG mode. This is
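The tracer names added above (eds_lb, lrs_lb, priority_lb, weighted_target_lb) cover the LB policies split out of the old monolithic xds policy in this merge. They are normally enabled from the shell through the GRPC_TRACE and GRPC_VERBOSITY environment variables; the C++ sketch below sets them programmatically instead, which is merely one convenient option for a test binary and assumes a POSIX setenv.

#include <cstdlib>

#include <grpcpp/grpcpp.h>

int main() {
  // The trace flags are read when the library initializes, so set them
  // before creating any gRPC object.
  setenv("GRPC_TRACE", "priority_lb,weighted_target_lb,eds_lb,lrs_lb", 1);
  setenv("GRPC_VERBOSITY", "DEBUG", 1);

  auto channel = grpc::CreateChannel("localhost:50051",
                                     grpc::InsecureChannelCredentials());
  // ... issue RPCs here; LB policy decisions are now traced to stderr ...
  (void)channel;
  return 0;
}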

@ -85,7 +85,7 @@ int main(int argc, char** argv) {
args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP);
GreeterClient greeter(grpc::CreateCustomChannel(
"localhost:50051", grpc::InsecureChannelCredentials(), args));
std::string user("world");
std::string user("world world world world");
std::string reply = greeter.SayHello(user);
std::cout << "Greeter received: " << reply << std::endl;
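The example change above pads the request so that the gzip compression configured a few lines earlier has a payload worth compressing. For context, a minimal client-side sketch of the two compression knobs involved: the channel-level default via grpc::ChannelArguments (exactly as in the diff) and a per-call override via grpc::ClientContext, using the hello-world Greeter stub as a stand-in.

#include <grpcpp/grpcpp.h>

#include "helloworld.grpc.pb.h"  // generated Greeter stub from the examples

int main() {
  // Channel-level default: every message on this channel is gzip-compressed.
  grpc::ChannelArguments args;
  args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP);
  auto channel = grpc::CreateCustomChannel(
      "localhost:50051", grpc::InsecureChannelCredentials(), args);
  auto stub = helloworld::Greeter::NewStub(channel);

  helloworld::HelloRequest request;
  request.set_name("world world world world");  // larger payload, visible gain
  helloworld::HelloReply reply;

  // Per-call override: opt this one RPC out of compression.
  grpc::ClientContext context;
  context.set_compression_algorithm(GRPC_COMPRESS_NONE);
  grpc::Status status = stub->SayHello(&context, request, &reply);
  return status.ok() ? 0 : 1;
}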

@ -233,9 +233,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_connect_handshaker.h',
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
@ -682,9 +684,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_connect_handshaker.h',
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',

@ -206,12 +206,16 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
@ -219,10 +223,13 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.h',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.h',
'src/core/ext/filters/client_channel/lb_policy_factory.h',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
@ -1030,9 +1037,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/http_connect_handshaker.h',
'src/core/ext/filters/client_channel/http_proxy.h',
'src/core/ext/filters/client_channel/lb_policy.h',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.h',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',

@ -128,12 +128,16 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/http_proxy.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/address_filtering.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/address_filtering.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc )
@ -141,10 +145,13 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/priority/priority.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/cds.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/eds.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy_factory.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy_registry.cc )

@ -426,6 +426,7 @@
'address_sorting',
'upb',
'absl/types:optional',
'absl/strings:strings',
'absl/container:inlined_vector',
],
'sources': [
@ -442,16 +443,21 @@
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',
@ -915,6 +921,7 @@
'address_sorting',
'upb',
'absl/types:optional',
'absl/strings:strings',
'absl/container:inlined_vector',
],
'sources': [
@ -931,16 +938,21 @@
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',

@ -348,18 +348,11 @@ typedef struct {
balancer before using fallback backend addresses from the resolver.
If 0, enter fallback mode immediately. Default value is 10000. */
#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms"
/* Time in milliseconds to wait before a locality is deleted after it's removed
from the received EDS update. If 0, delete the locality immediately. Default
value is 15 minutes. */
#define GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS \
"grpc.xds_locality_retention_interval_ms"
/* Timeout in milliseconds to wait for the localities of a specific priority to
complete their initial connection attempt before xDS fails over to the next
priority. Specifically, the connection attempt of a priority is considered
completed when any locality of that priority is ready or all the localities
of that priority fail to connect. If 0, failover happens immediately. Default
value is 10 seconds. */
#define GRPC_ARG_XDS_FAILOVER_TIMEOUT_MS "grpc.xds_failover_timeout_ms"
/* Timeout in milliseconds to wait for the child of a specific priority to
complete its initial connection attempt before the priority LB policy fails
over to the next priority. Default value is 10 seconds. */
#define GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS \
"grpc.priority_failover_timeout_ms"
/* Timeout in milliseconds to wait for a resource to be returned from
* the xds server before assuming that it does not exist.
* The default is 15 seconds. */
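The comment rewrite above reflects that failover timing now belongs to the new priority LB policy rather than the removed per-locality logic in the old xds policy. As a hedged illustration of how an application could tune it, the sketch below sets the channel-argument key defined by GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS; the 15000 ms value is arbitrary, chosen only to show the mechanism.

#include <grpcpp/grpcpp.h>

// Sketch: build a channel whose priority LB policy waits 15 s per priority
// before failing over to the next one.
std::shared_ptr<grpc::Channel> MakeChannelWithSlowFailover(
    const std::string& target) {
  grpc::ChannelArguments args;
  args.SetInt("grpc.priority_failover_timeout_ms", 15000);
  return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(),
                                   args);
}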

@ -267,8 +267,12 @@ class ClientBidiReactor {
/// StartWritesDone that indicates that there will be no more write ops.
/// The number of RemoveHold calls must match the total number of AddHold
/// calls plus the number of holds added by AddMultipleHolds.
/// The argument to AddMultipleHolds must be positive.
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) { stream_->AddHold(holds); }
void AddMultipleHolds(int holds) {
GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
stream_->AddHold(holds);
}
void RemoveHold() { stream_->RemoveHold(); }
/// Notifies the application that all operations associated with this RPC
@ -331,7 +335,10 @@ class ClientReadReactor {
void StartRead(Response* resp) { reader_->Read(resp); }
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) { reader_->AddHold(holds); }
void AddMultipleHolds(int holds) {
GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
reader_->AddHold(holds);
}
void RemoveHold() { reader_->RemoveHold(); }
virtual void OnDone(const ::grpc::Status& /*s*/) {}
@ -364,7 +371,10 @@ class ClientWriteReactor {
void StartWritesDone() { writer_->WritesDone(); }
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) { writer_->AddHold(holds); }
void AddMultipleHolds(int holds) {
GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
writer_->AddHold(holds);
}
void RemoveHold() { writer_->RemoveHold(); }
virtual void OnDone(const ::grpc::Status& /*s*/) {}
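The assertions added above enforce the contract spelled out in the comment: hold counts must be positive, and every AddHold or AddMultipleHolds must eventually be matched by RemoveHold, otherwise OnDone can never be delivered. A minimal sketch of that pattern on a client bidi reactor, assuming hypothetical EchoRequest/EchoResponse messages and an assumed generated async stub method that binds the reactor:

// Sketch only, not library code: keep the RPC open while another thread
// feeds writes into the reactor.
class EchoReactor : public grpc::ClientBidiReactor<EchoRequest, EchoResponse> {
 public:
  // Call after the stub has bound this reactor to the stream, e.g. via a
  // generated stub->async()->Echo(&context, this) (name assumed).
  void Begin() {
    AddHold();     // same as AddMultipleHolds(1); the count must be > 0
    StartCall();
  }
  // May be called later, from any thread, thanks to the hold.
  // (A real reactor must wait for OnWriteDone before the next StartWrite.)
  void Send(const EchoRequest& req) {
    request_ = req;
    StartWrite(&request_);
  }
  void Close() {
    StartWritesDone();
    RemoveHold();  // must balance the AddHold() above or OnDone never fires
  }
  void OnDone(const grpc::Status& /*status*/) override { /* clean up */ }

 private:
  EchoRequest request_;
};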

@ -303,10 +303,13 @@ class BidiStreamingHandler
::grpc_impl::ServerReaderWriter<ResponseType, RequestType>*)>
func,
ServiceType* service)
// TODO(vjpai): When gRPC supports C++14, move-capture func in the below
: TemplatedBidiStreamingHandler<
::grpc_impl::ServerReaderWriter<ResponseType, RequestType>, false>(
std::bind(func, service, std::placeholders::_1,
std::placeholders::_2)) {}
[func, service](
::grpc_impl::ServerContext* ctx,
::grpc_impl::ServerReaderWriter<ResponseType, RequestType>*
streamer) { return func(service, ctx, streamer); }) {}
};
template <class RequestType, class ResponseType>
@ -321,7 +324,7 @@ class StreamedUnaryHandler
func)
: TemplatedBidiStreamingHandler<
::grpc_impl::ServerUnaryStreamer<RequestType, ResponseType>, true>(
func) {}
std::move(func)) {}
};
template <class RequestType, class ResponseType>
@ -336,7 +339,7 @@ class SplitServerStreamingHandler
func)
: TemplatedBidiStreamingHandler<
::grpc_impl::ServerSplitStreamer<RequestType, ResponseType>, false>(
func) {}
std::move(func)) {}
};
/// General method handler class for errors that prevent real method use
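The handler constructors above drop std::bind in favor of an explicit capturing lambda (and std::move for the one-shot handler functions), which states the captured state plainly and is generally easier to read and optimize. A self-contained illustration of the same rewrite, independent of the gRPC handler types:

#include <functional>
#include <iostream>

struct Service {
  int Handle(int ctx, int streamer) { return ctx + streamer; }
};

int main() {
  Service service;
  auto method = &Service::Handle;

  // Before: placeholders hide which arguments go where.
  std::function<int(int, int)> bound =
      std::bind(method, &service, std::placeholders::_1, std::placeholders::_2);

  // After: the capture list and parameter names make the forwarding explicit.
  std::function<int(int, int)> as_lambda =
      [method, &service](int ctx, int streamer) {
        return (service.*method)(ctx, streamer);
      };

  std::cout << bound(1, 2) << " " << as_lambda(1, 2) << "\n";  // prints "3 3"
  return 0;
}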

@ -108,12 +108,16 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/http_proxy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/address_filtering.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/address_filtering.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc" role="src" />
@ -121,10 +125,13 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/priority/priority.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/subchannel_list.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/cds.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/xds.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/eds.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/xds/xds.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy_registry.cc" role="src" />

@ -1343,11 +1343,14 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
" new ::grpc::internal::StreamedUnaryHandler<\n"
" $Request$, $Response$>(\n"
" [this](::grpc_impl::ServerContext* context,\n"
" ::grpc_impl::ServerUnaryStreamer<\n"
" $Request$, $Response$>* streamer) {\n"
" return this->Streamed$Method$(context,\n"
" streamer);\n"
" }));\n"
"}\n");
printer->Print(*vars,
"~WithStreamedUnaryMethod_$Method$() override {\n"
@ -1391,16 +1394,18 @@ void PrintHeaderServerMethodSplitStreaming(
"{}\n");
printer->Print(" public:\n");
printer->Indent();
printer->Print(
*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::SplitServerStreamingHandler< $Request$, "
"$Response$>(std::bind"
"(&WithSplitStreamingMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::SplitServerStreamingHandler<\n"
" $Request$, $Response$>(\n"
" [this](::grpc_impl::ServerContext* context,\n"
" ::grpc_impl::ServerSplitStreamer<\n"
" $Request$, $Response$>* streamer) {\n"
" return this->Streamed$Method$(context,\n"
" streamer);\n"
" }));\n"
"}\n");
printer->Print(*vars,
"~WithSplitStreamingMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
@ -2251,7 +2256,12 @@ void PrintSourceService(grpc_generator::Printer* printer,
" new ::grpc::internal::RpcMethodHandler< $ns$$Service$::Service, "
"$Request$, "
"$Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" const $Request$* req,\n"
" $Response$* resp) {\n"
" return service->$Method$(ctx, req, resp);\n"
" }, this)));\n");
} else if (ClientOnlyStreaming(method.get())) {
printer->Print(
*vars,
@ -2260,7 +2270,12 @@ void PrintSourceService(grpc_generator::Printer* printer,
" ::grpc::internal::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::internal::ClientStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" ::grpc_impl::ServerReader<$Request$>* reader,\n"
" $Response$* resp) {\n"
" return service->$Method$(ctx, reader, resp);\n"
" }, this)));\n");
} else if (ServerOnlyStreaming(method.get())) {
printer->Print(
*vars,
@ -2269,16 +2284,25 @@ void PrintSourceService(grpc_generator::Printer* printer,
" ::grpc::internal::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::internal::ServerStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" const $Request$* req,\n"
" ::grpc_impl::ServerWriter<$Response$>* writer) {\n"
" return service->$Method$(ctx, req, writer);\n"
" }, this)));\n");
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
printer->Print(*vars,
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" ::grpc_impl::ServerReaderWriter<$Response$,\n"
" $Request$>* stream) {\n"
" return service->$Method$(ctx, stream);\n"
" }, this)));\n");
}
}
printer->Outdent();
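To make the new printer strings concrete, the bidi-streaming registration above expands to roughly the following in a generated service constructor; the RouteGuide service, RouteChat method, RouteNote message, and index 3 are hypothetical, only the shape comes from the template strings.
AddMethod(new ::grpc::internal::RpcServiceMethod(
    RouteGuide_method_names[3],
    ::grpc::internal::RpcMethod::BIDI_STREAMING,
    new ::grpc::internal::BidiStreamingHandler<
        routeguide::RouteGuide::Service, routeguide::RouteNote,
        routeguide::RouteNote>(
        [](routeguide::RouteGuide::Service* service,
           ::grpc_impl::ServerContext* ctx,
           ::grpc_impl::ServerReaderWriter<routeguide::RouteNote,
                                           routeguide::RouteNote>* stream) {
          return service->RouteChat(ctx, stream);
        },
        this)));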

@ -1637,25 +1637,6 @@ void ChannelData::ProcessLbPolicy(
grpc_channel_args_find(resolver_result.args, GRPC_ARG_LB_POLICY_NAME);
policy_name = grpc_channel_arg_get_string(channel_arg);
}
// Special case: If at least one balancer address is present, we use
// the grpclb policy, regardless of what the resolver has returned.
bool found_balancer_address = false;
for (size_t i = 0; i < resolver_result.addresses.size(); ++i) {
const ServerAddress& address = resolver_result.addresses[i];
if (address.IsBalancer()) {
found_balancer_address = true;
break;
}
}
if (found_balancer_address) {
if (policy_name != nullptr && strcmp(policy_name, "grpclb") != 0) {
gpr_log(GPR_INFO,
"resolver requested LB policy %s but provided at least one "
"balancer address -- forcing use of grpclb LB policy",
policy_name);
}
policy_name = "grpclb";
}
// Use pick_first if nothing was specified.
if (policy_name == nullptr) policy_name = "pick_first";
@ -2283,10 +2264,32 @@ void CallData::FreeCachedSendOpDataForCompletedBatch(
void CallData::RecvTrailingMetadataReadyForLoadBalancingPolicy(
void* arg, grpc_error* error) {
CallData* calld = static_cast<CallData*>(arg);
// Set error if call did not succeed.
grpc_error* error_for_lb = GRPC_ERROR_NONE;
if (error != GRPC_ERROR_NONE) {
error_for_lb = error;
} else {
const auto& fields = calld->recv_trailing_metadata_->idx.named;
GPR_ASSERT(fields.grpc_status != nullptr);
grpc_status_code status =
grpc_get_status_code_from_metadata(fields.grpc_status->md);
if (status != GRPC_STATUS_OK) {
error_for_lb = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("call failed"),
GRPC_ERROR_INT_GRPC_STATUS, status);
if (fields.grpc_message != nullptr) {
error_for_lb = grpc_error_set_str(
error_for_lb, GRPC_ERROR_STR_GRPC_MESSAGE,
grpc_slice_ref_internal(GRPC_MDVALUE(fields.grpc_message->md)));
}
}
}
// Invoke callback to LB policy.
Metadata trailing_metadata(calld, calld->recv_trailing_metadata_);
calld->lb_recv_trailing_metadata_ready_(error, &trailing_metadata,
calld->lb_recv_trailing_metadata_ready_(error_for_lb, &trailing_metadata,
&calld->lb_call_state_);
if (error == GRPC_ERROR_NONE) GRPC_ERROR_UNREF(error_for_lb);
// Chain to original callback.
Closure::Run(DEBUG_LOCATION, calld->original_recv_trailing_metadata_ready_,
GRPC_ERROR_REF(error));
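A hypothetical consumer-side sketch (not part of this change): an LB policy's recv_trailing_metadata_ready callback could recover the call status that the channel now encodes on error_for_lb along these lines.
#include <grpc/status.h>

#include "src/core/lib/iomgr/error.h"

grpc_status_code StatusFromLbError(grpc_error* error) {
  if (error == GRPC_ERROR_NONE) return GRPC_STATUS_OK;
  intptr_t status = GRPC_STATUS_UNKNOWN;
  grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status);
  return static_cast<grpc_status_code>(status);
}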
@ -3975,8 +3978,10 @@ bool CallData::PickSubchannelLocked(grpc_call_element* elem,
if (pick_queued_) RemoveCallFromQueuedPicksLocked(elem);
// Handle drops.
if (GPR_UNLIKELY(result.subchannel == nullptr)) {
result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
result.error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
} else {
// Grab a ref to the connected subchannel while we're still
// holding the data plane mutex.

@ -0,0 +1,83 @@
//
// Copyright 2020 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/lib/channel/channel_args.h"
#define GRPC_ARG_HIERARCHICAL_PATH "grpc.internal.address.hierarchical_path"
namespace grpc_core {
namespace {
void* HierarchicalPathCopy(void* p) {
std::vector<std::string>* path = static_cast<std::vector<std::string>*>(p);
return static_cast<void*>(new std::vector<std::string>(*path));
}
void HierarchicalPathDestroy(void* p) {
std::vector<std::string>* path = static_cast<std::vector<std::string>*>(p);
delete path;
}
int HierarchicalPathCompare(void* p1, void* p2) {
std::vector<std::string>* path1 = static_cast<std::vector<std::string>*>(p1);
std::vector<std::string>* path2 = static_cast<std::vector<std::string>*>(p2);
for (size_t i = 0; i < path1->size(); ++i) {
if (path2->size() == i) return 1;
int r = (*path1)[i].compare((*path2)[i]);
if (r != 0) return r;
}
if (path2->size() > path1->size()) return -1;
return 0;
}
const grpc_arg_pointer_vtable hierarchical_path_arg_vtable = {
HierarchicalPathCopy, HierarchicalPathDestroy, HierarchicalPathCompare};
} // namespace
grpc_arg MakeHierarchicalPathArg(const std::vector<std::string>& path) {
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_HIERARCHICAL_PATH),
const_cast<std::vector<std::string>*>(&path),
&hierarchical_path_arg_vtable);
}
HierarchicalAddressMap MakeHierarchicalAddressMap(
const ServerAddressList& addresses) {
HierarchicalAddressMap result;
for (const ServerAddress& address : addresses) {
auto* path = grpc_channel_args_find_pointer<std::vector<std::string>>(
address.args(), GRPC_ARG_HIERARCHICAL_PATH);
if (path == nullptr || path->empty()) continue;
auto it = path->begin();
ServerAddressList& target_list = result[*it];
++it;
std::vector<std::string> remaining_path(it, path->end());
const char* name_to_remove = GRPC_ARG_HIERARCHICAL_PATH;
grpc_arg new_arg = MakeHierarchicalPathArg(remaining_path);
grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
address.args(), &name_to_remove, 1, &new_arg, 1);
target_list.emplace_back(address.address(), new_args);
}
return result;
}
} // namespace grpc_core

@ -0,0 +1,99 @@
//
// Copyright 2020 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H
#include <grpc/support/port_platform.h>
#include <map>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "src/core/ext/filters/client_channel/server_address.h"
// The resolver returns a flat list of addresses. When a hierarchy of
// LB policies is in use, each leaf of the hierarchy will need a
// different subset of those addresses. This library provides a
// mechanism for determining which address is passed to which leaf
// policy.
//
// Each address will have an associated path that indicates which child
// it should be sent to at each level of the hierarchy to wind up at the
// right leaf policy. Each LB policy will look at the first element of
// the path of each address to determine which child to send the address
// to. It will then remove that first element when passing the address
// down to its child.
//
// For example, consider the following LB policy hierarchy:
//
// - priority
// - child0 (weighted_target)
// - localityA (round_robin)
// - localityB (round_robin)
// - child1 (weighted_target)
// - localityC (round_robin)
// - localityD (round_robin)
//
// Now consider the following addresses:
// - 10.0.0.1:80 path=["child0", "localityA"]
// - 10.0.0.2:80 path=["child0", "localityB"]
// - 10.0.0.3:80 path=["child1", "localityC"]
// - 10.0.0.4:80 path=["child1", "localityD"]
//
// The priority policy will split this up into two lists, one for each
// of its children:
// - child0:
// - 10.0.0.1:80 path=["localityA"]
// - 10.0.0.2:80 path=["localityB"]
// - child1:
// - 10.0.0.3:80 path=["localityC"]
// - 10.0.0.4:80 path=["localityD"]
//
// The weighted_target policy for child0 will split its list up into two
// lists, one for each of its children:
// - localityA:
// - 10.0.0.1:80 path=[]
// - localityB:
// - 10.0.0.2:80 path=[]
//
// Similarly, the weighted_target policy for child1 will split its list
// up into two lists, one for each of its children:
// - localityC:
// - 10.0.0.3:80 path=[]
// - localityD:
// - 10.0.0.4:80 path=[]
namespace grpc_core {
// Constructs a channel arg containing the hierarchical path
// to be associated with an address.
grpc_arg MakeHierarchicalPathArg(const std::vector<std::string>& path);
// A map from the next path element to the addresses that fall under
// that path element.
using HierarchicalAddressMap = std::map<std::string, ServerAddressList>;
// Splits up the addresses into a separate list for each child.
HierarchicalAddressMap MakeHierarchicalAddressMap(
const ServerAddressList& addresses);
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H \
*/
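A minimal sketch of how a parent policy might use this API, assuming an existing flat ServerAddressList and a hard-coded two-element path (both illustrative): annotate each address with a path, then split the list per child.
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/lib/channel/channel_args.h"

namespace grpc_core {
HierarchicalAddressMap SplitForChildren(const ServerAddressList& flat) {
  const std::vector<std::string> path = {"child0", "localityA"};  // illustrative
  ServerAddressList annotated;
  for (const ServerAddress& address : flat) {
    grpc_arg path_arg = MakeHierarchicalPathArg(path);
    // The arg vtable copies the vector, so `path` only needs to outlive this call.
    annotated.emplace_back(
        address.address(),
        grpc_channel_args_copy_and_add(address.args(), &path_arg, 1));
  }
  // Map keys are the first path element ("child0"); each child's addresses
  // come back with that element stripped from their paths.
  return MakeHierarchicalAddressMap(annotated);
}
}  // namespace grpc_core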

@ -74,6 +74,7 @@
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
@ -1241,25 +1242,11 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
// helper code for creating balancer channel
//
ServerAddressList ExtractBalancerAddresses(const ServerAddressList& addresses) {
ServerAddressList balancer_addresses;
for (size_t i = 0; i < addresses.size(); ++i) {
if (addresses[i].IsBalancer()) {
// Strip out the is_balancer channel arg, since we don't want to
// recursively use the grpclb policy in the channel used to talk to
// the balancers. Note that we do NOT strip out the balancer_name
// channel arg, since we need that to set the authority correctly
// to talk to the balancers.
static const char* args_to_remove[] = {
GRPC_ARG_ADDRESS_IS_BALANCER,
};
balancer_addresses.emplace_back(
addresses[i].address(),
grpc_channel_args_copy_and_remove(addresses[i].args(), args_to_remove,
GPR_ARRAY_SIZE(args_to_remove)));
}
}
return balancer_addresses;
ServerAddressList ExtractBalancerAddresses(const grpc_channel_args& args) {
const ServerAddressList* addresses =
FindGrpclbBalancerAddressesInChannelArgs(args);
if (addresses != nullptr) return *addresses;
return ServerAddressList();
}
/* Returns the channel args for the LB channel, used to create a bidirectional
@ -1452,27 +1439,25 @@ void GrpcLb::UpdateLocked(UpdateArgs args) {
// helpers for UpdateLocked()
//
// Adds a null LB token channel arg to each of the given addresses.
ServerAddressList ExtractBackendAddresses(const ServerAddressList& addresses) {
ServerAddressList AddNullLbTokenToAddresses(
const ServerAddressList& addresses) {
static const char* lb_token = "";
grpc_arg arg = grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_GRPCLB_ADDRESS_LB_TOKEN),
const_cast<char*>(lb_token), &lb_token_arg_vtable);
ServerAddressList backend_addresses;
ServerAddressList addresses_out;
for (size_t i = 0; i < addresses.size(); ++i) {
if (!addresses[i].IsBalancer()) {
backend_addresses.emplace_back(
addresses[i].address(),
grpc_channel_args_copy_and_add(addresses[i].args(), &arg, 1));
}
addresses_out.emplace_back(
addresses[i].address(),
grpc_channel_args_copy_and_add(addresses[i].args(), &arg, 1));
}
return backend_addresses;
return addresses_out;
}
void GrpcLb::ProcessAddressesAndChannelArgsLocked(
const ServerAddressList& addresses, const grpc_channel_args& args) {
// Update fallback address list.
fallback_backend_addresses_ = ExtractBackendAddresses(addresses);
fallback_backend_addresses_ = AddNullLbTokenToAddresses(addresses);
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
@ -1482,7 +1467,7 @@ void GrpcLb::ProcessAddressesAndChannelArgsLocked(
args_ = grpc_channel_args_copy_and_add_and_remove(
&args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
// Construct args for balancer channel.
ServerAddressList balancer_addresses = ExtractBalancerAddresses(addresses);
ServerAddressList balancer_addresses = ExtractBalancerAddresses(args);
grpc_channel_args* lb_channel_args = BuildBalancerChannelArgs(
balancer_addresses, response_generator_.get(), &args);
// Create balancer channel if needed.

@ -0,0 +1,89 @@
//
// Copyright 2019 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/useful.h"
// Channel arg key for the list of balancer addresses.
#define GRPC_ARG_GRPCLB_BALANCER_ADDRESSES "grpc.grpclb_balancer_addresses"
// Channel arg key for a string indicating an address's balancer name.
#define GRPC_ARG_ADDRESS_BALANCER_NAME "grpc.address_balancer_name"
namespace grpc_core {
namespace {
void* BalancerAddressesArgCopy(void* p) {
ServerAddressList* address_list = static_cast<ServerAddressList*>(p);
return new ServerAddressList(*address_list);
}
void BalancerAddressesArgDestroy(void* p) {
ServerAddressList* address_list = static_cast<ServerAddressList*>(p);
delete address_list;
}
int BalancerAddressesArgCmp(void* p, void* q) {
ServerAddressList* address_list1 = static_cast<ServerAddressList*>(p);
ServerAddressList* address_list2 = static_cast<ServerAddressList*>(q);
if (address_list1 == nullptr || address_list2 == nullptr) {
return GPR_ICMP(address_list1, address_list2);
}
if (address_list1->size() > address_list2->size()) return 1;
if (address_list1->size() < address_list2->size()) return -1;
for (size_t i = 0; i < address_list1->size(); ++i) {
int retval = (*address_list1)[i].Cmp((*address_list2)[i]);
if (retval != 0) return retval;
}
return 0;
}
const grpc_arg_pointer_vtable kBalancerAddressesArgVtable = {
BalancerAddressesArgCopy, BalancerAddressesArgDestroy,
BalancerAddressesArgCmp};
} // namespace
grpc_arg CreateGrpclbBalancerAddressesArg(
const ServerAddressList* address_list) {
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_GRPCLB_BALANCER_ADDRESSES),
const_cast<ServerAddressList*>(address_list),
&kBalancerAddressesArgVtable);
}
const ServerAddressList* FindGrpclbBalancerAddressesInChannelArgs(
const grpc_channel_args& args) {
return grpc_channel_args_find_pointer<const ServerAddressList>(
&args, const_cast<char*>(GRPC_ARG_GRPCLB_BALANCER_ADDRESSES));
}
grpc_arg CreateGrpclbBalancerNameArg(const char* balancer_name) {
return grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME),
const_cast<char*>(balancer_name));
}
const char* FindGrpclbBalancerNameInChannelArgs(const grpc_channel_args& args) {
return grpc_channel_args_find_string(
&args, const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME));
}
} // namespace grpc_core

@ -0,0 +1,40 @@
//
// Copyright 2019 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H
#include <grpc/support/port_platform.h>
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/ext/filters/client_channel/server_address.h"
namespace grpc_core {
grpc_arg CreateGrpclbBalancerAddressesArg(
const ServerAddressList* address_list);
const ServerAddressList* FindGrpclbBalancerAddressesInChannelArgs(
const grpc_channel_args& args);
grpc_arg CreateGrpclbBalancerNameArg(const char* balancer_name);
const char* FindGrpclbBalancerNameInChannelArgs(const grpc_channel_args& args);
} // namespace grpc_core
#endif /* \
GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H \
*/
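An illustrative resolver/consumer sketch, not part of this change: balancer endpoints now travel in a dedicated channel arg instead of is_balancer-flagged entries in the main address list. Apart from the two API functions declared above, the helper names here are assumptions.
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/lib/channel/channel_args.h"

namespace grpc_core {
// Resolver side: attach the balancer list; the arg's vtable copies it.
grpc_channel_args* AttachBalancerAddresses(
    const grpc_channel_args* resolver_args,
    const ServerAddressList* balancer_addresses) {
  grpc_arg arg = CreateGrpclbBalancerAddressesArg(balancer_addresses);
  return grpc_channel_args_copy_and_add(resolver_args, &arg, 1);
}
// grpclb side: read the list back out of the channel args, if present.
size_t CountBalancerAddresses(const grpc_channel_args& args) {
  const ServerAddressList* addresses =
      FindGrpclbBalancerAddressesInChannelArgs(args);
  return addresses == nullptr ? 0 : addresses->size();
}
}  // namespace grpc_core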

@ -27,6 +27,7 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
@ -55,8 +56,8 @@ RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable(
grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true) > 0);
target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str);
gpr_free(addr_str);
char* balancer_name = grpc_channel_arg_get_string(grpc_channel_args_find(
addresses[i].args(), GRPC_ARG_ADDRESS_BALANCER_NAME));
const char* balancer_name =
FindGrpclbBalancerNameInChannelArgs(*addresses[i].args());
target_authority_entries[i].value.reset(gpr_strdup(balancer_name));
}
RefCountedPtr<TargetAuthorityTable> target_authority_table =

@ -0,0 +1,875 @@
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <inttypes.h>
#include <limits.h>
#include "absl/strings/str_cat.h"
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
namespace grpc_core {
TraceFlag grpc_lb_priority_trace(false, "priority_lb");
namespace {
constexpr char kPriority[] = "priority_experimental";
// How long we keep a child around after it is no longer being used
// (either because it has been removed from the config or because we
// have switched to a higher-priority child).
constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000;
// Default for how long we wait for a newly created child to get connected
// before starting to attempt the next priority. Overridable via channel arg.
constexpr int kDefaultChildFailoverTimeoutMs = 10000;
// Config for priority LB policy.
class PriorityLbConfig : public LoadBalancingPolicy::Config {
public:
PriorityLbConfig(
std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>>
children,
std::vector<std::string> priorities)
: children_(std::move(children)), priorities_(std::move(priorities)) {}
const char* name() const override { return kPriority; }
const std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>>&
children() const {
return children_;
}
const std::vector<std::string>& priorities() const { return priorities_; }
private:
const std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>>
children_;
const std::vector<std::string> priorities_;
};
// priority LB policy.
class PriorityLb : public LoadBalancingPolicy {
public:
explicit PriorityLb(Args args);
const char* name() const override { return kPriority; }
void UpdateLocked(UpdateArgs args) override;
void ExitIdleLocked() override;
void ResetBackoffLocked() override;
private:
// Each ChildPriority holds a ref to the PriorityLb.
class ChildPriority : public InternallyRefCounted<ChildPriority> {
public:
ChildPriority(RefCountedPtr<PriorityLb> priority_policy, std::string name);
~ChildPriority() {
priority_policy_.reset(DEBUG_LOCATION, "ChildPriority");
}
const std::string& name() const { return name_; }
void UpdateLocked(RefCountedPtr<LoadBalancingPolicy::Config> config);
void ExitIdleLocked();
void ResetBackoffLocked();
void DeactivateLocked();
void MaybeReactivateLocked();
void MaybeCancelFailoverTimerLocked();
void Orphan() override;
std::unique_ptr<SubchannelPicker> GetPicker() {
return absl::make_unique<RefCountedPickerWrapper>(picker_wrapper_);
}
grpc_connectivity_state connectivity_state() const {
return connectivity_state_;
}
bool failover_timer_callback_pending() const {
return failover_timer_callback_pending_;
}
private:
// A simple wrapper for ref-counting a picker from the child policy.
class RefCountedPicker : public RefCounted<RefCountedPicker> {
public:
explicit RefCountedPicker(std::unique_ptr<SubchannelPicker> picker)
: picker_(std::move(picker)) {}
PickResult Pick(PickArgs args) { return picker_->Pick(args); }
private:
std::unique_ptr<SubchannelPicker> picker_;
};
// A non-ref-counted wrapper for RefCountedPicker.
class RefCountedPickerWrapper : public SubchannelPicker {
public:
explicit RefCountedPickerWrapper(RefCountedPtr<RefCountedPicker> picker)
: picker_(std::move(picker)) {}
PickResult Pick(PickArgs args) override { return picker_->Pick(args); }
private:
RefCountedPtr<RefCountedPicker> picker_;
};
class Helper : public ChannelControlHelper {
public:
explicit Helper(RefCountedPtr<ChildPriority> priority)
: priority_(std::move(priority)) {}
~Helper() { priority_.reset(DEBUG_LOCATION, "Helper"); }
RefCountedPtr<SubchannelInterface> CreateSubchannel(
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker) override;
void RequestReresolution() override;
void AddTraceEvent(TraceSeverity severity, StringView message) override;
private:
RefCountedPtr<ChildPriority> priority_;
};
// Methods for dealing with the child policy.
OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
const grpc_channel_args* args);
void OnConnectivityStateUpdateLocked(
grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker);
void StartFailoverTimerLocked();
static void OnFailoverTimer(void* arg, grpc_error* error);
static void OnFailoverTimerLocked(void* arg, grpc_error* error);
static void OnDeactivationTimer(void* arg, grpc_error* error);
static void OnDeactivationTimerLocked(void* arg, grpc_error* error);
RefCountedPtr<PriorityLb> priority_policy_;
const std::string name_;
OrphanablePtr<LoadBalancingPolicy> child_policy_;
grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_CONNECTING;
RefCountedPtr<RefCountedPicker> picker_wrapper_;
// States for delayed removal.
grpc_timer deactivation_timer_;
grpc_closure on_deactivation_timer_;
bool deactivation_timer_callback_pending_ = false;
// States of failover.
grpc_timer failover_timer_;
grpc_closure on_failover_timer_;
grpc_closure on_failover_timer_locked_;
bool failover_timer_callback_pending_ = false;
};
~PriorityLb();
void ShutdownLocked() override;
// Returns UINT32_MAX if child is not in current priority list.
uint32_t GetChildPriorityLocked(const std::string& child_name) const;
void HandleChildConnectivityStateChangeLocked(ChildPriority* child);
void DeleteChild(ChildPriority* child);
void TryNextPriorityLocked(bool report_connecting);
void SelectPriorityLocked(uint32_t priority);
const int child_failover_timeout_ms_;
// Current channel args and config from the resolver.
const grpc_channel_args* args_ = nullptr;
RefCountedPtr<PriorityLbConfig> config_;
HierarchicalAddressMap addresses_;
// Internal state.
bool shutting_down_ = false;
std::map<std::string, OrphanablePtr<ChildPriority>> children_;
// The priority that is being used.
uint32_t current_priority_ = UINT32_MAX;
// Points to the current child from before the most recent update.
// We will continue to use this child until we decide which of the new
// children to use.
ChildPriority* current_child_from_before_update_ = nullptr;
};
//
// PriorityLb
//
PriorityLb::PriorityLb(Args args)
: LoadBalancingPolicy(std::move(args)),
child_failover_timeout_ms_(grpc_channel_args_find_integer(
args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS,
{kDefaultChildFailoverTimeoutMs, 0, INT_MAX})) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] created", this);
}
}
PriorityLb::~PriorityLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] destroying priority LB policy", this);
}
grpc_channel_args_destroy(args_);
}
void PriorityLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] shutting down", this);
}
shutting_down_ = true;
children_.clear();
}
void PriorityLb::ExitIdleLocked() {
if (current_priority_ != UINT32_MAX) {
const std::string& child_name = config_->priorities()[current_priority_];
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] exiting IDLE for current priority %d child %s",
this, current_priority_, child_name.c_str());
}
children_[child_name]->ExitIdleLocked();
}
}
void PriorityLb::ResetBackoffLocked() {
for (const auto& p : children_) p.second->ResetBackoffLocked();
}
void PriorityLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] received update", this);
}
// Save current child.
if (current_priority_ != UINT32_MAX) {
const std::string& child_name = config_->priorities()[current_priority_];
current_child_from_before_update_ = children_[child_name].get();
// Unset current_priority_, since it was an index into the old
// config's priority list and may no longer be valid. It will be
// reset later by TryNextPriorityLocked(), but we unset it here in
// case updating any of our children triggers a state update.
current_priority_ = UINT32_MAX;
}
// Update config.
config_ = std::move(args.config);
// Update args.
grpc_channel_args_destroy(args_);
args_ = args.args;
args.args = nullptr;
// Update addresses.
addresses_ = MakeHierarchicalAddressMap(args.addresses);
// Check all existing children against the new config.
for (const auto& p : children_) {
const std::string& child_name = p.first;
auto& child = p.second;
auto config_it = config_->children().find(child_name);
if (config_it == config_->children().end()) {
// Existing child not found in new config. Deactivate it.
child->DeactivateLocked();
} else {
// Existing child found in new config. Update it.
child->UpdateLocked(config_it->second);
}
}
// Try to get connected.
TryNextPriorityLocked(/*report_connecting=*/children_.empty());
}
uint32_t PriorityLb::GetChildPriorityLocked(
const std::string& child_name) const {
for (uint32_t priority = 0; priority < config_->priorities().size();
++priority) {
if (config_->priorities()[priority] == child_name) return priority;
}
return UINT32_MAX;
}
void PriorityLb::HandleChildConnectivityStateChangeLocked(
ChildPriority* child) {
// Special case for the child that was the current child before the
// most recent update.
if (child == current_child_from_before_update_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] state update for current child from before "
"config update",
this);
}
if (child->connectivity_state() == GRPC_CHANNEL_READY ||
child->connectivity_state() == GRPC_CHANNEL_IDLE) {
// If it's still READY or IDLE, we stick with this child, so pass
// the new picker up to our parent.
channel_control_helper()->UpdateState(child->connectivity_state(),
child->GetPicker());
} else {
// If it's no longer READY or IDLE, we should stop using it.
// We already started trying other priorities as a result of the
// update, but calling TryNextPriorityLocked() ensures that we will
// properly select between CONNECTING and TRANSIENT_FAILURE as the
// new state to report to our parent.
current_child_from_before_update_ = nullptr;
TryNextPriorityLocked(/*report_connecting=*/true);
}
return;
}
// Otherwise, find the child's priority.
uint32_t child_priority = GetChildPriorityLocked(child->name());
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] state update for priority %d, child %s",
this, child_priority, child->name().c_str());
}
// Ignore priorities not in the current config.
if (child_priority == UINT32_MAX) return;
// Ignore lower-than-current priorities.
if (child_priority > current_priority_) return;
// If a child reports TRANSIENT_FAILURE, start trying the next priority.
// Note that even if this is for a higher-than-current priority, we
// may still need to create some children between this priority and
// the current one (e.g., if we got an update that inserted new
// priorities ahead of the current one).
if (child->connectivity_state() == GRPC_CHANNEL_TRANSIENT_FAILURE) {
TryNextPriorityLocked(
/*report_connecting=*/child_priority == current_priority_);
return;
}
// The update is for a higher-than-current priority (or for any
// priority if we don't have any current priority).
if (child_priority < current_priority_) {
// If the child reports READY or IDLE, switch to that priority.
// Otherwise, ignore the update.
if (child->connectivity_state() == GRPC_CHANNEL_READY ||
child->connectivity_state() == GRPC_CHANNEL_IDLE) {
SelectPriorityLocked(child_priority);
}
return;
}
// The current priority has returned a new picker, so pass it up to
// our parent.
channel_control_helper()->UpdateState(child->connectivity_state(),
child->GetPicker());
}
void PriorityLb::DeleteChild(ChildPriority* child) {
// If this was the current child from before the most recent update,
// stop using it. We already started trying other priorities as a
// result of the update, but calling TryNextPriorityLocked() ensures that
// we will properly select between CONNECTING and TRANSIENT_FAILURE as the
// new state to report to our parent.
if (current_child_from_before_update_ == child) {
current_child_from_before_update_ = nullptr;
TryNextPriorityLocked(/*report_connecting=*/true);
}
children_.erase(child->name());
}
void PriorityLb::TryNextPriorityLocked(bool report_connecting) {
for (uint32_t priority = 0; priority < config_->priorities().size();
++priority) {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] trying priority %d, child %s", this,
priority, child_name.c_str());
}
auto& child = children_[child_name];
if (child == nullptr) {
if (report_connecting) {
channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING,
absl::make_unique<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker")));
}
child = MakeOrphanable<ChildPriority>(
Ref(DEBUG_LOCATION, "ChildPriority"), child_name);
child->UpdateLocked(config_->children().find(child_name)->second);
return;
}
// The child already exists.
child->MaybeReactivateLocked();
// If the child is in state READY or IDLE, switch to it.
if (child->connectivity_state() == GRPC_CHANNEL_READY ||
child->connectivity_state() == GRPC_CHANNEL_IDLE) {
SelectPriorityLocked(priority);
return;
}
// Child is not READY or IDLE.
// If its failover timer is still pending, give it time to fire.
if (child->failover_timer_callback_pending()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] priority %d, child %s: child still "
"attempting to connect, will wait",
this, priority, child_name.c_str());
}
if (report_connecting) {
channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING,
absl::make_unique<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker")));
}
return;
}
// Child has been failing for a while. Move on to the next priority.
}
// If there are no more priorities to try, report TRANSIENT_FAILURE.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] no priority reachable, putting channel in "
"TRANSIENT_FAILURE",
this);
}
current_priority_ = UINT32_MAX;
current_child_from_before_update_ = nullptr;
grpc_error* error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ready priority"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE,
absl::make_unique<TransientFailurePicker>(error));
}
void PriorityLb::SelectPriorityLocked(uint32_t priority) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] selected priority %d, child %s", this,
priority, config_->priorities()[priority].c_str());
}
current_priority_ = priority;
current_child_from_before_update_ = nullptr;
// Deactivate lower priorities.
for (uint32_t p = priority + 1; p < config_->priorities().size(); ++p) {
const std::string& child_name = config_->priorities()[p];
auto it = children_.find(child_name);
if (it != children_.end()) it->second->DeactivateLocked();
}
// Update picker.
auto& child = children_[config_->priorities()[priority]];
channel_control_helper()->UpdateState(child->connectivity_state(),
child->GetPicker());
}
//
// PriorityLb::ChildPriority
//
PriorityLb::ChildPriority::ChildPriority(
RefCountedPtr<PriorityLb> priority_policy, std::string name)
: priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] creating child %s (%p)",
priority_policy_.get(), name_.c_str(), this);
}
GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimer, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_failover_timer_locked_, OnFailoverTimerLocked, this,
nullptr);
// Start the failover timer.
StartFailoverTimerLocked();
}
void PriorityLb::ChildPriority::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): orphaned",
priority_policy_.get(), name_.c_str(), this);
}
MaybeCancelFailoverTimerLocked();
if (deactivation_timer_callback_pending_) {
grpc_timer_cancel(&deactivation_timer_);
}
// Remove the child policy's interested_parties pollset_set from the
// xDS policy.
grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
priority_policy_->interested_parties());
child_policy_.reset();
// Drop our ref to the child's picker, in case it's holding a ref to
// the child.
picker_wrapper_.reset();
if (deactivation_timer_callback_pending_) {
grpc_timer_cancel(&deactivation_timer_);
}
Unref(DEBUG_LOCATION, "ChildPriority+Orphan");
}
void PriorityLb::ChildPriority::UpdateLocked(
RefCountedPtr<LoadBalancingPolicy::Config> config) {
if (priority_policy_->shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): start update",
priority_policy_.get(), name_.c_str(), this);
}
// Create policy if needed.
if (child_policy_ == nullptr) {
child_policy_ = CreateChildPolicyLocked(priority_policy_->args_);
}
// Construct update args.
UpdateArgs update_args;
update_args.config = std::move(config);
update_args.addresses = priority_policy_->addresses_[name_];
update_args.args = grpc_channel_args_copy(priority_policy_->args_);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): updating child policy handler %p",
priority_policy_.get(), name_.c_str(), this, child_policy_.get());
}
child_policy_->UpdateLocked(std::move(update_args));
}
OrphanablePtr<LoadBalancingPolicy>
PriorityLb::ChildPriority::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = priority_policy_->combiner();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(this->Ref(DEBUG_LOCATION, "Helper"));
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_lb_priority_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): created new child policy "
"handler %p",
priority_policy_.get(), name_.c_str(), this, lb_policy.get());
}
// Add the parent's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
// activity on the parent LB, which in turn is tied to the application's call.
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
priority_policy_->interested_parties());
return lb_policy;
}
void PriorityLb::ChildPriority::ExitIdleLocked() {
if (connectivity_state_ == GRPC_CHANNEL_IDLE &&
!failover_timer_callback_pending_) {
StartFailoverTimerLocked();
}
child_policy_->ExitIdleLocked();
}
void PriorityLb::ChildPriority::ResetBackoffLocked() {
child_policy_->ResetBackoffLocked();
}
void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked(
grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): state update: %s, picker %p",
priority_policy_.get(), name_.c_str(), this,
ConnectivityStateName(state), picker.get());
}
// Store the state and picker.
connectivity_state_ = state;
picker_wrapper_ = MakeRefCounted<RefCountedPicker>(std::move(picker));
// If READY or TRANSIENT_FAILURE, cancel failover timer.
if (state == GRPC_CHANNEL_READY || state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
MaybeCancelFailoverTimerLocked();
}
// Notify the parent policy.
priority_policy_->HandleChildConnectivityStateChangeLocked(this);
}
void PriorityLb::ChildPriority::StartFailoverTimerLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): starting failover timer for %d ms",
priority_policy_.get(), name_.c_str(), this,
priority_policy_->child_failover_timeout_ms_);
}
Ref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked").release();
grpc_timer_init(
&failover_timer_,
ExecCtx::Get()->Now() + priority_policy_->child_failover_timeout_ms_,
&on_failover_timer_);
failover_timer_callback_pending_ = true;
}
void PriorityLb::ChildPriority::MaybeCancelFailoverTimerLocked() {
if (failover_timer_callback_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): cancelling failover timer",
priority_policy_.get(), name_.c_str(), this);
}
grpc_timer_cancel(&failover_timer_);
failover_timer_callback_pending_ = false;
}
}
void PriorityLb::ChildPriority::OnFailoverTimer(void* arg, grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
self->priority_policy_->combiner()->Run(&self->on_failover_timer_locked_,
GRPC_ERROR_REF(error));
}
void PriorityLb::ChildPriority::OnFailoverTimerLocked(void* arg,
grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
if (error == GRPC_ERROR_NONE && self->failover_timer_callback_pending_ &&
!self->priority_policy_->shutting_down_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): failover timer fired, "
"reporting TRANSIENT_FAILURE",
self->priority_policy_.get(), self->name_.c_str(), self);
}
self->failover_timer_callback_pending_ = false;
self->OnConnectivityStateUpdateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
nullptr);
}
self->Unref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked");
}
void PriorityLb::ChildPriority::DeactivateLocked() {
// If already deactivated, don't do it again.
if (deactivation_timer_callback_pending_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivating -- will remove in %d "
"ms.",
priority_policy_.get(), name_.c_str(), this,
kChildRetentionIntervalMs);
}
MaybeCancelFailoverTimerLocked();
// Start a timer to delete the child.
Ref(DEBUG_LOCATION, "ChildPriority+timer").release();
GRPC_CLOSURE_INIT(&on_deactivation_timer_, OnDeactivationTimer, this,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&deactivation_timer_,
ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
&on_deactivation_timer_);
deactivation_timer_callback_pending_ = true;
}
void PriorityLb::ChildPriority::MaybeReactivateLocked() {
if (deactivation_timer_callback_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): reactivating",
priority_policy_.get(), name_.c_str(), this);
}
deactivation_timer_callback_pending_ = false;
grpc_timer_cancel(&deactivation_timer_);
}
}
void PriorityLb::ChildPriority::OnDeactivationTimer(void* arg,
grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
self->priority_policy_->combiner()->Run(
GRPC_CLOSURE_INIT(&self->on_deactivation_timer_,
OnDeactivationTimerLocked, self, nullptr),
GRPC_ERROR_REF(error));
}
void PriorityLb::ChildPriority::OnDeactivationTimerLocked(void* arg,
grpc_error* error) {
ChildPriority* self = static_cast<ChildPriority*>(arg);
if (error == GRPC_ERROR_NONE && self->deactivation_timer_callback_pending_ &&
!self->priority_policy_->shutting_down_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivation timer fired, "
"deleting child",
self->priority_policy_.get(), self->name_.c_str(), self);
}
self->deactivation_timer_callback_pending_ = false;
self->priority_policy_->DeleteChild(self);
}
self->Unref(DEBUG_LOCATION, "ChildPriority+timer");
}
//
// PriorityLb::ChildPriority::Helper
//
void PriorityLb::ChildPriority::Helper::RequestReresolution() {
if (priority_->priority_policy_->shutting_down_) return;
priority_->priority_policy_->channel_control_helper()->RequestReresolution();
}
RefCountedPtr<SubchannelInterface>
PriorityLb::ChildPriority::Helper::CreateSubchannel(
const grpc_channel_args& args) {
if (priority_->priority_policy_->shutting_down_) return nullptr;
return priority_->priority_policy_->channel_control_helper()
->CreateSubchannel(args);
}
void PriorityLb::ChildPriority::Helper::UpdateState(
grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
if (priority_->priority_policy_->shutting_down_) return;
// Notify the priority.
priority_->OnConnectivityStateUpdateLocked(state, std::move(picker));
}
void PriorityLb::ChildPriority::Helper::AddTraceEvent(TraceSeverity severity,
StringView message) {
if (priority_->priority_policy_->shutting_down_) return;
priority_->priority_policy_->channel_control_helper()->AddTraceEvent(severity,
message);
}
//
// factory
//
class PriorityLbFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
return MakeOrphanable<PriorityLb>(std::move(args));
}
const char* name() const override { return kPriority; }
RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
const Json& json, grpc_error** error) const override {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
if (json.type() == Json::Type::JSON_NULL) {
// priority was mentioned as a policy in the deprecated
// loadBalancingPolicy field or in the client API.
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:priority policy requires "
"configuration. Please use loadBalancingConfig field of service "
"config instead.");
return nullptr;
}
std::vector<grpc_error*> error_list;
// Children.
std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>> children;
auto it = json.object_value().find("children");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:children error:required field missing"));
} else if (it->second.type() != Json::Type::OBJECT) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:children error:type should be object"));
} else {
const Json::Object& object = it->second.object_value();
for (const auto& p : object) {
const std::string& child_name = p.first;
const Json& element = p.second;
if (element.type() != Json::Type::OBJECT) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
absl::StrCat("field:children key:", child_name,
" error:should be type object")
.c_str()));
} else {
auto it2 = element.object_value().find("config");
if (it2 == element.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
absl::StrCat("field:children key:", child_name,
" error:missing 'config' field")
.c_str()));
} else {
grpc_error* parse_error = GRPC_ERROR_NONE;
auto config = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
it2->second, &parse_error);
if (config == nullptr) {
GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
error_list.push_back(
GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(
absl::StrCat("field:children key:", child_name).c_str(),
&parse_error, 1));
GRPC_ERROR_UNREF(parse_error);
}
children[child_name] = std::move(config);
}
}
}
}
// Priorities.
std::vector<std::string> priorities;
it = json.object_value().find("priorities");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:priorities error:required field missing"));
} else if (it->second.type() != Json::Type::ARRAY) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:priorities error:type should be array"));
} else {
const Json::Array& array = it->second.array_value();
for (size_t i = 0; i < array.size(); ++i) {
const Json& element = array[i];
if (element.type() != Json::Type::STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
absl::StrCat("field:priorities element:", i,
" error:should be type string")
.c_str()));
} else if (children.find(element.string_value()) == children.end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
absl::StrCat("field:priorities element:", i,
" error:unknown child '", element.string_value(),
"'")
.c_str()));
} else {
priorities.emplace_back(element.string_value());
}
}
if (priorities.size() != children.size()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
absl::StrCat("field:priorities error:priorities size (",
priorities.size(), ") != children size (",
children.size(), ")")
.c_str()));
}
}
if (error_list.empty()) {
return MakeRefCounted<PriorityLbConfig>(std::move(children),
std::move(priorities));
} else {
*error = GRPC_ERROR_CREATE_FROM_VECTOR(
"priority_experimental LB policy config", &error_list);
return nullptr;
}
}
};
} // namespace
} // namespace grpc_core
//
// Plugin registration
//
void grpc_lb_policy_priority_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory(
absl::make_unique<grpc_core::PriorityLbFactory>());
}
void grpc_lb_policy_priority_shutdown() {}
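For reference, a config the parser above would accept looks roughly like the following; in a service config it would sit under loadBalancingConfig as {"priority_experimental": {...}}. The field names ("children", "priorities", "config") come from the validation code above; the child names and the round_robin child configs are illustrative.
// Illustrative only; see PriorityLbFactory::ParseLoadBalancingConfig above.
const char* kExamplePriorityLbConfig = R"json({
  "children": {
    "child0": { "config": [ { "round_robin": {} } ] },
    "child1": { "config": [ { "round_robin": {} } ] }
  },
  "priorities": [ "child0", "child1" ]
})json";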

@ -370,13 +370,6 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
GRPC_ARG_SERVICE_CONFIG};
// Create a subchannel for each address.
for (size_t i = 0; i < addresses.size(); i++) {
// TODO(roth): we should ideally hide this from the LB policy code. In
// principle, if we're dealing with this special case in the client_channel
// code for selecting grpclb, then we should also strip out these addresses
// there if we're not using grpclb.
if (addresses[i].IsBalancer()) {
continue;
}
InlinedVector<grpc_arg, 3> args_to_add;
const size_t subchannel_address_arg_index = args_to_add.size();
args_to_add.emplace_back(

@ -0,0 +1,722 @@
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include "absl/strings/str_cat.h"
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h"
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"
namespace grpc_core {
TraceFlag grpc_lb_weighted_target_trace(false, "weighted_target_lb");
namespace {
constexpr char kWeightedTarget[] = "weighted_target_experimental";
// How long we keep a child around after it has been removed from
// the config.
constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000;
// Config for weighted_target LB policy.
class WeightedTargetLbConfig : public LoadBalancingPolicy::Config {
public:
struct ChildConfig {
uint32_t weight;
RefCountedPtr<LoadBalancingPolicy::Config> config;
};
using TargetMap = std::map<std::string, ChildConfig>;
explicit WeightedTargetLbConfig(TargetMap target_map)
: target_map_(std::move(target_map)) {}
const char* name() const override { return kWeightedTarget; }
const TargetMap& target_map() const { return target_map_; }
private:
TargetMap target_map_;
};
// weighted_target LB policy.
class WeightedTargetLb : public LoadBalancingPolicy {
public:
explicit WeightedTargetLb(Args args);
const char* name() const override { return kWeightedTarget; }
void UpdateLocked(UpdateArgs args) override;
void ResetBackoffLocked() override;
private:
// A simple wrapper for ref-counting a picker from the child policy.
class ChildPickerWrapper : public RefCounted<ChildPickerWrapper> {
public:
explicit ChildPickerWrapper(std::unique_ptr<SubchannelPicker> picker)
: picker_(std::move(picker)) {}
PickResult Pick(PickArgs args) { return picker_->Pick(args); }
private:
std::unique_ptr<SubchannelPicker> picker_;
};
// Picks a child using stateless WRR and then delegates to that
// child's picker.
class WeightedPicker : public SubchannelPicker {
public:
// Maintains a weighted list of pickers from each child that is in
// ready state. The first element in the pair represents the end of a
// range proportional to the child's weight. The start of the range
// is the previous value in the vector and is 0 for the first element.
using PickerList =
InlinedVector<std::pair<uint32_t, RefCountedPtr<ChildPickerWrapper>>,
1>;
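// For example (weights assumed): children A=10, B=20, C=70 in READY
// state produce pickers_ = {{10, A}, {30, B}, {100, C}}, so A owns keys
// [0, 10), B owns [10, 30), and C owns [30, 100).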
explicit WeightedPicker(PickerList pickers)
: pickers_(std::move(pickers)) {}
PickResult Pick(PickArgs args) override;
private:
PickerList pickers_;
};
// Each WeightedChild holds a ref to its parent WeightedTargetLb.
class WeightedChild : public InternallyRefCounted<WeightedChild> {
public:
WeightedChild(RefCountedPtr<WeightedTargetLb> weighted_target_policy,
const std::string& name);
~WeightedChild();
void Orphan() override;
void UpdateLocked(const WeightedTargetLbConfig::ChildConfig& config,
ServerAddressList addresses,
const grpc_channel_args* args);
void ResetBackoffLocked();
void DeactivateLocked();
uint32_t weight() const { return weight_; }
grpc_connectivity_state connectivity_state() const {
return connectivity_state_;
}
RefCountedPtr<ChildPickerWrapper> picker_wrapper() const {
return picker_wrapper_;
}
private:
class Helper : public ChannelControlHelper {
public:
explicit Helper(RefCountedPtr<WeightedChild> weighted_child)
: weighted_child_(std::move(weighted_child)) {}
~Helper() { weighted_child_.reset(DEBUG_LOCATION, "Helper"); }
RefCountedPtr<SubchannelInterface> CreateSubchannel(
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker) override;
void RequestReresolution() override;
void AddTraceEvent(TraceSeverity severity, StringView message) override;
private:
RefCountedPtr<WeightedChild> weighted_child_;
};
// Methods for dealing with the child policy.
OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
const grpc_channel_args* args);
void OnConnectivityStateUpdateLocked(
grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker);
static void OnDelayedRemovalTimer(void* arg, grpc_error* error);
static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error);
// The owning LB policy.
RefCountedPtr<WeightedTargetLb> weighted_target_policy_;
const std::string& name_;
uint32_t weight_;
OrphanablePtr<LoadBalancingPolicy> child_policy_;
RefCountedPtr<ChildPickerWrapper> picker_wrapper_;
grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_CONNECTING;
bool seen_failure_since_ready_ = false;
// States for delayed removal.
grpc_timer delayed_removal_timer_;
grpc_closure on_delayed_removal_timer_;
bool delayed_removal_timer_callback_pending_ = false;
bool shutdown_ = false;
};
~WeightedTargetLb();
void ShutdownLocked() override;
void UpdateStateLocked();
// Current config from the resolver.
RefCountedPtr<WeightedTargetLbConfig> config_;
// Internal state.
bool shutting_down_ = false;
// Children.
std::map<std::string, OrphanablePtr<WeightedChild>> targets_;
};
//
// WeightedTargetLb::WeightedPicker
//
WeightedTargetLb::PickResult WeightedTargetLb::WeightedPicker::Pick(
PickArgs args) {
// Generate a random number in [0, total weight).
const uint32_t key = rand() % pickers_[pickers_.size() - 1].first;
// Find the index in pickers_ corresponding to key.
size_t mid = 0;
size_t start_index = 0;
size_t end_index = pickers_.size() - 1;
size_t index = 0;
while (end_index > start_index) {
mid = (start_index + end_index) / 2;
if (pickers_[mid].first > key) {
end_index = mid;
} else if (pickers_[mid].first < key) {
start_index = mid + 1;
} else {
index = mid + 1;
break;
}
}
if (index == 0) index = start_index;
GPR_ASSERT(pickers_[index].first > key);
// Delegate to the child picker.
return pickers_[index].second->Pick(args);
}
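// Worked example (same assumed weights as in the PickerList comment above):
// with pickers_ = {{10, A}, {30, B}, {100, C}}, key = 25 narrows the
// search to index 1 (B), while key = 5 lands on index 0 (A).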
//
// WeightedTargetLb
//
WeightedTargetLb::WeightedTargetLb(Args args)
: LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] created", this);
}
}
WeightedTargetLb::~WeightedTargetLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] destroying weighted_target LB policy",
this);
}
}
void WeightedTargetLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] shutting down", this);
}
shutting_down_ = true;
targets_.clear();
}
void WeightedTargetLb::ResetBackoffLocked() {
for (auto& p : targets_) p.second->ResetBackoffLocked();
}
void WeightedTargetLb::UpdateLocked(UpdateArgs args) {
if (shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] Received update", this);
}
// Update config.
config_ = std::move(args.config);
// Deactivate the targets not in the new config.
for (const auto& p : targets_) {
const std::string& name = p.first;
WeightedChild* child = p.second.get();
if (config_->target_map().find(name) == config_->target_map().end()) {
child->DeactivateLocked();
}
}
// Add or update the targets in the new config.
HierarchicalAddressMap address_map =
MakeHierarchicalAddressMap(args.addresses);
for (const auto& p : config_->target_map()) {
const std::string& name = p.first;
const WeightedTargetLbConfig::ChildConfig& config = p.second;
auto it = targets_.find(name);
if (it == targets_.end()) {
it = targets_.emplace(std::make_pair(name, nullptr)).first;
it->second = MakeOrphanable<WeightedChild>(
Ref(DEBUG_LOCATION, "WeightedChild"), it->first);
}
it->second->UpdateLocked(config, std::move(address_map[name]), args.args);
}
}
void WeightedTargetLb::UpdateStateLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] scanning children to determine "
"connectivity state",
this);
}
// Construct a new picker which maintains a weighted list of all child
// pickers that are ready. Each child is represented by a portion of the
// range proportional to its weight, such that the total range is the sum
// of the weights of all children.
WeightedPicker::PickerList picker_list;
uint32_t end = 0;
// Also count the number of children in each state, to determine the
// overall state.
size_t num_connecting = 0;
size_t num_idle = 0;
size_t num_transient_failures = 0;
for (const auto& p : targets_) {
const std::string& child_name = p.first;
const WeightedChild* child = p.second.get();
// Skip the targets that are not in the latest update.
if (config_->target_map().find(child_name) == config_->target_map().end()) {
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] child=%s state=%s weight=%d picker=%p",
this, child_name.c_str(),
ConnectivityStateName(child->connectivity_state()),
child->weight(), child->picker_wrapper().get());
}
switch (child->connectivity_state()) {
case GRPC_CHANNEL_READY: {
end += child->weight();
picker_list.push_back(std::make_pair(end, child->picker_wrapper()));
break;
}
case GRPC_CHANNEL_CONNECTING: {
++num_connecting;
break;
}
case GRPC_CHANNEL_IDLE: {
++num_idle;
break;
}
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
++num_transient_failures;
break;
}
default:
GPR_UNREACHABLE_CODE(return );
}
}
// Determine aggregated connectivity state.
grpc_connectivity_state connectivity_state;
if (!picker_list.empty()) {
connectivity_state = GRPC_CHANNEL_READY;
} else if (num_connecting > 0) {
connectivity_state = GRPC_CHANNEL_CONNECTING;
} else if (num_idle > 0) {
connectivity_state = GRPC_CHANNEL_IDLE;
} else {
connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] connectivity changed to %s",
this, ConnectivityStateName(connectivity_state));
}
std::unique_ptr<SubchannelPicker> picker;
switch (connectivity_state) {
case GRPC_CHANNEL_READY:
picker = absl::make_unique<WeightedPicker>(std::move(picker_list));
break;
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
picker =
absl::make_unique<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker"));
break;
default:
picker = absl::make_unique<TransientFailurePicker>(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"weighted_target: all children report state TRANSIENT_FAILURE"));
}
channel_control_helper()->UpdateState(connectivity_state, std::move(picker));
}
//
// WeightedTargetLb::WeightedChild
//
WeightedTargetLb::WeightedChild::WeightedChild(
RefCountedPtr<WeightedTargetLb> weighted_target_policy,
const std::string& name)
: weighted_target_policy_(std::move(weighted_target_policy)), name_(name) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s",
weighted_target_policy_.get(), this, name_.c_str());
}
}
WeightedTargetLb::WeightedChild::~WeightedChild() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: destroying child",
weighted_target_policy_.get(), this, name_.c_str());
}
weighted_target_policy_.reset(DEBUG_LOCATION, "WeightedChild");
}
void WeightedTargetLb::WeightedChild::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: shutting down child",
weighted_target_policy_.get(), this, name_.c_str());
}
// Remove the child policy's interested_parties pollset_set from the
// weighted_target policy.
grpc_pollset_set_del_pollset_set(
child_policy_->interested_parties(),
weighted_target_policy_->interested_parties());
child_policy_.reset();
// Drop our ref to the child's picker, in case it's holding a ref to
// the child.
picker_wrapper_.reset();
if (delayed_removal_timer_callback_pending_) {
delayed_removal_timer_callback_pending_ = false;
grpc_timer_cancel(&delayed_removal_timer_);
}
shutdown_ = true;
Unref();
}
OrphanablePtr<LoadBalancingPolicy>
WeightedTargetLb::WeightedChild::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = weighted_target_policy_->combiner();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(this->Ref(DEBUG_LOCATION, "Helper"));
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_lb_weighted_target_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: Created new child "
"policy handler %p",
weighted_target_policy_.get(), this, name_.c_str(),
lb_policy.get());
}
// Add the weighted_target policy's interested_parties pollset_set to that
// of the newly created child policy. This will make the child policy
// progress upon activity on this policy, which in turn is tied to the
// application's call.
grpc_pollset_set_add_pollset_set(
lb_policy->interested_parties(),
weighted_target_policy_->interested_parties());
return lb_policy;
}
void WeightedTargetLb::WeightedChild::UpdateLocked(
const WeightedTargetLbConfig::ChildConfig& config,
ServerAddressList addresses, const grpc_channel_args* args) {
if (weighted_target_policy_->shutting_down_) return;
// Update child weight.
weight_ = config.weight;
// Reactivate if needed.
if (delayed_removal_timer_callback_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: reactivating",
weighted_target_policy_.get(), this, name_.c_str());
}
delayed_removal_timer_callback_pending_ = false;
grpc_timer_cancel(&delayed_removal_timer_);
}
// Create child policy if needed.
if (child_policy_ == nullptr) {
child_policy_ = CreateChildPolicyLocked(args);
}
// Construct update args.
UpdateArgs update_args;
update_args.config = config.config;
update_args.addresses = std::move(addresses);
update_args.args = grpc_channel_args_copy(args);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: Updating child "
"policy handler %p",
weighted_target_policy_.get(), this, name_.c_str(),
child_policy_.get());
}
child_policy_->UpdateLocked(std::move(update_args));
}
void WeightedTargetLb::WeightedChild::ResetBackoffLocked() {
child_policy_->ResetBackoffLocked();
}
void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
// Cache the picker in the WeightedChild.
picker_wrapper_ = MakeRefCounted<ChildPickerWrapper>(std::move(picker));
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: connectivity "
"state update: state=%s picker_wrapper=%p",
weighted_target_policy_.get(), this, name_.c_str(),
ConnectivityStateName(state), picker_wrapper_.get());
}
// If the child reports IDLE, immediately tell it to exit idle.
if (state == GRPC_CHANNEL_IDLE) child_policy_->ExitIdleLocked();
// Decide what state to report for aggregation purposes.
// If we haven't seen a failure since the last time we were in state
// READY, then we report the state change as-is. However, once we do see
// a failure, we report TRANSIENT_FAILURE and ignore any subsequent state
// changes until we go back into state READY.
if (!seen_failure_since_ready_) {
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
seen_failure_since_ready_ = true;
}
} else {
if (state != GRPC_CHANNEL_READY) return;
seen_failure_since_ready_ = false;
}
connectivity_state_ = state;
// Notify the LB policy.
weighted_target_policy_->UpdateStateLocked();
}
void WeightedTargetLb::WeightedChild::DeactivateLocked() {
// If already deactivated, don't do that again.
if (weight_ == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: deactivating",
weighted_target_policy_.get(), this, name_.c_str());
}
// Set the child weight to 0 so that future pickers won't contain this child.
weight_ = 0;
// Start a timer to delete the child.
Ref(DEBUG_LOCATION, "WeightedChild+timer").release();
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
grpc_schedule_on_exec_ctx);
delayed_removal_timer_callback_pending_ = true;
grpc_timer_init(&delayed_removal_timer_,
ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
&on_delayed_removal_timer_);
}
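// Note: a deactivated child (weight 0) is retained for
// kChildRetentionIntervalMs; if a later config re-adds its name,
// UpdateLocked() cancels this timer and reuses the existing child.
// Otherwise OnDelayedRemovalTimerLocked() below erases it from targets_.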
void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimer(void* arg,
grpc_error* error) {
WeightedChild* self = static_cast<WeightedChild*>(arg);
self->weighted_target_policy_->combiner()->Run(
GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_,
OnDelayedRemovalTimerLocked, self, nullptr),
GRPC_ERROR_REF(error));
}
void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimerLocked(
void* arg, grpc_error* error) {
WeightedChild* self = static_cast<WeightedChild*>(arg);
if (error == GRPC_ERROR_NONE &&
self->delayed_removal_timer_callback_pending_ && !self->shutdown_ &&
self->weight_ == 0) {
self->delayed_removal_timer_callback_pending_ = false;
self->weighted_target_policy_->targets_.erase(self->name_);
}
self->Unref(DEBUG_LOCATION, "WeightedChild+timer");
}
//
// WeightedTargetLb::WeightedChild::Helper
//
RefCountedPtr<SubchannelInterface>
WeightedTargetLb::WeightedChild::Helper::CreateSubchannel(
const grpc_channel_args& args) {
if (weighted_child_->weighted_target_policy_->shutting_down_) return nullptr;
return weighted_child_->weighted_target_policy_->channel_control_helper()
->CreateSubchannel(args);
}
void WeightedTargetLb::WeightedChild::Helper::UpdateState(
grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
if (weighted_child_->weighted_target_policy_->shutting_down_) return;
weighted_child_->OnConnectivityStateUpdateLocked(state, std::move(picker));
}
void WeightedTargetLb::WeightedChild::Helper::RequestReresolution() {
if (weighted_child_->weighted_target_policy_->shutting_down_) return;
weighted_child_->weighted_target_policy_->channel_control_helper()
->RequestReresolution();
}
void WeightedTargetLb::WeightedChild::Helper::AddTraceEvent(
TraceSeverity severity, StringView message) {
if (weighted_child_->weighted_target_policy_->shutting_down_) return;
weighted_child_->weighted_target_policy_->channel_control_helper()
->AddTraceEvent(severity, message);
}
//
// factory
//
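// Illustrative sketch of a service-config fragment accepted by
// ParseLoadBalancingConfig() below (target names, weights, and the
// round_robin child policy are assumptions, not taken from this file):
//
//   "weighted_target_experimental": {
//     "targets": {
//       "target_a": { "weight": 3, "childPolicy": [ { "round_robin": {} } ] },
//       "target_b": { "weight": 7, "childPolicy": [ { "round_robin": {} } ] }
//     }
//   }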
class WeightedTargetLbFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
return MakeOrphanable<WeightedTargetLb>(std::move(args));
}
const char* name() const override { return kWeightedTarget; }
RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
const Json& json, grpc_error** error) const override {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
if (json.type() == Json::Type::JSON_NULL) {
// weighted_target was mentioned as a policy in the deprecated
// loadBalancingPolicy field or in the client API.
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:weighted_target policy requires "
"configuration. Please use loadBalancingConfig field of service "
"config instead.");
return nullptr;
}
std::vector<grpc_error*> error_list;
// Weight map.
WeightedTargetLbConfig::TargetMap target_map;
auto it = json.object_value().find("targets");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:targets error:required field not present"));
} else if (it->second.type() != Json::Type::OBJECT) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:targets error:type should be object"));
} else {
for (const auto& p : it->second.object_value()) {
WeightedTargetLbConfig::ChildConfig child_config;
std::vector<grpc_error*> child_errors =
ParseChildConfig(p.second, &child_config);
if (!child_errors.empty()) {
// Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error
// string is not static in this case.
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(
absl::StrCat("field:targets key:", p.first).c_str());
for (grpc_error* child_error : child_errors) {
error = grpc_error_add_child(error, child_error);
}
error_list.push_back(error);
} else {
target_map[p.first] = std::move(child_config);
}
}
}
if (!error_list.empty()) {
*error = GRPC_ERROR_CREATE_FROM_VECTOR(
"weighted_target_experimental LB policy config", &error_list);
return nullptr;
}
return MakeRefCounted<WeightedTargetLbConfig>(std::move(target_map));
}
private:
static std::vector<grpc_error*> ParseChildConfig(
const Json& json, WeightedTargetLbConfig::ChildConfig* child_config) {
std::vector<grpc_error*> error_list;
if (json.type() != Json::Type::OBJECT) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"value should be of type object"));
return error_list;
}
// Weight.
auto it = json.object_value().find("weight");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"required field \"weight\" not specified"));
} else if (it->second.type() != Json::Type::NUMBER) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:weight error:must be of type number"));
} else {
child_config->weight =
gpr_parse_nonnegative_int(it->second.string_value().c_str());
if (child_config->weight == -1) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:weight error:unparseable value"));
} else if (child_config->weight == 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:weight error:value must be greater than zero"));
}
}
// Child policy.
it = json.object_value().find("childPolicy");
if (it != json.object_value().end()) {
grpc_error* parse_error = GRPC_ERROR_NONE;
child_config->config =
LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(it->second,
&parse_error);
if (child_config->config == nullptr) {
GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
std::vector<grpc_error*> child_errors;
child_errors.push_back(parse_error);
error_list.push_back(
GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors));
}
}
return error_list;
}
};
} // namespace
} // namespace grpc_core
//
// Plugin registration
//
void grpc_lb_policy_weighted_target_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory(
absl::make_unique<grpc_core::WeightedTargetLbFactory>());
}
void grpc_lb_policy_weighted_target_shutdown() {}

@ -37,9 +37,9 @@ namespace {
constexpr char kCds[] = "cds_experimental";
// Config for this LB policy.
class CdsConfig : public LoadBalancingPolicy::Config {
class CdsLbConfig : public LoadBalancingPolicy::Config {
public:
explicit CdsConfig(std::string cluster) : cluster_(std::move(cluster)) {}
explicit CdsLbConfig(std::string cluster) : cluster_(std::move(cluster)) {}
const std::string& cluster() const { return cluster_; }
const char* name() const override { return kCds; }
@ -50,7 +50,7 @@ class CdsConfig : public LoadBalancingPolicy::Config {
// CDS LB policy.
class CdsLb : public LoadBalancingPolicy {
public:
explicit CdsLb(Args args);
CdsLb(RefCountedPtr<XdsClient> xds_client, Args args);
const char* name() const override { return kCds; }
@ -89,7 +89,7 @@ class CdsLb : public LoadBalancingPolicy {
void ShutdownLocked() override;
RefCountedPtr<CdsConfig> config_;
RefCountedPtr<CdsLbConfig> config_;
// Current channel args from the resolver.
const grpc_channel_args* args_ = nullptr;
@ -124,21 +124,37 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) {
}
// Construct config for child policy.
Json::Object child_config = {
{"edsServiceName",
(cluster_data.eds_service_name.empty() ? parent_->config_->cluster()
: cluster_data.eds_service_name)},
{"clusterName", parent_->config_->cluster()},
{"localityPickingPolicy",
Json::Array{
Json::Object{
{"weighted_target_experimental",
Json::Object{
{"targets", Json::Object()},
}},
},
}},
{"endpointPickingPolicy",
Json::Array{
Json::Object{
{"round_robin", Json::Object()},
},
}},
};
if (!cluster_data.eds_service_name.empty()) {
child_config["edsServiceName"] = cluster_data.eds_service_name;
}
if (cluster_data.lrs_load_reporting_server_name.has_value()) {
child_config["lrsLoadReportingServerName"] =
cluster_data.lrs_load_reporting_server_name.value();
}
Json json = Json::Array{
Json::Object{
{"xds_experimental", std::move(child_config)},
{"eds_experimental", std::move(child_config)},
},
};
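// For illustration (cluster name assumed): with cluster "cluster_a" and no
// EDS service name or LRS server set, the generated config is roughly
//   [ { "eds_experimental": {
//         "clusterName": "cluster_a",
//         "localityPickingPolicy":
//             [ { "weighted_target_experimental": { "targets": {} } } ],
//         "endpointPickingPolicy": [ { "round_robin": {} } ] } } ]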
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
std::string json_str = json.Dump();
std::string json_str = json.Dump(/*indent=*/1);
gpr_log(GPR_INFO, "[cdslb %p] generated config for child policy: %s",
parent_.get(), json_str.c_str());
}
@ -156,19 +172,19 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) {
args.args = parent_->args_;
args.channel_control_helper = absl::make_unique<Helper>(parent_->Ref());
parent_->child_policy_ =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
"xds_experimental", std::move(args));
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(config->name(),
std::move(args));
if (parent_->child_policy_ == nullptr) {
OnError(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"failed to create xds_experimental child policy"));
"failed to create child policy"));
return;
}
grpc_pollset_set_add_pollset_set(
parent_->child_policy_->interested_parties(),
parent_->interested_parties());
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] created child policy xds_experimental (%p)",
parent_.get(), parent_->child_policy_.get());
gpr_log(GPR_INFO, "[cdslb %p] created child policy %s (%p)",
parent_.get(), config->name(), parent_->child_policy_.get());
}
}
// Update child policy.
@ -232,9 +248,8 @@ void CdsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) {
// CdsLb
//
CdsLb::CdsLb(Args args)
: LoadBalancingPolicy(std::move(args)),
xds_client_(XdsClient::GetFromChannelArgs(*args.args)) {
CdsLb::CdsLb(RefCountedPtr<XdsClient> xds_client, Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] created -- using xds client %p from channel",
this, xds_client_.get());
@ -313,11 +328,19 @@ void CdsLb::UpdateLocked(UpdateArgs args) {
// factory
//
class CdsFactory : public LoadBalancingPolicyFactory {
class CdsLbFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
return MakeOrphanable<CdsLb>(std::move(args));
RefCountedPtr<XdsClient> xds_client =
XdsClient::GetFromChannelArgs(*args.args);
if (xds_client == nullptr) {
gpr_log(GPR_ERROR,
"XdsClient not present in channel args -- cannot instantiate "
"cds LB policy");
return nullptr;
}
return MakeOrphanable<CdsLb>(std::move(xds_client), std::move(args));
}
const char* name() const override { return kCds; }
@ -349,7 +372,7 @@ class CdsFactory : public LoadBalancingPolicyFactory {
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Cds Parser", &error_list);
return nullptr;
}
return MakeRefCounted<CdsConfig>(std::move(cluster));
return MakeRefCounted<CdsLbConfig>(std::move(cluster));
}
};
@ -364,7 +387,7 @@ class CdsFactory : public LoadBalancingPolicyFactory {
void grpc_lb_policy_cds_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory(
absl::make_unique<grpc_core::CdsFactory>());
absl::make_unique<grpc_core::CdsLbFactory>());
}
void grpc_lb_policy_cds_shutdown() {}

File diff suppressed because it is too large

@ -0,0 +1,524 @@
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/xds/xds_client.h"
#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
namespace grpc_core {
TraceFlag grpc_lb_lrs_trace(false, "lrs_lb");
namespace {
constexpr char kLrs[] = "lrs_experimental";
// Config for LRS LB policy.
class LrsLbConfig : public LoadBalancingPolicy::Config {
public:
LrsLbConfig(RefCountedPtr<LoadBalancingPolicy::Config> child_policy,
std::string cluster_name, std::string eds_service_name,
std::string lrs_load_reporting_server_name,
RefCountedPtr<XdsLocalityName> locality_name)
: child_policy_(std::move(child_policy)),
cluster_name_(std::move(cluster_name)),
eds_service_name_(std::move(eds_service_name)),
lrs_load_reporting_server_name_(
std::move(lrs_load_reporting_server_name)),
locality_name_(std::move(locality_name)) {}
const char* name() const override { return kLrs; }
RefCountedPtr<LoadBalancingPolicy::Config> child_policy() const {
return child_policy_;
}
const std::string& cluster_name() const { return cluster_name_; }
const std::string& eds_service_name() const { return eds_service_name_; }
const std::string& lrs_load_reporting_server_name() const {
return lrs_load_reporting_server_name_;
}
RefCountedPtr<XdsLocalityName> locality_name() const {
return locality_name_;
}
private:
RefCountedPtr<LoadBalancingPolicy::Config> child_policy_;
std::string cluster_name_;
std::string eds_service_name_;
std::string lrs_load_reporting_server_name_;
RefCountedPtr<XdsLocalityName> locality_name_;
};
// LRS LB policy.
class LrsLb : public LoadBalancingPolicy {
public:
LrsLb(RefCountedPtr<XdsClient> xds_client, Args args);
const char* name() const override { return kLrs; }
void UpdateLocked(UpdateArgs args) override;
void ExitIdleLocked() override;
void ResetBackoffLocked() override;
private:
// A simple wrapper for ref-counting a picker from the child policy.
class RefCountedPicker : public RefCounted<RefCountedPicker> {
public:
explicit RefCountedPicker(std::unique_ptr<SubchannelPicker> picker)
: picker_(std::move(picker)) {}
PickResult Pick(PickArgs args) { return picker_->Pick(args); }
private:
std::unique_ptr<SubchannelPicker> picker_;
};
// A picker that wraps the picker from the child to perform load reporting.
class LoadReportingPicker : public SubchannelPicker {
public:
LoadReportingPicker(RefCountedPtr<RefCountedPicker> picker,
RefCountedPtr<XdsClusterLocalityStats> locality_stats)
: picker_(std::move(picker)),
locality_stats_(std::move(locality_stats)) {}
PickResult Pick(PickArgs args);
private:
RefCountedPtr<RefCountedPicker> picker_;
RefCountedPtr<XdsClusterLocalityStats> locality_stats_;
};
class Helper : public ChannelControlHelper {
public:
explicit Helper(RefCountedPtr<LrsLb> lrs_policy)
: lrs_policy_(std::move(lrs_policy)) {}
~Helper() { lrs_policy_.reset(DEBUG_LOCATION, "Helper"); }
RefCountedPtr<SubchannelInterface> CreateSubchannel(
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker) override;
void RequestReresolution() override;
void AddTraceEvent(TraceSeverity severity, StringView message) override;
private:
RefCountedPtr<LrsLb> lrs_policy_;
};
~LrsLb();
void ShutdownLocked() override;
OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
const grpc_channel_args* args);
void UpdateChildPolicyLocked(ServerAddressList addresses,
const grpc_channel_args* args);
void MaybeUpdatePickerLocked();
// Current config from the resolver.
RefCountedPtr<LrsLbConfig> config_;
// Internal state.
bool shutting_down_ = false;
// The xds client.
RefCountedPtr<XdsClient> xds_client_;
// The stats for client-side load reporting.
RefCountedPtr<XdsClusterLocalityStats> locality_stats_;
OrphanablePtr<LoadBalancingPolicy> child_policy_;
// Latest state and picker reported by the child policy.
grpc_connectivity_state state_ = GRPC_CHANNEL_IDLE;
RefCountedPtr<RefCountedPicker> picker_;
};
//
// LrsLb::LoadReportingPicker
//
LoadBalancingPolicy::PickResult LrsLb::LoadReportingPicker::Pick(
LoadBalancingPolicy::PickArgs args) {
// Forward the pick to the picker returned from the child policy.
PickResult result = picker_->Pick(args);
if (result.type == PickResult::PICK_COMPLETE &&
result.subchannel != nullptr) {
// Record a call started.
locality_stats_->AddCallStarted();
// Intercept the recv_trailing_metadata op to record call completion.
XdsClusterLocalityStats* locality_stats =
locality_stats_->Ref(DEBUG_LOCATION, "LocalityStats+call").release();
result.recv_trailing_metadata_ready =
// Note: This callback does not run in either the control plane
// combiner or the data plane mutex.
[locality_stats](grpc_error* error, MetadataInterface* /*metadata*/,
CallState* /*call_state*/) {
const bool call_failed = error != GRPC_ERROR_NONE;
locality_stats->AddCallFinished(call_failed);
locality_stats->Unref(DEBUG_LOCATION, "LocalityStats+call");
};
}
return result;
}
//
// LrsLb
//
LrsLb::LrsLb(RefCountedPtr<XdsClient> xds_client, Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO, "[lrs_lb %p] created -- using xds client %p from channel",
this, xds_client_.get());
}
}
LrsLb::~LrsLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO, "[lrs_lb %p] destroying xds LB policy", this);
}
}
void LrsLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO, "[lrs_lb %p] shutting down", this);
}
shutting_down_ = true;
// Remove the child policy's interested_parties pollset_set from the
// LRS policy.
if (child_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
interested_parties());
child_policy_.reset();
}
// Drop our ref to the child's picker, in case it's holding a ref to
// the child.
picker_.reset();
locality_stats_.reset();
xds_client_.reset();
}
void LrsLb::ExitIdleLocked() {
if (child_policy_ != nullptr) child_policy_->ExitIdleLocked();
}
void LrsLb::ResetBackoffLocked() {
// The XdsClient will have its backoff reset by the xds resolver, so we
// don't need to do it here.
if (child_policy_ != nullptr) child_policy_->ResetBackoffLocked();
}
void LrsLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO, "[lrs_lb %p] Received update", this);
}
// Update config.
auto old_config = std::move(config_);
config_ = std::move(args.config);
// Update load reporting if needed.
if (old_config == nullptr ||
config_->lrs_load_reporting_server_name() !=
old_config->lrs_load_reporting_server_name() ||
config_->cluster_name() != old_config->cluster_name() ||
config_->eds_service_name() != old_config->eds_service_name() ||
*config_->locality_name() != *old_config->locality_name()) {
locality_stats_ = xds_client_->AddClusterLocalityStats(
config_->lrs_load_reporting_server_name(), config_->cluster_name(),
config_->eds_service_name(), config_->locality_name());
MaybeUpdatePickerLocked();
}
// Update child policy.
UpdateChildPolicyLocked(std::move(args.addresses), args.args);
args.args = nullptr; // Ownership passed to UpdateChildPolicyLocked().
}
void LrsLb::MaybeUpdatePickerLocked() {
if (picker_ != nullptr) {
auto lrs_picker =
absl::make_unique<LoadReportingPicker>(picker_, locality_stats_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO, "[lrs_lb %p] updating connectivity: state=%s picker=%p",
this, ConnectivityStateName(state_), lrs_picker.get());
}
channel_control_helper()->UpdateState(state_, std::move(lrs_picker));
}
}
OrphanablePtr<LoadBalancingPolicy> LrsLb::CreateChildPolicyLocked(
const grpc_channel_args* args) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
absl::make_unique<Helper>(Ref(DEBUG_LOCATION, "Helper"));
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_lb_lrs_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO, "[lrs_lb %p] Created new child policy handler %p", this,
lb_policy.get());
}
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
interested_parties());
return lb_policy;
}
void LrsLb::UpdateChildPolicyLocked(ServerAddressList addresses,
const grpc_channel_args* args) {
// Create policy if needed.
if (child_policy_ == nullptr) {
child_policy_ = CreateChildPolicyLocked(args);
}
// Construct update args.
UpdateArgs update_args;
update_args.addresses = std::move(addresses);
update_args.config = config_->child_policy();
update_args.args = args;
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO, "[lrs_lb %p] Updating child policy handler %p", this,
child_policy_.get());
}
child_policy_->UpdateLocked(std::move(update_args));
}
//
// LrsLb::Helper
//
RefCountedPtr<SubchannelInterface> LrsLb::Helper::CreateSubchannel(
const grpc_channel_args& args) {
if (lrs_policy_->shutting_down_) return nullptr;
return lrs_policy_->channel_control_helper()->CreateSubchannel(args);
}
void LrsLb::Helper::UpdateState(grpc_connectivity_state state,
std::unique_ptr<SubchannelPicker> picker) {
if (lrs_policy_->shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) {
gpr_log(GPR_INFO,
"[lrs_lb %p] child connectivity state update: state=%s picker=%p",
lrs_policy_.get(), ConnectivityStateName(state), picker.get());
}
// Save the state and picker.
lrs_policy_->state_ = state;
lrs_policy_->picker_ = MakeRefCounted<RefCountedPicker>(std::move(picker));
// Wrap the picker and return it to the channel.
lrs_policy_->MaybeUpdatePickerLocked();
}
void LrsLb::Helper::RequestReresolution() {
if (lrs_policy_->shutting_down_) return;
lrs_policy_->channel_control_helper()->RequestReresolution();
}
void LrsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) {
if (lrs_policy_->shutting_down_) return;
lrs_policy_->channel_control_helper()->AddTraceEvent(severity, message);
}
//
// factory
//
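// Illustrative sketch of a config accepted by ParseLoadBalancingConfig()
// below (all values are assumptions, not taken from this file):
//
//   "lrs_experimental": {
//     "clusterName": "cluster_a",
//     "edsServiceName": "eds_service_a",
//     "lrsLoadReportingServerName": "lrs.example.com",
//     "locality": { "region": "region_a", "zone": "zone_a", "subzone": "" },
//     "childPolicy": [ { "round_robin": {} } ]
//   }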
class LrsLbFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
RefCountedPtr<XdsClient> xds_client =
XdsClient::GetFromChannelArgs(*args.args);
if (xds_client == nullptr) {
gpr_log(GPR_ERROR,
"XdsClient not present in channel args -- cannot instantiate "
"lrs LB policy");
return nullptr;
}
return MakeOrphanable<LrsLb>(std::move(xds_client), std::move(args));
}
const char* name() const override { return kLrs; }
RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
const Json& json, grpc_error** error) const override {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
if (json.type() == Json::Type::JSON_NULL) {
// lrs was mentioned as a policy in the deprecated loadBalancingPolicy
// field or in the client API.
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:lrs policy requires configuration. "
"Please use loadBalancingConfig field of service config instead.");
return nullptr;
}
std::vector<grpc_error*> error_list;
// Child policy.
RefCountedPtr<LoadBalancingPolicy::Config> child_policy;
auto it = json.object_value().find("childPolicy");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:childPolicy error:required field missing"));
} else {
grpc_error* parse_error = GRPC_ERROR_NONE;
child_policy = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
it->second, &parse_error);
if (child_policy == nullptr) {
GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
std::vector<grpc_error*> child_errors;
child_errors.push_back(parse_error);
error_list.push_back(
GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors));
}
}
// Cluster name.
std::string cluster_name;
it = json.object_value().find("clusterName");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:clusterName error:required field missing"));
} else if (it->second.type() != Json::Type::STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:clusterName error:type should be string"));
} else {
cluster_name = it->second.string_value();
}
// EDS service name.
std::string eds_service_name;
it = json.object_value().find("edsServiceName");
if (it != json.object_value().end()) {
if (it->second.type() != Json::Type::STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:edsServiceName error:type should be string"));
} else {
eds_service_name = it->second.string_value();
}
}
// Locality.
RefCountedPtr<XdsLocalityName> locality_name;
it = json.object_value().find("locality");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:locality error:required field missing"));
} else {
std::vector<grpc_error*> child_errors =
ParseLocality(it->second, &locality_name);
if (!child_errors.empty()) {
error_list.push_back(
GRPC_ERROR_CREATE_FROM_VECTOR("field:locality", &child_errors));
}
}
// LRS load reporting server name.
std::string lrs_load_reporting_server_name;
it = json.object_value().find("lrsLoadReportingServerName");
if (it == json.object_value().end()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:lrsLoadReportingServerName error:required field missing"));
} else if (it->second.type() != Json::Type::STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:lrsLoadReportingServerName error:type should be string"));
} else {
lrs_load_reporting_server_name = it->second.string_value();
}
if (!error_list.empty()) {
*error = GRPC_ERROR_CREATE_FROM_VECTOR(
"lrs_experimental LB policy config", &error_list);
return nullptr;
}
return MakeRefCounted<LrsLbConfig>(
std::move(child_policy), std::move(cluster_name),
std::move(eds_service_name), std::move(lrs_load_reporting_server_name),
std::move(locality_name));
}
private:
static std::vector<grpc_error*> ParseLocality(
const Json& json, RefCountedPtr<XdsLocalityName>* name) {
std::vector<grpc_error*> error_list;
if (json.type() != Json::Type::OBJECT) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"locality field is not an object"));
return error_list;
}
std::string region;
auto it = json.object_value().find("region");
if (it != json.object_value().end()) {
if (it->second.type() != Json::Type::STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"\"region\" field is not a string"));
} else {
region = it->second.string_value();
}
}
std::string zone;
it = json.object_value().find("zone");
if (it != json.object_value().end()) {
if (it->second.type() != Json::Type::STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"\"zone\" field is not a string"));
} else {
zone = it->second.string_value();
}
}
std::string subzone;
it = json.object_value().find("subzone");
if (it != json.object_value().end()) {
if (it->second.type() != Json::Type::STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"\"subzone\" field is not a string"));
} else {
subzone = it->second.string_value();
}
}
if (region.empty() && zone.empty() && subzone.empty()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"at least one of region, zone, or subzone must be set"));
}
if (error_list.empty()) {
*name = MakeRefCounted<XdsLocalityName>(region, zone, subzone);
}
return error_list;
}
};
} // namespace
} // namespace grpc_core
//
// Plugin registration
//
void grpc_lb_policy_lrs_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory(
absl::make_unique<grpc_core::LrsLbFactory>());
}
void grpc_lb_policy_lrs_shutdown() {}

File diff suppressed because it is too large

@ -29,5 +29,4 @@
#define GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER \
"grpc.address_is_backend_from_xds_load_balancer"
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H \
*/
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H */

@ -35,6 +35,8 @@ class RegistryState {
void RegisterLoadBalancingPolicyFactory(
std::unique_ptr<LoadBalancingPolicyFactory> factory) {
gpr_log(GPR_DEBUG, "registering LB policy factory for \"%s\"",
factory->name());
for (size_t i = 0; i < factories_.size(); ++i) {
GPR_ASSERT(strcmp(factories_[i]->name(), factory->name()) != 0);
}

@ -30,6 +30,7 @@
#include <address_sorting/address_sorting.h>
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.h"
@ -107,8 +108,10 @@ class AresDnsResolver : public Resolver {
grpc_millis last_resolution_timestamp_ = -1;
/// retry backoff state
BackOff backoff_;
/// currently resolving addresses
/// currently resolving backend addresses
std::unique_ptr<ServerAddressList> addresses_;
/// currently resolving balancer addresses
std::unique_ptr<ServerAddressList> balancer_addresses_;
/// currently resolving service config
char* service_config_json_ = nullptr;
// has shutdown been initiated
@ -328,9 +331,11 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
r->Unref(DEBUG_LOCATION, "OnResolvedLocked() shutdown");
return;
}
if (r->addresses_ != nullptr) {
if (r->addresses_ != nullptr || r->balancer_addresses_ != nullptr) {
Result result;
result.addresses = std::move(*r->addresses_);
if (r->addresses_ != nullptr) {
result.addresses = std::move(*r->addresses_);
}
if (r->service_config_json_ != nullptr) {
std::string service_config_string = ChooseServiceConfig(
r->service_config_json_, &result.service_config_error);
@ -343,9 +348,16 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
service_config_string, &result.service_config_error);
}
}
result.args = grpc_channel_args_copy(r->channel_args_);
InlinedVector<grpc_arg, 1> new_args;
if (r->balancer_addresses_ != nullptr) {
new_args.push_back(
CreateGrpclbBalancerAddressesArg(r->balancer_addresses_.get()));
}
result.args = grpc_channel_args_copy_and_add(
r->channel_args_, new_args.data(), new_args.size());
r->result_handler()->ReturnResult(std::move(result));
r->addresses_.reset();
r->balancer_addresses_.reset();
// Reset backoff state so that we start from the beginning when the
// next request gets triggered.
r->backoff_.Reset();
@ -424,7 +436,8 @@ void AresDnsResolver::StartResolvingLocked() {
GRPC_CLOSURE_INIT(&on_resolved_, OnResolved, this, grpc_schedule_on_exec_ctx);
pending_request_ = grpc_dns_lookup_ares_locked(
dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
&on_resolved_, &addresses_, enable_srv_queries_ /* check_grpclb */,
&on_resolved_, &addresses_,
enable_srv_queries_ ? &balancer_addresses_ : nullptr,
request_service_config_ ? &service_config_json_ : nullptr,
query_timeout_ms_, combiner());
last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();

@ -33,6 +33,7 @@
#include <grpc/support/time.h>
#include <address_sorting/address_sorting.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
#include "src/core/lib/gpr/string.h"
@ -60,6 +61,8 @@ struct grpc_ares_request {
grpc_closure* on_done;
/** the pointer to receive the resolved addresses */
std::unique_ptr<grpc_core::ServerAddressList>* addresses_out;
/** the pointer to receive the resolved balancer addresses */
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses_out;
/** the pointer to receive the service config in JSON */
char** service_config_json_out;
/** the event driver used by this request */
@ -84,25 +87,32 @@ typedef struct grpc_ares_hostbyname_request {
bool is_balancer;
} grpc_ares_hostbyname_request;
static void log_address_sorting_list(const ServerAddressList& addresses,
static void log_address_sorting_list(const grpc_ares_request* r,
const ServerAddressList& addresses,
const char* input_output_str) {
for (size_t i = 0; i < addresses.size(); i++) {
char* addr_str;
if (grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true)) {
gpr_log(GPR_INFO, "c-ares address sorting: %s[%" PRIuPTR "]=%s",
input_output_str, i, addr_str);
gpr_log(
GPR_INFO,
"(c-ares resolver) request:%p c-ares address sorting: %s[%" PRIuPTR
"]=%s",
r, input_output_str, i, addr_str);
gpr_free(addr_str);
} else {
gpr_log(GPR_INFO,
"c-ares address sorting: %s[%" PRIuPTR "]=<unprintable>",
input_output_str, i);
gpr_log(
GPR_INFO,
"(c-ares resolver) request:%p c-ares address sorting: %s[%" PRIuPTR
"]=<unprintable>",
r, input_output_str, i);
}
}
}
void grpc_cares_wrapper_address_sorting_sort(ServerAddressList* addresses) {
void grpc_cares_wrapper_address_sorting_sort(const grpc_ares_request* r,
ServerAddressList* addresses) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_cares_address_sorting)) {
log_address_sorting_list(*addresses, "input");
log_address_sorting_list(r, *addresses, "input");
}
address_sorting_sortable* sortables = (address_sorting_sortable*)gpr_zalloc(
sizeof(address_sorting_sortable) * addresses->size());
@ -121,7 +131,7 @@ void grpc_cares_wrapper_address_sorting_sort(ServerAddressList* addresses) {
gpr_free(sortables);
*addresses = std::move(sorted);
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_cares_address_sorting)) {
log_address_sorting_list(*addresses, "output");
log_address_sorting_list(r, *addresses, "output");
}
}
@ -142,7 +152,7 @@ void grpc_ares_complete_request_locked(grpc_ares_request* r) {
r->ev_driver = nullptr;
ServerAddressList* addresses = r->addresses_out->get();
if (addresses != nullptr) {
grpc_cares_wrapper_address_sorting_sort(addresses);
grpc_cares_wrapper_address_sorting_sort(r, addresses);
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
// TODO(apolcyn): allow c-ares to return a service config
@ -184,17 +194,17 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
GRPC_CARES_TRACE_LOG(
"request:%p on_hostbyname_done_locked host=%s ARES_SUCCESS", r,
hr->host);
if (*r->addresses_out == nullptr) {
*r->addresses_out = absl::make_unique<ServerAddressList>();
std::unique_ptr<ServerAddressList>* address_list_ptr =
hr->is_balancer ? r->balancer_addresses_out : r->addresses_out;
if (*address_list_ptr == nullptr) {
*address_list_ptr = absl::make_unique<ServerAddressList>();
}
ServerAddressList& addresses = **r->addresses_out;
ServerAddressList& addresses = **address_list_ptr;
for (size_t i = 0; hostent->h_addr_list[i] != nullptr; ++i) {
grpc_core::InlinedVector<grpc_arg, 2> args_to_add;
grpc_core::InlinedVector<grpc_arg, 1> args_to_add;
if (hr->is_balancer) {
args_to_add.emplace_back(grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_BALANCER), 1));
args_to_add.emplace_back(grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME), hr->host));
args_to_add.emplace_back(
grpc_core::CreateGrpclbBalancerNameArg(hr->host));
}
grpc_channel_args* args = grpc_channel_args_copy_and_add(
nullptr, args_to_add.data(), args_to_add.size());
@ -350,7 +360,7 @@ done:
void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
grpc_ares_request* r, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
bool check_grpclb, int query_timeout_ms, grpc_core::Combiner* combiner) {
int query_timeout_ms, grpc_core::Combiner* combiner) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request* hr = nullptr;
ares_channel* channel = nullptr;
@ -425,7 +435,7 @@ void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
/*is_balancer=*/false);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_locked,
hr);
if (check_grpclb) {
if (r->balancer_addresses_out != nullptr) {
/* Query the SRV record */
grpc_ares_request_ref_locked(r);
char* service_name;
@ -520,7 +530,7 @@ static bool target_matches_localhost(const char* name) {
#ifdef GRPC_ARES_RESOLVE_LOCALHOST_MANUALLY
static bool inner_maybe_resolve_localhost_manually_locked(
const char* name, const char* default_port,
const grpc_ares_request* r, const char* name, const char* default_port,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
grpc_core::UniquePtr<char>* host, grpc_core::UniquePtr<char>* port) {
grpc_core::SplitHostPort(name, host, port);
@ -563,23 +573,24 @@ static bool inner_maybe_resolve_localhost_manually_locked(
(*addrs)->emplace_back(&ipv4_loopback_addr, sizeof(ipv4_loopback_addr),
nullptr /* args */);
// Let the address sorter figure out which one should be tried first.
grpc_cares_wrapper_address_sorting_sort(addrs->get());
grpc_cares_wrapper_address_sorting_sort(r, addrs->get());
return true;
}
return false;
}
static bool grpc_ares_maybe_resolve_localhost_manually_locked(
const char* name, const char* default_port,
const grpc_ares_request* r, const char* name, const char* default_port,
std::unique_ptr<grpc_core::ServerAddressList>* addrs) {
grpc_core::UniquePtr<char> host;
grpc_core::UniquePtr<char> port;
return inner_maybe_resolve_localhost_manually_locked(name, default_port,
return inner_maybe_resolve_localhost_manually_locked(r, name, default_port,
addrs, &host, &port);
}
#else /* GRPC_ARES_RESOLVE_LOCALHOST_MANUALLY */
static bool grpc_ares_maybe_resolve_localhost_manually_locked(
const char* /*name*/, const char* /*default_port*/,
const grpc_ares_request* r, const char* /*name*/,
const char* /*default_port*/,
std::unique_ptr<grpc_core::ServerAddressList>* /*addrs*/) {
return false;
}
@ -588,7 +599,8 @@ static bool grpc_ares_maybe_resolve_localhost_manually_locked(
static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) {
grpc_ares_request* r =
@ -596,6 +608,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
r->ev_driver = nullptr;
r->on_done = on_done;
r->addresses_out = addrs;
r->balancer_addresses_out = balancer_addrs;
r->service_config_json_out = service_config_json;
r->error = GRPC_ERROR_NONE;
r->pending_queries = 0;
@ -609,7 +622,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
return r;
}
// Early out if the target is localhost and we're on Windows.
if (grpc_ares_maybe_resolve_localhost_manually_locked(name, default_port,
if (grpc_ares_maybe_resolve_localhost_manually_locked(r, name, default_port,
addrs)) {
grpc_ares_complete_request_locked(r);
return r;
@ -618,20 +631,21 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
// as to cut down on lookups over the network, especially in tests:
// https://github.com/grpc/proposal/pull/79
if (target_matches_localhost(name)) {
check_grpclb = false;
r->balancer_addresses_out = nullptr;
r->service_config_json_out = nullptr;
}
// Look up name using c-ares lib.
grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
r, dns_server, name, default_port, interested_parties, check_grpclb,
query_timeout_ms, combiner);
r, dns_server, name, default_port, interested_parties, query_timeout_ms,
combiner);
return r;
}
grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
@ -709,7 +723,6 @@ static void on_dns_lookup_done_locked(void* arg, grpc_error* error) {
static_cast<grpc_resolved_address*>(gpr_zalloc(
sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs));
for (size_t i = 0; i < (*resolved_addresses)->naddrs; ++i) {
GPR_ASSERT(!(*r->addresses)[i].IsBalancer());
memcpy(&(*resolved_addresses)->addrs[i], &(*r->addresses)[i].address(),
sizeof(grpc_resolved_address));
}
@ -736,9 +749,9 @@ static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
grpc_schedule_on_exec_ctx);
r->ares_request = grpc_dns_lookup_ares_locked(
nullptr /* dns_server */, r->name, r->default_port, r->interested_parties,
&r->on_dns_lookup_done_locked, &r->addresses, false /* check_grpclb */,
nullptr /* service_config_json */, GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS,
r->combiner);
&r->on_dns_lookup_done_locked, &r->addresses,
nullptr /* balancer_addresses */, nullptr /* service_config_json */,
GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, r->combiner);
}
static void grpc_resolve_address_ares_impl(const char* name,

@ -63,7 +63,8 @@ extern void (*grpc_resolve_address_ares)(const char* name,
extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addresses, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addresses,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner);
@ -89,7 +90,7 @@ bool grpc_ares_query_ipv6();
/* Sorts destinations in lb_addrs according to RFC 6724. */
void grpc_cares_wrapper_address_sorting_sort(
grpc_core::ServerAddressList* addresses);
const grpc_ares_request* request, grpc_core::ServerAddressList* addresses);
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \
*/

@ -29,7 +29,8 @@ struct grpc_ares_request {
static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) {
return NULL;
@ -38,7 +39,8 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl;

@ -37,15 +37,12 @@ ServerAddress::ServerAddress(const void* address, size_t address_len,
address_.len = static_cast<socklen_t>(address_len);
}
bool ServerAddress::operator==(const ServerAddress& other) const {
return address_.len == other.address_.len &&
memcmp(address_.addr, other.address_.addr, address_.len) == 0 &&
grpc_channel_args_compare(args_, other.args_) == 0;
}
bool ServerAddress::IsBalancer() const {
return grpc_channel_arg_get_bool(
grpc_channel_args_find(args_, GRPC_ARG_ADDRESS_IS_BALANCER), false);
int ServerAddress::Cmp(const ServerAddress& other) const {
if (address_.len > other.address_.len) return 1;
if (address_.len < other.address_.len) return -1;
int retval = memcmp(address_.addr, other.address_.addr, address_.len);
if (retval != 0) return retval;
return grpc_channel_args_compare(args_, other.args_);
}
} // namespace grpc_core

@ -25,13 +25,6 @@
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/iomgr/resolve_address.h"
// Channel arg key for a bool indicating whether an address is a grpclb
// load balancer (as opposed to a backend).
#define GRPC_ARG_ADDRESS_IS_BALANCER "grpc.address_is_balancer"
// Channel arg key for a string indicating an address's balancer name.
#define GRPC_ARG_ADDRESS_BALANCER_NAME "grpc.address_balancer_name"
namespace grpc_core {
//
@ -73,13 +66,13 @@ class ServerAddress {
return *this;
}
bool operator==(const ServerAddress& other) const;
bool operator==(const ServerAddress& other) const { return Cmp(other) == 0; }
int Cmp(const ServerAddress& other) const;
const grpc_resolved_address& address() const { return address_; }
const grpc_channel_args* args() const { return args_; }
bool IsBalancer() const;
private:
grpc_resolved_address address_;
grpc_channel_args* args_;

@ -1310,7 +1310,7 @@ grpc_error* LocalityParse(
grpc_error* DropParseAndAppend(
const envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload* drop_overload,
XdsApi::DropConfig* drop_config, bool* drop_all) {
XdsApi::DropConfig* drop_config) {
// Get the category.
upb_strview category =
envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload_category(
@ -1341,7 +1341,6 @@ grpc_error* DropParseAndAppend(
}
// Cap numerator to 1000000.
numerator = GPR_MIN(numerator, 1000000);
if (numerator == 1000000) *drop_all = true;
drop_config->AddCategory(std::string(category.data, category.size),
numerator);
return GRPC_ERROR_NONE;
@ -1417,17 +1416,10 @@ grpc_error* EdsResponseParse(
policy, &drop_size);
for (size_t j = 0; j < drop_size; ++j) {
grpc_error* error =
DropParseAndAppend(drop_overload[j], eds_update.drop_config.get(),
&eds_update.drop_all);
DropParseAndAppend(drop_overload[j], eds_update.drop_config.get());
if (error != GRPC_ERROR_NONE) return error;
}
}
// Validate the update content.
if (eds_update.priority_list_update.empty() && !eds_update.drop_all) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"EDS response doesn't contain any valid "
"locality but doesn't require to drop all calls.");
}
eds_update_map->emplace(std::string(cluster_name.data, cluster_name.size),
std::move(eds_update));
}

@ -164,6 +164,7 @@ class XdsApi {
void AddCategory(std::string name, uint32_t parts_per_million) {
drop_category_list_.emplace_back(
DropCategory{std::move(name), parts_per_million});
if (parts_per_million == 1000000) drop_all_ = true;
}
// The only method invoked from the data plane combiner.
@ -173,6 +174,8 @@ class XdsApi {
return drop_category_list_;
}
bool drop_all() const { return drop_all_; }
bool operator==(const DropConfig& other) const {
return drop_category_list_ == other.drop_category_list_;
}
@ -180,12 +183,12 @@ class XdsApi {
private:
DropCategoryList drop_category_list_;
bool drop_all_ = false;
};
struct EdsUpdate {
PriorityListUpdate priority_list_update;
RefCountedPtr<DropConfig> drop_config;
bool drop_all = false;
};
using EdsUpdateMap = std::map<std::string /*eds_service_name*/, EdsUpdate>;

@ -1074,7 +1074,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptEdsUpdate(
" drop categories received (drop_all=%d)",
xds_client(), eds_update.priority_list_update.size(),
eds_update.drop_config->drop_category_list().size(),
eds_update.drop_all);
eds_update.drop_config->drop_all());
for (size_t priority = 0;
priority < eds_update.priority_list_update.size(); ++priority) {
const auto* locality_map_update = eds_update.priority_list_update.Find(

@ -42,11 +42,7 @@ class XdsLocalityName : public RefCounted<XdsLocalityName> {
struct Less {
bool operator()(const XdsLocalityName* lhs,
const XdsLocalityName* rhs) const {
int cmp_result = lhs->region_.compare(rhs->region_);
if (cmp_result != 0) return cmp_result < 0;
cmp_result = lhs->zone_.compare(rhs->zone_);
if (cmp_result != 0) return cmp_result < 0;
return lhs->sub_zone_.compare(rhs->sub_zone_) < 0;
return lhs->Compare(*rhs) < 0;
}
bool operator()(const RefCountedPtr<XdsLocalityName>& lhs,
@ -65,6 +61,18 @@ class XdsLocalityName : public RefCounted<XdsLocalityName> {
sub_zone_ == other.sub_zone_;
}
bool operator!=(const XdsLocalityName& other) const {
return !(*this == other);
}
int Compare(const XdsLocalityName& other) const {
int cmp_result = region_.compare(other.region_);
if (cmp_result != 0) return cmp_result;
cmp_result = zone_.compare(other.zone_);
if (cmp_result != 0) return cmp_result;
return sub_zone_.compare(other.sub_zone_);
}
const std::string& region() const { return region_; }
const std::string& zone() const { return zone_; }
const std::string& sub_zone() const { return sub_zone_; }

@ -284,8 +284,8 @@ void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint,
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
/* clamp max recv hint to an allowable size */
if (max_size_hint >= UINT32_MAX - sent_init_window) {
max_recv_bytes = UINT32_MAX - sent_init_window;
if (max_size_hint >= kMaxWindowUpdateSize - sent_init_window) {
max_recv_bytes = kMaxWindowUpdateSize - sent_init_window;
} else {
max_recv_bytes = static_cast<uint32_t>(max_size_hint);
}
@ -298,7 +298,7 @@ void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint,
}
/* add some small lookahead to keep pipelines flowing */
GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window);
GPR_DEBUG_ASSERT(max_recv_bytes <= kMaxWindowUpdateSize - sent_init_window);
if (local_window_delta_ < max_recv_bytes) {
uint32_t add_max_recv_bytes =
static_cast<uint32_t>(max_recv_bytes - local_window_delta_);

@ -1072,9 +1072,11 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
op_can_be_run(stream_op, s, &oas->state, OP_SEND_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas);
stream_state->pending_send_message = false;
if (stream_state->state_callback_received[OP_FAILED]) {
if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED] ||
stream_state->state_callback_received[OP_SUCCEEDED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished");
} else {
grpc_slice_buffer write_slice_buffer;
grpc_slice slice;
@ -1131,9 +1133,11 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
op_can_be_run(stream_op, s, &oas->state,
OP_SEND_TRAILING_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_TRAILING_METADATA", oas);
if (stream_state->state_callback_received[OP_FAILED]) {
if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED] ||
stream_state->state_callback_received[OP_SUCCEEDED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished");
} else {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;

@ -31,7 +31,18 @@
chains are linear, then channel stacks provide a mechanism to minimize
allocations for that chain.
Call stacks are created by channel stacks and represent the per-call data
for that stack. */
for that stack.
Implementations should take care of the following details for a batch -
1. Synchronization is achieved with a CallCombiner. View
src/core/lib/iomgr/call_combiner.h for more details.
2. If the filter wants to inject an error on the way down, it needs to call
grpc_transport_stream_op_batch_finish_with_failure from within the call
combiner. This will cause any batch callbacks to be called with that error.
3. If the filter wants to inject an error on the way up (from a callback), it
should also inject that error in the recv_trailing_metadata callback so that
it can have an effect on the call status.
*/
#include <grpc/support/port_platform.h>

@ -45,90 +45,6 @@ void grpc_channel_credentials_release(grpc_channel_credentials* creds) {
if (creds) creds->Unref();
}
static std::map<grpc_core::UniquePtr<char>,
grpc_core::RefCountedPtr<grpc_channel_credentials>,
grpc_core::StringLess>* g_grpc_control_plane_creds;
static gpr_mu g_control_plane_creds_mu;
static void do_control_plane_creds_init() {
gpr_mu_init(&g_control_plane_creds_mu);
GPR_ASSERT(g_grpc_control_plane_creds == nullptr);
g_grpc_control_plane_creds =
new std::map<grpc_core::UniquePtr<char>,
grpc_core::RefCountedPtr<grpc_channel_credentials>,
grpc_core::StringLess>();
}
void grpc_control_plane_credentials_init() {
static gpr_once once_init_control_plane_creds = GPR_ONCE_INIT;
gpr_once_init(&once_init_control_plane_creds, do_control_plane_creds_init);
}
void grpc_test_only_control_plane_credentials_destroy() {
delete g_grpc_control_plane_creds;
g_grpc_control_plane_creds = nullptr;
gpr_mu_destroy(&g_control_plane_creds_mu);
}
void grpc_test_only_control_plane_credentials_force_init() {
if (g_grpc_control_plane_creds == nullptr) {
do_control_plane_creds_init();
}
}
bool grpc_channel_credentials_attach_credentials(
grpc_channel_credentials* credentials, const char* authority,
grpc_channel_credentials* control_plane_creds) {
grpc_core::ExecCtx exec_ctx;
return credentials->attach_credentials(authority, control_plane_creds->Ref());
}
bool grpc_control_plane_credentials_register(
const char* authority, grpc_channel_credentials* control_plane_creds) {
grpc_core::ExecCtx exec_ctx;
{
grpc_core::MutexLock lock(&g_control_plane_creds_mu);
auto key = grpc_core::UniquePtr<char>(gpr_strdup(authority));
if (g_grpc_control_plane_creds->find(key) !=
g_grpc_control_plane_creds->end()) {
return false;
}
(*g_grpc_control_plane_creds)[std::move(key)] = control_plane_creds->Ref();
}
return true;
}
bool grpc_channel_credentials::attach_credentials(
const char* authority,
grpc_core::RefCountedPtr<grpc_channel_credentials> control_plane_creds) {
auto key = grpc_core::UniquePtr<char>(gpr_strdup(authority));
if (local_control_plane_creds_.find(key) !=
local_control_plane_creds_.end()) {
return false;
}
local_control_plane_creds_[std::move(key)] = std::move(control_plane_creds);
return true;
}
grpc_core::RefCountedPtr<grpc_channel_credentials>
grpc_channel_credentials::get_control_plane_credentials(const char* authority) {
{
auto key = grpc_core::UniquePtr<char>(gpr_strdup(authority));
auto local_lookup = local_control_plane_creds_.find(key);
if (local_lookup != local_control_plane_creds_.end()) {
return local_lookup->second;
}
{
grpc_core::MutexLock lock(&g_control_plane_creds_mu);
auto global_lookup = g_grpc_control_plane_creds->find(key);
if (global_lookup != g_grpc_control_plane_creds->end()) {
return global_lookup->second;
}
}
}
return duplicate_without_call_credentials();
}
void grpc_call_credentials_release(grpc_call_credentials* creds) {
GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds));
grpc_core::ExecCtx exec_ctx;

@ -28,9 +28,7 @@
#include <grpc/support/sync.h>
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/gprpp/map.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/http/httpcli.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/polling_entity.h"
@ -131,29 +129,10 @@ struct grpc_channel_credentials
return args;
}
// Attaches control_plane_creds to the local registry, under authority,
// if no other creds are currently registered under authority. Returns
// true if registered successfully and false if not.
bool attach_credentials(
const char* authority,
grpc_core::RefCountedPtr<grpc_channel_credentials> control_plane_creds);
// Gets the control plane credentials registered under authority. This
// prefers the local control plane creds registry but falls back to the
// global registry. Lastly, this returns self but with any attached
// call credentials stripped off, in the case that neither the local
// registry nor the global registry have an entry for authority.
grpc_core::RefCountedPtr<grpc_channel_credentials>
get_control_plane_credentials(const char* authority);
const char* type() const { return type_; }
private:
const char* type_;
std::map<grpc_core::UniquePtr<char>,
grpc_core::RefCountedPtr<grpc_channel_credentials>,
grpc_core::StringLess>
local_control_plane_creds_;
};
/* Util to encapsulate the channel credentials in a channel arg. */
@ -167,41 +146,6 @@ grpc_channel_credentials* grpc_channel_credentials_from_arg(
grpc_channel_credentials* grpc_channel_credentials_find_in_args(
const grpc_channel_args* args);
/** EXPERIMENTAL. API MAY CHANGE IN THE FUTURE.
Attaches \a control_plane_creds to \a credentials
under the key \a authority. Returns false if \a authority
is already present, in which case no changes are made.
Note that this API is not thread safe. Only one thread may
attach control plane creds to a given credentials object at
any one time, and new control plane creds must not be
attached after \a credentials has been used to create a channel. */
bool grpc_channel_credentials_attach_credentials(
grpc_channel_credentials* credentials, const char* authority,
grpc_channel_credentials* control_plane_creds);
/** EXPERIMENTAL. API MAY CHANGE IN THE FUTURE.
Registers \a control_plane_creds in the global registry
under the key \a authority. Returns false if \a authority
is already present, in which case no changes are made. */
bool grpc_control_plane_credentials_register(
const char* authority, grpc_channel_credentials* control_plane_creds);
/* Initializes global control plane credentials data. */
void grpc_control_plane_credentials_init();
/* Test only: destroy global control plane credentials data.
* This API is meant for use by a few tests that need to
* satisfy grpc_core::LeakDetector. */
void grpc_test_only_control_plane_credentials_destroy();
/* Test only: force re-initialization of global control
* plane credentials data if it was previously destroyed.
* This API is meant to be used in
* tandem with the
* grpc_test_only_control_plane_credentials_destroy, for
* the few tests that need it. */
void grpc_test_only_control_plane_credentials_force_init();
/* --- grpc_credentials_mdelem_array. --- */
typedef struct {

@ -78,7 +78,4 @@ void grpc_register_security_filters(void) {
maybe_prepend_server_auth_filter, nullptr);
}
void grpc_security_init() {
grpc_core::SecurityRegisterHandshakerFactories();
grpc_control_plane_credentials_init();
}
void grpc_security_init() { grpc_core::SecurityRegisterHandshakerFactories(); }

@ -36,8 +36,14 @@ void grpc_lb_policy_grpclb_init(void);
void grpc_lb_policy_grpclb_shutdown(void);
void grpc_lb_policy_cds_init(void);
void grpc_lb_policy_cds_shutdown(void);
void grpc_lb_policy_xds_init(void);
void grpc_lb_policy_xds_shutdown(void);
void grpc_lb_policy_eds_init(void);
void grpc_lb_policy_eds_shutdown(void);
void grpc_lb_policy_lrs_init(void);
void grpc_lb_policy_lrs_shutdown(void);
void grpc_lb_policy_priority_init(void);
void grpc_lb_policy_priority_shutdown(void);
void grpc_lb_policy_weighted_target_init(void);
void grpc_lb_policy_weighted_target_shutdown(void);
void grpc_lb_policy_pick_first_init(void);
void grpc_lb_policy_pick_first_shutdown(void);
void grpc_lb_policy_round_robin_init(void);
@ -78,8 +84,14 @@ void grpc_register_built_in_plugins(void) {
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_cds_init,
grpc_lb_policy_cds_shutdown);
grpc_register_plugin(grpc_lb_policy_xds_init,
grpc_lb_policy_xds_shutdown);
grpc_register_plugin(grpc_lb_policy_eds_init,
grpc_lb_policy_eds_shutdown);
grpc_register_plugin(grpc_lb_policy_lrs_init,
grpc_lb_policy_lrs_shutdown);
grpc_register_plugin(grpc_lb_policy_priority_init,
grpc_lb_policy_priority_shutdown);
grpc_register_plugin(grpc_lb_policy_weighted_target_init,
grpc_lb_policy_weighted_target_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
grpc_lb_policy_pick_first_shutdown);
grpc_register_plugin(grpc_lb_policy_round_robin_init,

@ -44,8 +44,14 @@ void grpc_lb_policy_grpclb_init(void);
void grpc_lb_policy_grpclb_shutdown(void);
void grpc_lb_policy_cds_init(void);
void grpc_lb_policy_cds_shutdown(void);
void grpc_lb_policy_xds_init(void);
void grpc_lb_policy_xds_shutdown(void);
void grpc_lb_policy_eds_init(void);
void grpc_lb_policy_eds_shutdown(void);
void grpc_lb_policy_lrs_init(void);
void grpc_lb_policy_lrs_shutdown(void);
void grpc_lb_policy_priority_init(void);
void grpc_lb_policy_priority_shutdown(void);
void grpc_lb_policy_weighted_target_init(void);
void grpc_lb_policy_weighted_target_shutdown(void);
void grpc_lb_policy_pick_first_init(void);
void grpc_lb_policy_pick_first_shutdown(void);
void grpc_lb_policy_round_robin_init(void);
@ -86,8 +92,14 @@ void grpc_register_built_in_plugins(void) {
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_cds_init,
grpc_lb_policy_cds_shutdown);
grpc_register_plugin(grpc_lb_policy_xds_init,
grpc_lb_policy_xds_shutdown);
grpc_register_plugin(grpc_lb_policy_eds_init,
grpc_lb_policy_eds_shutdown);
grpc_register_plugin(grpc_lb_policy_lrs_init,
grpc_lb_policy_lrs_shutdown);
grpc_register_plugin(grpc_lb_policy_priority_init,
grpc_lb_policy_priority_shutdown);
grpc_register_plugin(grpc_lb_policy_weighted_target_init,
grpc_lb_policy_weighted_target_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
grpc_lb_policy_pick_first_shutdown);
grpc_register_plugin(grpc_lb_policy_round_robin_init,

@ -137,7 +137,7 @@
<ProtoRoot>%(RelativeDir)</ProtoRoot>
</Protobuf_Compile>
<!-- Remove files not for compile. -->
<Protobuf_Compile Remove="@(Protobuf_Compile)" Condition=" !%(ProtoCompile) " />
<Protobuf_Compile Remove="@(Protobuf_Compile)" Condition=" '%(ProtoCompile)' != 'true' " />
<!-- Ensure invariant Source=%(Identity). -->
<Protobuf_Compile>
<Source>%(Identity)</Source>

@ -58,7 +58,7 @@ $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc
```sh
$ cd grpc
$ git submodule update --init
$ make
$ EXTRA_DEFINES=GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK make
$ [sudo] make install
```

@ -70,7 +70,8 @@ cdef CallbackFailureHandler CQ_SHUTDOWN_FAILURE_HANDLER = CallbackFailureHandler
InternalError)
class ExecuteBatchError(Exception): pass
class ExecuteBatchError(InternalError):
"""Raised when execute batch returns a failure from Core."""
async def execute_batch(GrpcCallWrapper grpc_call_wrapper,
@ -128,7 +129,7 @@ async def _receive_message(GrpcCallWrapper grpc_call_wrapper,
# the callback (e.g. cancelled).
#
# Since they all indicate that the call has finished, it is better to merge them.
_LOGGER.debug(e)
_LOGGER.debug('Failed to receive any message from Core')
return receive_op.message()

@ -105,7 +105,7 @@ cdef _actual_aio_shutdown():
)
future.add_done_callback(_grpc_shutdown_wrapper)
elif _global_aio_state.engine is AsyncIOEngine.POLLER:
_global_aio_state.cq.shutdown()
(<PollerCompletionQueue>_global_aio_state.cq).shutdown()
grpc_shutdown_blocking()
else:
raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine)

@ -41,6 +41,18 @@ cdef class RPCState(GrpcCallWrapper):
cdef Operation create_send_initial_metadata_op_if_not_sent(self)
cdef class _ServicerContext:
cdef RPCState _rpc_state
cdef object _loop # asyncio.AbstractEventLoop
cdef object _request_deserializer # Callable[[bytes], Any]
cdef object _response_serializer # Callable[[Any], bytes]
cdef class _MessageReceiver:
cdef _ServicerContext _servicer_context
cdef object _agen
cdef enum AioServerStatus:
AIO_SERVER_STATUS_UNKNOWN
AIO_SERVER_STATUS_READY

@ -109,10 +109,6 @@ cdef class RPCState:
cdef class _ServicerContext:
cdef RPCState _rpc_state
cdef object _loop
cdef object _request_deserializer
cdef object _response_serializer
def __cinit__(self,
RPCState rpc_state,
@ -128,9 +124,9 @@ cdef class _ServicerContext:
cdef bytes raw_message
self._rpc_state.raise_for_termination()
if self._rpc_state.client_closed:
return EOF
raw_message = await _receive_message(self._rpc_state, self._loop)
self._rpc_state.raise_for_termination()
if raw_message is None:
return EOF
else:
@ -414,15 +410,28 @@ async def _handle_unary_stream_rpc(object method_handler,
)
async def _message_receiver(_ServicerContext servicer_context):
cdef class _MessageReceiver:
"""Bridge between the async generator API and the reader-writer API."""
cdef object message
while True:
message = await servicer_context.read()
if message is not EOF:
yield message
else:
break
def __cinit__(self, _ServicerContext servicer_context):
self._servicer_context = servicer_context
self._agen = None
async def _async_message_receiver(self):
"""An async generator that receives messages."""
cdef object message
while True:
message = await self._servicer_context.read()
if message is not EOF:
yield message
else:
break
def __aiter__(self):
# Prevents a "never awaited" warning if the application never uses the async generator
if self._agen is None:
self._agen = self._async_message_receiver()
return self._agen
async def _handle_stream_unary_rpc(object method_handler,
@ -437,7 +446,7 @@ async def _handle_stream_unary_rpc(object method_handler,
)
# Prepares the request generator
cdef object request_async_iterator = _message_receiver(servicer_context)
cdef object request_async_iterator = _MessageReceiver(servicer_context)
# Finishes the application handler
await _finish_handler_with_unary_response(
@ -462,7 +471,7 @@ async def _handle_stream_stream_rpc(object method_handler,
)
# Prepares the request generator
cdef object request_async_iterator = _message_receiver(servicer_context)
cdef object request_async_iterator = _MessageReceiver(servicer_context)
# Finishes the application handler
await _finish_handler_with_stream_responses(
@ -495,6 +504,12 @@ async def _handle_exceptions(RPCState rpc_state, object rpc_coro, object loop):
_LOGGER.debug('RPC cancelled for servicer method [%s]', _decode(rpc_state.method()))
except _ServerStoppedError:
_LOGGER.warning('Aborting method [%s] due to server stop.', _decode(rpc_state.method()))
except ExecuteBatchError:
# If client closed (aka. cancelled), ignore the failed batch operations.
if rpc_state.client_closed:
return
else:
raise
except Exception as e:
_LOGGER.exception('Unexpected [%s] raised by servicer method [%s]' % (
type(e).__name__,

@ -85,6 +85,6 @@ __all__ = (
'insecure_channel_credentials',
)
if sys.version_info[0] >= 3:
if sys.version_info[0] == 3 and sys.version_info[1] >= 6:
from grpc._simple_stubs import unary_unary, unary_stream, stream_unary, stream_stream
__all__ = __all__ + (unary_unary, unary_stream, stream_unary, stream_stream)

@ -117,6 +117,19 @@ class Call(RpcContext, metaclass=ABCMeta):
The details string of the RPC.
"""
@abstractmethod
async def wait_for_connection(self) -> None:
"""Waits until connected to peer and raises aio.AioRpcError if failed.
This is an EXPERIMENTAL method.
This method ensures the RPC has been successfully connected. Otherwise,
an AioRpcError will be raised to explain the reason for the connection
failure.
This method is recommended for building retry mechanisms.
"""
class UnaryUnaryCall(Generic[RequestType, ResponseType],
Call,

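A minimal Python sketch of the retry pattern that the wait_for_connection docstring above recommends. The target address, stub, and request type are placeholders borrowed from the test protos used elsewhere in this PR; only aio.insecure_channel, call.wait_for_connection(), and aio.AioRpcError are APIs that this diff actually adds or exercises.

from grpc.experimental import aio

from src.proto.grpc.testing import messages_pb2, test_pb2_grpc


async def unary_call_with_connection_retry(target: str, attempts: int = 3):
    # Hypothetical helper: retries the RPC when the connection cannot be
    # established, instead of failing on the first attempt.
    async with aio.insecure_channel(target) as channel:
        stub = test_pb2_grpc.TestServiceStub(channel)
        for _ in range(attempts):
            call = stub.UnaryCall(messages_pb2.SimpleRequest())
            try:
                # Raises aio.AioRpcError if the RPC never gets connected.
                await call.wait_for_connection()
            except aio.AioRpcError:
                continue  # connection failed; retry with a fresh call object
            return await call  # connected; await the actual response
        raise RuntimeError('no connection after %d attempts' % attempts)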
@ -14,12 +14,13 @@
"""Abstract base classes for Channel objects and Multicallable objects."""
import abc
from typing import Any, AsyncIterable, Optional
from typing import Any, Optional
import grpc
from . import _base_call
from ._typing import DeserializingFunction, MetadataType, SerializingFunction
from ._typing import (DeserializingFunction, MetadataType, RequestIterableType,
SerializingFunction)
_IMMUTABLE_EMPTY_TUPLE = tuple()
@ -105,7 +106,7 @@ class StreamUnaryMultiCallable(abc.ABC):
@abc.abstractmethod
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
request_iterator: Optional[RequestIterableType] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
@ -115,7 +116,8 @@ class StreamUnaryMultiCallable(abc.ABC):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
request_iterator: An optional async iterable or iterable of request
messages for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
@ -142,7 +144,7 @@ class StreamStreamMultiCallable(abc.ABC):
@abc.abstractmethod
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
request_iterator: Optional[RequestIterableType] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
@ -152,7 +154,8 @@ class StreamStreamMultiCallable(abc.ABC):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
request_iterator: An optional async iterable or iterable of request
messages for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the

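A short sketch of the two request-iterator styles that the request_iterator parameter documented above accepts. The stub and message types are placeholders taken from the test protos in this PR; per the _consume_request_iterator change later in this diff, an async generator is consumed with an async for loop while a plain iterable is consumed with a regular for loop.

from grpc.experimental import aio

from src.proto.grpc.testing import messages_pb2, test_pb2_grpc


async def stream_requests_both_ways(target: str):
    async with aio.insecure_channel(target) as channel:
        stub = test_pb2_grpc.TestServiceStub(channel)
        request = messages_pb2.StreamingInputCallRequest(
            payload=messages_pb2.Payload(body=b'\0' * 7))

        # Style 1: a plain (synchronous) iterable, e.g. a list of requests.
        sync_response = await stub.StreamingInputCall([request] * 3)

        # Style 2: an async iterable, e.g. an async generator.
        async def request_gen():
            for _ in range(3):
                yield request

        async_response = await stub.StreamingInputCall(request_gen())
        return sync_response, async_response

Both calls return the same StreamUnaryCall object, so code(), wait_for_connection(), and the rest of the Call API behave identically regardless of which iterator style is passed.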
@ -14,10 +14,11 @@
"""Invocation-side implementation of gRPC Asyncio Python."""
import asyncio
from functools import partial
import logging
import enum
from typing import AsyncIterable, Awaitable, Dict, Optional
import inspect
import logging
from functools import partial
from typing import AsyncIterable, Optional, Tuple
import grpc
from grpc import _common
@ -25,7 +26,8 @@ from grpc._cython import cygrpc
from . import _base_call
from ._typing import (DeserializingFunction, DoneCallbackType, MetadataType,
RequestType, ResponseType, SerializingFunction)
MetadatumType, RequestIterableType, RequestType,
ResponseType, SerializingFunction)
__all__ = 'AioRpcError', 'Call', 'UnaryUnaryCall', 'UnaryStreamCall'
@ -105,7 +107,7 @@ class AioRpcError(grpc.RpcError):
"""
return self._details
def initial_metadata(self) -> Optional[Dict]:
def initial_metadata(self) -> Optional[MetadataType]:
"""Accesses the initial metadata sent by the server.
Returns:
@ -113,7 +115,7 @@ class AioRpcError(grpc.RpcError):
"""
return self._initial_metadata
def trailing_metadata(self) -> Optional[Dict]:
def trailing_metadata(self) -> Optional[MetadataType]:
"""Accesses the trailing metadata sent by the server.
Returns:
@ -161,7 +163,7 @@ class Call:
_loop: asyncio.AbstractEventLoop
_code: grpc.StatusCode
_cython_call: cygrpc._AioCall
_metadata: MetadataType
_metadata: Tuple[MetadatumType]
_request_serializer: SerializingFunction
_response_deserializer: DeserializingFunction
@ -171,7 +173,7 @@ class Call:
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._cython_call = cython_call
self._metadata = metadata
self._metadata = tuple(metadata)
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
@ -248,9 +250,8 @@ class _APIStyle(enum.IntEnum):
class _UnaryResponseMixin(Call):
_call_response: asyncio.Task
def _init_unary_response_mixin(self,
response_coro: Awaitable[ResponseType]):
self._call_response = self._loop.create_task(response_coro)
def _init_unary_response_mixin(self, response_task: asyncio.Task):
self._call_response = response_task
def cancel(self) -> bool:
if super().cancel():
@ -362,14 +363,14 @@ class _StreamRequestMixin(Call):
_request_style: _APIStyle
def _init_stream_request_mixin(
self, request_async_iterator: Optional[AsyncIterable[RequestType]]):
self, request_iterator: Optional[RequestIterableType]):
self._metadata_sent = asyncio.Event(loop=self._loop)
self._done_writing_flag = False
# If user passes in an async iterator, create a consumer Task.
if request_async_iterator is not None:
if request_iterator is not None:
self._async_request_poller = self._loop.create_task(
self._consume_request_iterator(request_async_iterator))
self._consume_request_iterator(request_iterator))
self._request_style = _APIStyle.ASYNC_GENERATOR
else:
self._async_request_poller = None
@ -391,11 +392,17 @@ class _StreamRequestMixin(Call):
def _metadata_sent_observer(self):
self._metadata_sent.set()
async def _consume_request_iterator(
self, request_async_iterator: AsyncIterable[RequestType]) -> None:
async def _consume_request_iterator(self,
request_iterator: RequestIterableType
) -> None:
try:
async for request in request_async_iterator:
await self._write(request)
if inspect.isasyncgen(request_iterator):
async for request in request_iterator:
await self._write(request)
else:
for request in request_iterator:
await self._write(request)
await self._done_writing()
except AioRpcError as rpc_error:
# Rpc status should be exposed through other API. Exceptions raised
@ -450,6 +457,11 @@ class _StreamRequestMixin(Call):
self._raise_for_different_style(_APIStyle.READER_WRITER)
await self._done_writing()
async def wait_for_connection(self) -> None:
await self._metadata_sent.wait()
if self.done():
await self._raise_for_status()
class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
"""Object for managing unary-unary RPC calls.
@ -457,6 +469,7 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
Returned when an instance of `UnaryUnaryMultiCallable` object is called.
"""
_request: RequestType
_invocation_task: asyncio.Task
# pylint: disable=too-many-arguments
def __init__(self, request: RequestType, deadline: Optional[float],
@ -470,7 +483,8 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._request = request
self._init_unary_response_mixin(self._invoke())
self._invocation_task = loop.create_task(self._invoke())
self._init_unary_response_mixin(self._invocation_task)
async def _invoke(self) -> ResponseType:
serialized_request = _common.serialize(self._request,
@ -492,6 +506,11 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
else:
return cygrpc.EOF
async def wait_for_connection(self) -> None:
await self._invocation_task
if self.done():
await self._raise_for_status()
class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall):
"""Object for managing unary-stream RPC calls.
@ -528,6 +547,11 @@ class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall):
self.cancel()
raise
async def wait_for_connection(self) -> None:
await self._send_unary_request_task
if self.done():
await self._raise_for_status()
class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call,
_base_call.StreamUnaryCall):
@ -537,8 +561,7 @@ class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call,
"""
# pylint: disable=too-many-arguments
def __init__(self,
request_async_iterator: Optional[AsyncIterable[RequestType]],
def __init__(self, request_iterator: Optional[RequestIterableType],
deadline: Optional[float], metadata: MetadataType,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
@ -549,8 +572,8 @@ class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call,
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._init_stream_request_mixin(request_async_iterator)
self._init_unary_response_mixin(self._conduct_rpc())
self._init_stream_request_mixin(request_iterator)
self._init_unary_response_mixin(loop.create_task(self._conduct_rpc()))
async def _conduct_rpc(self) -> ResponseType:
try:
@ -576,8 +599,7 @@ class StreamStreamCall(_StreamRequestMixin, _StreamResponseMixin, Call,
_initializer: asyncio.Task
# pylint: disable=too-many-arguments
def __init__(self,
request_async_iterator: Optional[AsyncIterable[RequestType]],
def __init__(self, request_iterator: Optional[RequestIterableType],
deadline: Optional[float], metadata: MetadataType,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
@ -588,7 +610,7 @@ class StreamStreamCall(_StreamRequestMixin, _StreamResponseMixin, Call,
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._initializer = self._loop.create_task(self._prepare_rpc())
self._init_stream_request_mixin(request_async_iterator)
self._init_stream_request_mixin(request_iterator)
self._init_stream_response_mixin(self._initializer)
async def _prepare_rpc(self):

@ -15,7 +15,7 @@
import asyncio
import sys
from typing import Any, AsyncIterable, Iterable, Optional, Sequence
from typing import Any, Iterable, Optional, Sequence
import grpc
from grpc import _common, _compression, _grpcio_metadata
@ -27,7 +27,7 @@ from ._call import (StreamStreamCall, StreamUnaryCall, UnaryStreamCall,
from ._interceptor import (InterceptedUnaryUnaryCall,
UnaryUnaryClientInterceptor)
from ._typing import (ChannelArgumentType, DeserializingFunction, MetadataType,
SerializingFunction)
SerializingFunction, RequestIterableType)
from ._utils import _timeout_to_deadline
_IMMUTABLE_EMPTY_TUPLE = tuple()
@ -146,7 +146,7 @@ class StreamUnaryMultiCallable(_BaseMultiCallable,
_base_channel.StreamUnaryMultiCallable):
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
request_iterator: Optional[RequestIterableType] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
@ -158,7 +158,7 @@ class StreamUnaryMultiCallable(_BaseMultiCallable,
deadline = _timeout_to_deadline(timeout)
call = StreamUnaryCall(request_async_iterator, deadline, metadata,
call = StreamUnaryCall(request_iterator, deadline, metadata,
credentials, wait_for_ready, self._channel,
self._method, self._request_serializer,
self._response_deserializer, self._loop)
@ -170,7 +170,7 @@ class StreamStreamMultiCallable(_BaseMultiCallable,
_base_channel.StreamStreamMultiCallable):
def __call__(self,
request_async_iterator: Optional[AsyncIterable[Any]] = None,
request_iterator: Optional[RequestIterableType] = None,
timeout: Optional[float] = None,
metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE,
credentials: Optional[grpc.CallCredentials] = None,
@ -182,7 +182,7 @@ class StreamStreamMultiCallable(_BaseMultiCallable,
deadline = _timeout_to_deadline(timeout)
call = StreamStreamCall(request_async_iterator, deadline, metadata,
call = StreamStreamCall(request_iterator, deadline, metadata,
credentials, wait_for_ready, self._channel,
self._method, self._request_serializer,
self._response_deserializer, self._loop)

@ -330,6 +330,10 @@ class InterceptedUnaryUnaryCall(_base_call.UnaryUnaryCall):
response = yield from call.__await__()
return response
async def wait_for_connection(self) -> None:
call = await self._interceptors_task
return await call.wait_for_connection()
class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall):
"""Final UnaryUnaryCall class finished with a response."""
@ -374,3 +378,6 @@ class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall):
# for telling the interpreter that __await__ is a generator.
yield None
return self._response
async def wait_for_connection(self) -> None:
pass

@ -13,7 +13,9 @@
# limitations under the License.
"""Common types for gRPC Async API"""
from typing import Any, AnyStr, Callable, Sequence, Tuple, TypeVar
from typing import (Any, AnyStr, AsyncIterable, Callable, Iterable, Sequence,
Tuple, TypeVar, Union)
from grpc._cython.cygrpc import EOF
RequestType = TypeVar('RequestType')
@ -25,3 +27,4 @@ MetadataType = Sequence[MetadatumType]
ChannelArgumentType = Sequence[Tuple[str, Any]]
EOFType = type(EOF)
DoneCallbackType = Callable[[Any], None]
RequestIterableType = Union[Iterable[Any], AsyncIterable[Any]]

@ -28,16 +28,21 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',

@ -504,7 +504,8 @@ class PythonPluginTest(unittest.TestCase):
service.server.stop(None)
@unittest.skipIf(sys.version_info[0] < 3, "Unsupported on Python 2.")
@unittest.skipIf(sys.version_info[0] < 3 or sys.version_info[1] < 6,
"Unsupported on Python 2.")
class SimpleStubsPluginTest(unittest.TestCase):
servicer_methods = _ServicerMethods()

@ -64,7 +64,6 @@ class InteropTestCaseMixin:
await methods.test_interoperability(
methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE, self._stub, None)
@unittest.skip('TODO(https://github.com/grpc/grpc/issues/21707)')
async def test_timeout_on_sleeping_server(self):
await methods.test_interoperability(
methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER, self._stub, None)

@ -15,8 +15,9 @@
import argparse
import asyncio
import enum
import collections
import datetime
import enum
import inspect
import json
import os
@ -220,12 +221,15 @@ async def _cancel_after_first_response(stub: test_pb2_grpc.TestServiceStub):
async def _timeout_on_sleeping_server(stub: test_pb2_grpc.TestServiceStub):
request_payload_size = 27182
time_limit = datetime.timedelta(seconds=1)
call = stub.FullDuplexCall(timeout=0.001)
call = stub.FullDuplexCall(timeout=time_limit.total_seconds())
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
payload=messages_pb2.Payload(body=b'\x00' * request_payload_size))
payload=messages_pb2.Payload(body=b'\x00' * request_payload_size),
response_parameters=(messages_pb2.ResponseParameters(
interval_us=int(time_limit.total_seconds() * 2 * 10**6)),))
await call.write(request)
await call.done_writing()
try:

@ -28,5 +28,6 @@
"unit.server_interceptor_test.TestServerInterceptor",
"unit.server_test.TestServer",
"unit.timeout_test.TestTimeout",
"unit.wait_for_connection_test.TestWaitForConnection",
"unit.wait_for_ready_test.TestWaitForReady"
]

@ -16,23 +16,23 @@
import asyncio
import logging
import unittest
import datetime
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2, test_pb2_grpc
from tests.unit.framework.common import test_constants
from tests_aio.unit._test_base import AioTestBase
from tests.unit import resources
from tests_aio.unit._test_server import start_test_server
from tests_aio.unit._constants import UNREACHABLE_TARGET
_SHORT_TIMEOUT_S = datetime.timedelta(seconds=1).total_seconds()
_NUM_STREAM_RESPONSES = 5
_RESPONSE_PAYLOAD_SIZE = 42
_REQUEST_PAYLOAD_SIZE = 7
_LOCAL_CANCEL_DETAILS_EXPECTATION = 'Locally cancelled by application!'
_RESPONSE_INTERVAL_US = test_constants.SHORT_TIMEOUT * 1000 * 1000
_UNREACHABLE_TARGET = '0.1:1111'
_RESPONSE_INTERVAL_US = int(_SHORT_TIMEOUT_S * 1000 * 1000)
_INFINITE_INTERVAL_US = 2**31 - 1
@ -78,7 +78,7 @@ class TestUnaryUnaryCall(_MulticallableTestMixin, AioTestBase):
self.assertIs(response, response_retry)
async def test_call_rpc_error(self):
async with aio.insecure_channel(_UNREACHABLE_TARGET) as channel:
async with aio.insecure_channel(UNREACHABLE_TARGET) as channel:
stub = test_pb2_grpc.TestServiceStub(channel)
call = stub.UnaryCall(messages_pb2.SimpleRequest())
@ -434,24 +434,24 @@ class TestUnaryStreamCall(_MulticallableTestMixin, AioTestBase):
interval_us=_RESPONSE_INTERVAL_US,
))
call = self._stub.StreamingOutputCall(
request, timeout=test_constants.SHORT_TIMEOUT * 2)
call = self._stub.StreamingOutputCall(request,
timeout=_SHORT_TIMEOUT_S * 2)
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Should be around the same as the timeout
remained_time = call.time_remaining()
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 5 / 2)
self.assertGreater(remained_time, _SHORT_TIMEOUT_S * 3 / 2)
self.assertLess(remained_time, _SHORT_TIMEOUT_S * 5 / 2)
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Should be around the timeout minus a unit of wait time
remained_time = call.time_remaining()
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT / 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2)
self.assertGreater(remained_time, _SHORT_TIMEOUT_S / 2)
self.assertLess(remained_time, _SHORT_TIMEOUT_S * 3 / 2)
self.assertEqual(grpc.StatusCode.OK, await call.code())
@ -538,14 +538,14 @@ class TestStreamUnaryCall(_MulticallableTestMixin, AioTestBase):
with self.assertRaises(asyncio.CancelledError):
for _ in range(_NUM_STREAM_RESPONSES):
yield request
await asyncio.sleep(test_constants.SHORT_TIMEOUT)
await asyncio.sleep(_SHORT_TIMEOUT_S)
request_iterator_received_the_exception.set()
call = self._stub.StreamingInputCall(request_iterator())
# Cancel the RPC after at least one response
async def cancel_later():
await asyncio.sleep(test_constants.SHORT_TIMEOUT * 2)
await asyncio.sleep(_SHORT_TIMEOUT_S * 2)
call.cancel()
cancel_later_task = self.loop.create_task(cancel_later())
@ -559,6 +559,50 @@ class TestStreamUnaryCall(_MulticallableTestMixin, AioTestBase):
# No failures in the cancel later task!
await cancel_later_task
async def test_normal_iterable_requests(self):
# Prepares the request
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
requests = [request] * _NUM_STREAM_RESPONSES
# Sends out requests
call = self._stub.StreamingInputCall(requests)
# RPC should succeed
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_call_rpc_error(self):
async with aio.insecure_channel(UNREACHABLE_TARGET) as channel:
stub = test_pb2_grpc.TestServiceStub(channel)
# The error should be raised automatically without any traffic.
call = stub.StreamingInputCall()
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
exception_context.exception.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code())
async def test_timeout(self):
call = self._stub.StreamingInputCall(timeout=_SHORT_TIMEOUT_S)
# The error should be raised automatically without any traffic.
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, rpc_error.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, await call.code())
# Prepares the request that streams in a ping-pong manner.
_STREAM_OUTPUT_REQUEST_ONE_RESPONSE = messages_pb2.StreamingOutputCallRequest()
@ -716,14 +760,14 @@ class TestStreamStreamCall(_MulticallableTestMixin, AioTestBase):
with self.assertRaises(asyncio.CancelledError):
for _ in range(_NUM_STREAM_RESPONSES):
yield request
await asyncio.sleep(test_constants.SHORT_TIMEOUT)
await asyncio.sleep(_SHORT_TIMEOUT_S)
request_iterator_received_the_exception.set()
call = self._stub.FullDuplexCall(request_iterator())
# Cancel the RPC after at least one response
async def cancel_later():
await asyncio.sleep(test_constants.SHORT_TIMEOUT * 2)
await asyncio.sleep(_SHORT_TIMEOUT_S * 2)
call.cancel()
cancel_later_task = self.loop.create_task(cancel_later())
@ -738,7 +782,16 @@ class TestStreamStreamCall(_MulticallableTestMixin, AioTestBase):
# No failures in the cancel later task!
await cancel_later_task
async def test_normal_iterable_requests(self):
requests = [_STREAM_OUTPUT_REQUEST_ONE_RESPONSE] * _NUM_STREAM_RESPONSES
call = self._stub.FullDuplexCall(iter(requests))
async for response in call:
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(await call.code(), grpc.StatusCode.OK)
if __name__ == '__main__':
logging.basicConfig()
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -226,5 +226,5 @@ class TestChannel(AioTestBase):
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -686,5 +686,5 @@ class TestInterceptedUnaryUnaryCall(AioTestBase):
if __name__ == '__main__':
logging.basicConfig()
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -134,5 +134,5 @@ class TestCloseChannel(AioTestBase):
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -35,7 +35,7 @@ class TestChannel(AioTestBase):
channel = aio.insecure_channel(server_target)
self.assertIsInstance(channel, aio.Channel)
async def tests_secure_channel(self):
async def test_secure_channel(self):
server_target, _ = await start_test_server(secure=True) # pylint: disable=unused-variable
credentials = grpc.ssl_channel_credentials(
root_certificates=_TEST_ROOT_CERTIFICATES,
@ -48,5 +48,5 @@ class TestChannel(AioTestBase):
if __name__ == '__main__':
logging.basicConfig()
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -210,14 +210,21 @@ class TestMetadata(AioTestBase):
self.assertEqual(_RESPONSE, await call)
self.assertEqual(grpc.StatusCode.OK, await call.code())
async def test_from_client_to_server_with_list(self):
multicallable = self._client.unary_unary(_TEST_CLIENT_TO_SERVER)
call = multicallable(
_REQUEST, metadata=list(_INITIAL_METADATA_FROM_CLIENT_TO_SERVER))
self.assertEqual(_RESPONSE, await call)
self.assertEqual(grpc.StatusCode.OK, await call.code())
@unittest.skipIf(platform.system() == 'Windows',
'https://github.com/grpc/grpc/issues/21943')
async def test_invalid_metadata(self):
multicallable = self._client.unary_unary(_TEST_CLIENT_TO_SERVER)
for exception_type, metadata in _INVALID_METADATA_TEST_CASES:
with self.subTest(metadata=metadata):
call = multicallable(_REQUEST, metadata=metadata)
with self.assertRaises(exception_type):
call = multicallable(_REQUEST, metadata=metadata)
await call
async def test_generic_handler(self):

@ -164,5 +164,5 @@ class TestServerInterceptor(AioTestBase):
if __name__ == '__main__':
logging.basicConfig()
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -0,0 +1,159 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests behavior of the wait for connection API on client side."""
import asyncio
import logging
import unittest
import datetime
from typing import Callable, Tuple
import grpc
from grpc.experimental import aio
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
from tests_aio.unit import _common
from src.proto.grpc.testing import messages_pb2, test_pb2_grpc
from tests_aio.unit._constants import UNREACHABLE_TARGET
_REQUEST = b'\x01\x02\x03'
_TEST_METHOD = '/test/Test'
_NUM_STREAM_RESPONSES = 5
_REQUEST_PAYLOAD_SIZE = 7
_RESPONSE_PAYLOAD_SIZE = 42
class TestWaitForConnection(AioTestBase):
"""Tests if wait_for_connection raises connectivity issue."""
async def setUp(self):
address, self._server = await start_test_server()
self._channel = aio.insecure_channel(address)
self._dummy_channel = aio.insecure_channel(UNREACHABLE_TARGET)
self._stub = test_pb2_grpc.TestServiceStub(self._channel)
async def tearDown(self):
await self._dummy_channel.close()
await self._channel.close()
await self._server.stop(None)
async def test_unary_unary_ok(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
# No exception raised and no message swallowed.
await call.wait_for_connection()
response = await call
self.assertIsInstance(response, messages_pb2.SimpleResponse)
async def test_unary_stream_ok(self):
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
call = self._stub.StreamingOutputCall(request)
# No exception raised and no message swallowed.
await call.wait_for_connection()
response_cnt = 0
async for response in call:
response_cnt += 1
self.assertIs(type(response),
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(_NUM_STREAM_RESPONSES, response_cnt)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_stream_unary_ok(self):
call = self._stub.StreamingInputCall()
# No exception raised and no message swallowed.
await call.wait_for_connection()
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
await call.done_writing()
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_stream_stream_ok(self):
call = self._stub.FullDuplexCall()
# No exception raised and no message swallowed.
await call.wait_for_connection()
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
response = await call.read()
self.assertIsInstance(response,
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
await call.done_writing()
self.assertEqual(grpc.StatusCode.OK, await call.code())
async def test_unary_unary_error(self):
call = self._dummy_channel.unary_unary(_TEST_METHOD)(_REQUEST)
with self.assertRaises(aio.AioRpcError) as exception_context:
await call.wait_for_connection()
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code())
async def test_unary_stream_error(self):
call = self._dummy_channel.unary_stream(_TEST_METHOD)(_REQUEST)
with self.assertRaises(aio.AioRpcError) as exception_context:
await call.wait_for_connection()
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code())
async def test_stream_unary_error(self):
call = self._dummy_channel.stream_unary(_TEST_METHOD)()
with self.assertRaises(aio.AioRpcError) as exception_context:
await call.wait_for_connection()
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code())
async def test_stream_stream_error(self):
call = self._dummy_channel.stream_stream(_TEST_METHOD)()
with self.assertRaises(aio.AioRpcError) as exception_context:
await call.wait_for_connection()
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code())
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)

@ -55,7 +55,7 @@ if cur_resolver and cur_resolver != 'ares':
'needs to use GRPC_DNS_RESOLVER=ares.'))
test_runner_log('Exit 1 without running tests.')
sys.exit(1)
os.environ.update({'GRPC_TRACE': 'cares_resolver'})
os.environ.update({'GRPC_TRACE': 'cares_resolver,cares_address_sorting'})
def wait_until_dns_server_is_up(args,
dns_server_subprocess,

@ -14,10 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM google/dart:2.3
# Upgrade Dart to version 2.
RUN apt-get update && apt-get upgrade -y dart
FROM google/dart:2.7
# Define the default command.
CMD ["bash"]

@ -16,7 +16,11 @@
<%include file="../../python_stretch.include"/>
<%include file="../../compile_python_36.include"/>
<%include file="../../compile_python_38.include"/>
RUN apt-get update && apt-get install -y python3.5 python3.5-dev
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.5
RUN apt-get update && apt-get -t buster install -y python3.7 python3-all-dev
RUN curl https://bootstrap.pypa.io/get-pip.py | python3.7

@ -64,8 +64,9 @@ static grpc_ares_request* my_dns_lookup_ares_locked(
const char* /*dns_server*/, const char* addr, const char* /*default_port*/,
grpc_pollset_set* /*interested_parties*/, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addresses,
bool /*check_grpclb*/, char** /*service_config_json*/,
int /*query_timeout_ms*/, grpc_core::Combiner* /*combiner*/) {
std::unique_ptr<grpc_core::ServerAddressList>* /*balancer_addresses*/,
char** /*service_config_json*/, int /*query_timeout_ms*/,
grpc_core::Combiner* /*combiner*/) {
gpr_mu_lock(&g_mu);
GPR_ASSERT(0 == strcmp("test", addr));
grpc_error* error = GRPC_ERROR_NONE;

@ -42,7 +42,8 @@ static grpc_core::Combiner* g_combiner;
static grpc_ares_request* (*g_default_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addresses, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addresses,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner);
@ -93,12 +94,14 @@ static grpc_address_resolver_vtable test_resolver = {
static grpc_ares_request* test_dns_lookup_ares_locked(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* /*interested_parties*/, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addresses, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addresses,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) {
grpc_ares_request* result = g_default_dns_lookup_ares_locked(
dns_server, name, default_port, g_iomgr_args.pollset_set, on_done,
addresses, check_grpclb, service_config_json, query_timeout_ms, combiner);
addresses, balancer_addresses, service_config_json, query_timeout_ms,
combiner);
++g_resolution_count;
static grpc_millis last_resolution_time = 0;
grpc_millis now =

@ -86,29 +86,18 @@ static grpc_core::Resolver::Result create_new_resolver_result() {
static size_t test_counter = 0;
const size_t num_addresses = 2;
char* uri_string;
char* balancer_name;
// Create address list.
grpc_core::Resolver::Result result;
for (size_t i = 0; i < num_addresses; ++i) {
gpr_asprintf(&uri_string, "ipv4:127.0.0.1:100%" PRIuPTR,
test_counter * num_addresses + i);
grpc_uri* uri = grpc_uri_parse(uri_string, true);
gpr_asprintf(&balancer_name, "balancer%" PRIuPTR,
test_counter * num_addresses + i);
grpc_resolved_address address;
GPR_ASSERT(grpc_parse_uri(uri, &address));
grpc_core::InlinedVector<grpc_arg, 2> args_to_add;
const bool is_balancer = num_addresses % 2;
if (is_balancer) {
args_to_add.emplace_back(grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_BALANCER), 1));
args_to_add.emplace_back(grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME), balancer_name));
}
grpc_channel_args* args = grpc_channel_args_copy_and_add(
nullptr, args_to_add.data(), args_to_add.size());
result.addresses.emplace_back(address.addr, address.len, args);
gpr_free(balancer_name);
result.addresses.emplace_back(
address.addr, address.len,
grpc_channel_args_copy_and_add(nullptr, nullptr, 0));
grpc_uri_destroy(uri);
gpr_free(uri_string);
}

@ -464,7 +464,7 @@ TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigXds) {
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{ \"balancerName\": \"fake:///lb\" } }\n"
" { \"eds_experimental\":{ \"clusterName\": \"foo\" } }\n"
" ]\n"
"}";
grpc_error* error = GRPC_ERROR_NONE;
@ -474,7 +474,7 @@ TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigXds) {
static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
svc_cfg->GetGlobalParsedConfig(0));
auto lb_config = parsed_config->parsed_lb_config();
EXPECT_STREQ(lb_config->name(), "xds_experimental");
EXPECT_STREQ(lb_config->name(), "eds_experimental");
}
TEST_F(ClientChannelParserTest, UnknownLoadBalancingConfig) {
@ -544,14 +544,14 @@ TEST_F(ClientChannelParserTest, UnknownLoadBalancingPolicy) {
}
TEST_F(ClientChannelParserTest, LoadBalancingPolicyXdsNotAllowed) {
const char* test_json = "{\"loadBalancingPolicy\":\"xds_experimental\"}";
const char* test_json = "{\"loadBalancingPolicy\":\"eds_experimental\"}";
grpc_error* error = GRPC_ERROR_NONE;
auto svc_cfg = ServiceConfig::Create(test_json, &error);
std::regex regex(
"Service config parsing error.*referenced_errors.*"
"Global Params.*referenced_errors.*"
"Client channel global parser.*referenced_errors.*"
"field:loadBalancingPolicy error:xds_experimental requires "
"field:loadBalancingPolicy error:eds_experimental requires "
"a config. Please use loadBalancingConfig instead.");
VerifyRegexMatch(error, regex);
}

@ -39,7 +39,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_core::testing::LeakDetector leak_detector(true);
grpc_init();
grpc_test_only_control_plane_credentials_force_init();
grpc_slice_buffer input_buffer;
grpc_slice_buffer_init(&input_buffer);
grpc_slice_buffer_add(&input_buffer,
@ -52,7 +51,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_slice_buffer_destroy(&input_buffer);
grpc_slice_buffer_destroy(&output_buffer);
grpc_test_only_control_plane_credentials_destroy();
grpc_shutdown_blocking();
return 0;
}

@ -39,7 +39,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_core::testing::LeakDetector leak_detector(true);
grpc_init();
grpc_test_only_control_plane_credentials_force_init();
grpc_slice_buffer input_buffer;
grpc_slice_buffer_init(&input_buffer);
grpc_slice_buffer_add(&input_buffer,
@ -52,7 +51,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_slice_buffer_destroy(&input_buffer);
grpc_slice_buffer_destroy(&output_buffer);
grpc_test_only_control_plane_credentials_destroy();
grpc_shutdown_blocking();
return 0;
}

@ -31,7 +31,6 @@ bool leak_check = true;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_core::testing::LeakDetector leak_detector(true);
grpc_init();
grpc_test_only_control_plane_credentials_force_init();
auto* context = grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
grpc_slice_buffer input_buffer;
@ -48,7 +47,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_stream_compression_context_destroy(context);
grpc_slice_buffer_destroy(&input_buffer);
grpc_slice_buffer_destroy(&output_buffer);
grpc_test_only_control_plane_credentials_destroy();
grpc_shutdown_blocking();
return 0;
}

@ -31,7 +31,6 @@ bool leak_check = true;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_core::testing::LeakDetector leak_detector(true);
grpc_init();
grpc_test_only_control_plane_credentials_force_init();
auto* context = grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
grpc_slice_buffer input_buffer;
@ -49,7 +48,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_stream_compression_context_destroy(context);
grpc_slice_buffer_destroy(&input_buffer);
grpc_slice_buffer_destroy(&output_buffer);
grpc_test_only_control_plane_credentials_destroy();
grpc_shutdown_blocking();
return 0;
}

@ -190,6 +190,13 @@ grpc_end2end_nosec_tests()
grpc_cc_test(
name = "h2_ssl_session_reuse_test",
srcs = ["h2_ssl_session_reuse_test.cc"],
data = [
"//src/core/tsi/test_creds:ca.pem",
"//src/core/tsi/test_creds:client.key",
"//src/core/tsi/test_creds:client.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
external_deps = [
"gtest",
],

@ -3,5 +3,5 @@ forms a complete end-to-end test.
To add a new test or fixture:
- add the code to the relevant directory
- update gen_build_yaml.py to reflect the change
- update generate_tests.bzl to reflect the change
- regenerate projects
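
To make the registration step concrete, here is a hypothetical, heavily trimmed illustration of how a test name is paired with its options; the option set is reduced to three fields for brevity, and the real mapping lives in generate_tests.bzl (mirrored by the gen_build_yaml.py structures shown further down in this diff).

import collections

# Trimmed stand-ins for the real structures (see gen_build_yaml.py below for
# the full option list); illustration only.
TestOptions = collections.namedtuple('TestOptions', 'secure cpu_cost proxyable')
default_test_options = TestOptions(secure=False, cpu_cost=1.0, proxyable=True)
LOWCPU = 0.1

END2END_TESTS = {
    'simple_request': default_test_options,
    # A new test gets its own entry, tweaking only the options that differ:
    'my_new_test': default_test_options._replace(cpu_cost=LOWCPU),
}

print(sorted(END2END_TESTS))  # the generators iterate this mapping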

@ -16,22 +16,26 @@
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <stdio.h>
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define CLIENT_CERT_PATH "src/core/tsi/test_creds/client.pem"
#define CLIENT_KEY_PATH "src/core/tsi/test_creds/client.key"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
static const char oauth2_md[] = "Bearer aaslkfjs424535asdf";
static const char* client_identity_property_name = "smurf_name";
static const char* client_identity = "Brainy Smurf";
@ -139,6 +143,11 @@ void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture* f) {
static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* client_args) {
grpc_core::ExecCtx exec_ctx;
grpc_slice ca_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
const char* test_root_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
grpc_channel_credentials* ssl_creds =
grpc_ssl_credentials_create(test_root_cert, nullptr, nullptr, nullptr);
grpc_call_credentials* oauth2_creds = grpc_md_only_test_credentials_create(
@ -156,6 +165,7 @@ static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
grpc_channel_args_destroy(new_client_args);
grpc_channel_credentials_release(ssl_creds);
grpc_call_credentials_release(oauth2_creds);
grpc_slice_unref(ca_slice);
}
static int fail_server_auth_check(grpc_channel_args* server_args) {
@ -193,13 +203,23 @@ static grpc_auth_metadata_processor test_processor_create(int failing) {
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* server_args) {
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
test_server1_cert};
grpc_slice cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
nullptr, &pem_key_cert_pair, 1, 0, nullptr);
grpc_server_credentials_set_auth_metadata_processor(
ssl_creds, test_processor_create(fail_server_auth_check(server_args)));
chttp2_init_server_secure_fullstack(f, server_args, ssl_creds);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
}
/* All test configurations */

@ -16,23 +16,26 @@
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <stdio.h>
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tmpfile.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define CLIENT_CERT_PATH "src/core/tsi/test_creds/client.pem"
#define CLIENT_KEY_PATH "src/core/tsi/test_creds/client.key"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
struct fullstack_secure_fixture_data {
grpc_core::UniquePtr<char> localaddr;
@ -124,10 +127,20 @@ static int fail_server_auth_check(grpc_channel_args* server_args) {
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* server_args) {
grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
test_server1_cert};
grpc_slice cert_slice, key_slice;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
const char* server_cert =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
const char* server_key =
reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
nullptr, &pem_cert_key_pair, 1, 0, nullptr);
nullptr, &pem_key_cert_pair, 1, 0, nullptr);
grpc_slice_unref(cert_slice);
grpc_slice_unref(key_slice);
if (fail_server_auth_check(server_args)) {
grpc_auth_metadata_processor processor = {process_auth_failure, nullptr,
nullptr};
@ -152,20 +165,9 @@ static grpc_end2end_test_config configs[] = {
int main(int argc, char** argv) {
size_t i;
FILE* roots_file;
size_t roots_size = strlen(test_root_cert);
char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
/* Set the SSL roots env var. */
roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
GPR_ASSERT(roots_filename != nullptr);
GPR_ASSERT(roots_file != nullptr);
GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
fclose(roots_file);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
@ -174,10 +176,5 @@ int main(int argc, char** argv) {
}
grpc_shutdown();
/* Cleanup. */
remove(roots_filename);
gpr_free(roots_filename);
return 0;
}

@ -24,7 +24,6 @@
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel.h"
#include "test/core/util/mock_endpoint.h"
@ -42,7 +41,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_test_only_set_slice_hash_seed(0);
if (squelch) gpr_set_log_function(dont_log);
grpc_init();
grpc_test_only_control_plane_credentials_force_init();
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Executor::SetThreadingAll(false);
@ -158,7 +156,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_byte_buffer_destroy(response_payload_recv);
}
}
grpc_test_only_control_plane_credentials_destroy();
grpc_shutdown_blocking();
return 0;
}

@ -20,7 +20,6 @@
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/server.h"
#include "test/core/util/mock_endpoint.h"
@ -39,7 +38,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_test_only_set_slice_hash_seed(0);
if (squelch) gpr_set_log_function(dont_log);
grpc_init();
grpc_test_only_control_plane_credentials_force_init();
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Executor::SetThreadingAll(false);
@ -133,7 +131,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_server_destroy(server);
grpc_completion_queue_destroy(cq);
}
grpc_test_only_control_plane_credentials_destroy();
grpc_shutdown();
return 0;
}

@ -11,408 +11,36 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
from __future__ import print_function
"""Generates the list of end2end test cases from generate_tests.bzl"""
import os
import sys
import yaml
import collections
import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel'
)
default_unsecure_fixture_options = FixtureOptions(
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True,
False, [], [], True, False, True, False, True, False, True, True)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(
fullstack=False, dns_resolver=False, client_channel=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(
secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'])
local_fixture_options = default_secure_fixture_options._replace(
dns_resolver=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'])
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False,
fullstack=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'],
client_channel=False)
inproc_fixture_options = default_secure_fixture_options._replace(
dns_resolver=False,
fullstack=False,
name_resolution=False,
supports_compression=False,
is_inproc=True,
is_http2=False,
supports_write_buffering=False,
client_channel=False)
# maps fixture name to whether it requires the security library
END2END_FIXTURES = {
'h2_compress':
default_unsecure_fixture_options._replace(enables_compression=True),
'h2_census':
default_unsecure_fixture_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'h2_load_reporting': default_unsecure_fixture_options,
'h2_fakesec':
default_secure_fixture_options._replace(ci_mac=False),
'h2_fd':
fd_unsecure_fixture_options,
'h2_full':
default_unsecure_fixture_options,
'h2_full+pipe':
default_unsecure_fixture_options._replace(platforms=['linux'],
exclude_iomgrs=['uv']),
'h2_full+trace':
default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds':
default_unsecure_fixture_options,
'h2_http_proxy':
default_unsecure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv'],
supports_proxy_auth=True),
'h2_oauth2':
default_secure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv']),
'h2_proxy':
default_unsecure_fixture_options._replace(includes_proxy=True,
ci_mac=False,
exclude_iomgrs=['uv']),
'h2_sockpair_1byte':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
exclude_configs=['msan'],
large_writes=False,
exclude_iomgrs=['uv']),
'h2_sockpair':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv']),
'h2_sockpair+trace':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
tracing=True,
large_writes=False,
exclude_iomgrs=['uv']),
'h2_ssl':
default_secure_fixture_options,
'h2_ssl_cred_reload':
default_secure_fixture_options,
'h2_tls':
default_secure_fixture_options,
'h2_local_uds':
local_fixture_options,
'h2_local_ipv4':
local_fixture_options,
'h2_local_ipv6':
local_fixture_options,
'h2_ssl_proxy':
default_secure_fixture_options._replace(includes_proxy=True,
ci_mac=False,
exclude_iomgrs=['uv']),
'h2_uds':
uds_fixture_options,
'inproc':
inproc_fixture_options
}
TestOptions = collections.namedtuple(
'TestOptions',
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel'
)
default_test_options = TestOptions(False, False, False, True, False, True, 1.0,
[], False, False, True, False, False, False,
False, False, False)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
os.chdir(_ROOT)
LOWCPU = 0.1
# maps test names to options
END2END_TESTS = {
'authority_not_supported':
default_test_options,
'bad_hostname':
default_test_options._replace(needs_names=True),
'bad_ping':
connectivity_test_options._replace(proxyable=False),
'binary_metadata':
default_test_options._replace(cpu_cost=LOWCPU),
'resource_quota_server':
default_test_options._replace(large_writes=True,
proxyable=False,
allows_compression=False),
'call_creds':
default_test_options._replace(secure=True),
'cancel_after_accept':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_round_trip':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_with_status':
default_test_options._replace(cpu_cost=LOWCPU),
'compressed_payload':
default_test_options._replace(proxyable=False, needs_compression=True),
'connectivity':
connectivity_test_options._replace(needs_names=True,
proxyable=False,
cpu_cost=LOWCPU,
exclude_iomgrs=['uv']),
'channelz':
default_test_options,
'default_host':
default_test_options._replace(needs_fullstack=True,
needs_dns=True,
needs_names=True),
'call_host_override':
default_test_options._replace(needs_fullstack=True,
needs_dns=True,
needs_names=True),
'disappearing_server':
connectivity_test_options._replace(flaky=True, needs_names=True),
'empty_batch':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails':
default_test_options,
'filter_context':
default_test_options,
'filter_latency':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code':
default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'hpack_size':
default_test_options._replace(proxyable=False,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno':
default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request':
default_test_options,
'invoke_large_request':
default_test_options,
'keepalive_timeout':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
needs_http2=True),
'large_metadata':
default_test_options,
'max_concurrent_streams':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
exclude_inproc=True),
'max_connection_age':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'max_connection_idle':
connectivity_test_options._replace(proxyable=False,
exclude_iomgrs=['uv'],
cpu_cost=LOWCPU),
'max_message_length':
default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline':
default_test_options,
'no_error_on_hotpath':
default_test_options._replace(proxyable=False),
'no_logging':
default_test_options._replace(traceable=False),
'no_op':
default_test_options,
'payload':
default_test_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'load_reporting_hook': default_test_options,
'ping_pong_streaming':
default_test_options._replace(cpu_cost=LOWCPU),
'ping':
connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'proxy_auth':
default_test_options._replace(needs_proxy_auth=True),
'registered_call':
default_test_options,
'request_with_flags':
default_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'request_with_payload':
default_test_options._replace(cpu_cost=LOWCPU),
# TODO(roth): Remove proxyable=False for all retry tests once we
# have a way for the proxy to propagate the fact that trailing
# metadata is available when initial metadata is returned.
# See https://github.com/grpc/grpc/issues/14467 for context.
'retry':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_cancellation':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_disabled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_initial_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_subsequent_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status_before_recv_trailing_metadata_started':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_recv_initial_metadata':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_recv_message':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_server_pushback_delay':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_server_pushback_disabled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_after_commit':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_succeeds_before_replay_finished':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_throttled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_too_many_attempts':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'server_finishes_request':
default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_calls':
default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_tags':
default_test_options._replace(cpu_cost=LOWCPU),
'simple_cacheable_request':
default_test_options._replace(cpu_cost=LOWCPU),
'stream_compression_compressed_payload':
default_test_options._replace(proxyable=False, exclude_inproc=True),
'stream_compression_payload':
default_test_options._replace(exclude_inproc=True),
'stream_compression_ping_pong_streaming':
default_test_options._replace(exclude_inproc=True),
'simple_delayed_request':
connectivity_test_options,
'simple_metadata':
default_test_options,
'simple_request':
default_test_options,
'streaming_error_response':
default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata':
default_test_options,
'workaround_cronet_compression':
default_test_options,
'write_buffering':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
'write_buffering_at_end':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
}
def load(*args):
"""Replacement of bazel's load() function"""
pass
def compatible(f, t):
if END2END_TESTS[t].needs_fullstack:
if not END2END_FIXTURES[f].fullstack:
return False
if END2END_TESTS[t].needs_dns:
if not END2END_FIXTURES[f].dns_resolver:
return False
if END2END_TESTS[t].needs_names:
if not END2END_FIXTURES[f].name_resolution:
return False
if not END2END_TESTS[t].proxyable:
if END2END_FIXTURES[f].includes_proxy:
return False
if not END2END_TESTS[t].traceable:
if END2END_FIXTURES[f].tracing:
return False
if END2END_TESTS[t].large_writes:
if not END2END_FIXTURES[f].large_writes:
return False
if not END2END_TESTS[t].allows_compression:
if END2END_FIXTURES[f].enables_compression:
return False
if END2END_TESTS[t].needs_compression:
if not END2END_FIXTURES[f].supports_compression:
return False
if END2END_TESTS[t].exclude_inproc:
if END2END_FIXTURES[f].is_inproc:
return False
if END2END_TESTS[t].needs_http2:
if not END2END_FIXTURES[f].is_http2:
return False
if END2END_TESTS[t].needs_proxy_auth:
if not END2END_FIXTURES[f].supports_proxy_auth:
return False
if END2END_TESTS[t].needs_write_buffering:
if not END2END_FIXTURES[f].supports_write_buffering:
return False
if END2END_TESTS[t].needs_client_channel:
if not END2END_FIXTURES[f].client_channel:
return False
return True
def struct(**kwargs):
return kwargs # all the args as a dict
def without(l, e):
l = l[:]
l.remove(e)
return l
# generate_tests.bzl is now the source of truth for end2end tests.
# The .bzl file is basically a python file and we can "execute" it
# to get access to the variables it defines.
execfile('test/core/end2end/generate_tests.bzl')
# Originally, this method was used to generate end2end test cases for build.yaml,
# but since the test cases are now extracted from the bazel BUILD file,
# this is not used for generating run_tests.py test cases anymore.
# Nevertheless, a subset of the output is still used by end2end_tests.cc.template
# and end2end_nosec_tests.cc.template
# TODO(jtattermusch): cleanup this file, so that it only generates the data we need.
# Right now there's some duplication between generate_tests.bzl and this file.
def main():
json = {
# needed by end2end_tests.cc.template and end2end_nosec_tests.cc.template
'core_end2end_tests':
dict((t, END2END_TESTS[t].secure) for t in END2END_TESTS.keys())
dict((t, END2END_TESTS[t]['secure']) for t in END2END_TESTS.keys())
}
print(yaml.dump(json))
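
For readers unfamiliar with the trick gen_build_yaml.py relies on, the following self-contained sketch shows the same idea in Python 3 (execfile() above is Python 2 only): stub out Bazel's load() and struct(), exec the .bzl, and read its top-level variables. The file path matches the snippet above; everything else is illustrative, and real callers may need additional helper stubs beyond these two.

def load(*args, **kwargs):
    """Stand-in for Bazel's load(); the .bzl's load statements are ignored."""


def struct(**kwargs):
    """Stand-in for Bazel's struct(); exposes the fields as a plain dict."""
    return kwargs


bzl_path = 'test/core/end2end/generate_tests.bzl'
scope = {'load': load, 'struct': struct}
with open(bzl_path) as f:
    # The .bzl is valid Python once load()/struct() exist, so exec'ing it
    # populates `scope` with its top-level definitions (END2END_TESTS, ...).
    exec(compile(f.read(), bzl_path, 'exec'), scope)

end2end_tests = scope.get('END2END_TESTS', {})
print(sorted(end2end_tests))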

Some files were not shown because too many files have changed in this diff.
